Merge branch 'Mintplex-Labs:master' into master

This commit is contained in:
Dietrich-at-Qvest 2025-02-12 14:12:40 +01:00 committed by GitHub
commit b2205eb443
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
67 changed files with 2338 additions and 679 deletions
.github/workflows
collector
processLink/convert
processRawText
processSingleFile/convert
utils
extensions
Confluence
RepoLoader
GithubRepo
GitlabRepo
WebsiteDepth
YoutubeTranscript
files
tokenizer
frontend/src
components
LLMSelection/AzureAiOptions
Modals/ManageWorkspace/Documents/UploadFile
WorkspaceChat/ChatContainer/ChatHistory
Chartable
ThoughtContainer
index.css
locales
pages
Admin/Agents/SQLConnectorSelection
Main
OnboardingFlow/Steps
CreateWorkspace
DataHandling
Home
LLMPreference
Survey
UserSetup
WorkspaceSettings/AgentConfig/AgentModelSelection
server
models
utils
AiProviders
azureOpenAi
deepseek
modelMap.js
openAi
openRouter
perplexity
EmbeddingEngines
native
ollama
EmbeddingRerankers/native
agents/aibitat/plugins
helpers

View file

@ -6,7 +6,7 @@ concurrency:
on:
push:
branches: ['agent-ui-animations'] # put your current branch to create a build. Core team only.
branches: ['3069-tokenizer-collector-improvements'] # put your current branch to create a build. Core team only.
paths-ignore:
- '**.md'
- 'cloud-deployments/*'

View file

@ -41,7 +41,7 @@ async function scrapeGenericUrl(link, textOnly = false) {
published: new Date().toLocaleString(),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -55,7 +55,7 @@ async function processRawText(textContent, metadata) {
published: METADATA_KEYS.possible.published(metadata),
wordCount: textContent.split(" ").length,
pageContent: textContent,
token_count_estimate: tokenizeString(textContent).length,
token_count_estimate: tokenizeString(textContent),
};
const document = writeToServerDocuments(

View file

@ -56,7 +56,7 @@ async function asAudio({ fullFilePath = "", filename = "", options = {} }) {
published: createdDate(fullFilePath),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -42,7 +42,7 @@ async function asDocX({ fullFilePath = "", filename = "" }) {
published: createdDate(fullFilePath),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -40,7 +40,7 @@ async function asEPub({ fullFilePath = "", filename = "" }) {
published: createdDate(fullFilePath),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -53,7 +53,7 @@ async function asMbox({ fullFilePath = "", filename = "" }) {
published: createdDate(fullFilePath),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
item++;

View file

@ -38,7 +38,7 @@ async function asOfficeMime({ fullFilePath = "", filename = "" }) {
published: createdDate(fullFilePath),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -49,7 +49,7 @@ async function asPdf({ fullFilePath = "", filename = "" }) {
published: createdDate(fullFilePath),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -38,7 +38,7 @@ async function asTxt({ fullFilePath = "", filename = "" }) {
published: createdDate(fullFilePath),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -67,7 +67,7 @@ async function asXlsx({ fullFilePath = "", filename = "" }) {
published: createdDate(fullFilePath),
wordCount: content.split(/\s+/).length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
const document = writeToServerDocuments(

View file

@ -104,7 +104,7 @@ async function loadConfluence(
published: new Date().toLocaleString(),
wordCount: doc.pageContent.split(" ").length,
pageContent: doc.pageContent,
token_count_estimate: tokenizeString(doc.pageContent).length,
token_count_estimate: tokenizeString(doc.pageContent),
};
console.log(

View file

@ -66,7 +66,7 @@ async function loadGithubRepo(args, response) {
published: new Date().toLocaleString(),
wordCount: doc.pageContent.split(" ").length,
pageContent: doc.pageContent,
token_count_estimate: tokenizeString(doc.pageContent).length,
token_count_estimate: tokenizeString(doc.pageContent),
};
console.log(
`[Github Loader]: Saving ${doc.metadata.source} to ${outFolder}`

View file

@ -82,7 +82,7 @@ async function loadGitlabRepo(args, response) {
}
data.wordCount = pageContent.split(" ").length;
data.token_count_estimate = tokenizeString(pageContent).length;
data.token_count_estimate = tokenizeString(pageContent);
data.pageContent = pageContent;
console.log(

View file

@ -122,7 +122,7 @@ async function bulkScrapePages(links, outFolderPath) {
published: new Date().toLocaleString(),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
writeToServerDocuments(data, data.title, outFolderPath);

View file

@ -115,7 +115,7 @@ async function loadYouTubeTranscript({ url }) {
published: new Date().toLocaleString(),
wordCount: content.split(" ").length,
pageContent: content,
token_count_estimate: tokenizeString(content).length,
token_count_estimate: tokenizeString(content),
};
console.log(`[YouTube Loader]: Saving ${metadata.title} to ${outFolder}`);

View file

@ -2,16 +2,62 @@ const fs = require("fs");
const path = require("path");
const { MimeDetector } = require("./mime");
/**
* Checks if a file is text by checking the mime type and then falling back to buffer inspection.
* This way we can capture all the cases where the mime type is not known but still parseable as text
* without having to constantly add new mime type overrides.
* @param {string} filepath - The path to the file.
* @returns {boolean} - Returns true if the file is text, false otherwise.
*/
function isTextType(filepath) {
try {
if (!fs.existsSync(filepath)) return false;
const result = isKnownTextMime(filepath);
if (result.valid) return true; // Known text type - return true.
if (result.reason !== "generic") return false; // If any other reason than generic - return false.
return parseableAsText(filepath); // Fallback to parsing as text via buffer inspection.
}
/**
* Checks if a file is known to be text by checking the mime type.
* @param {string} filepath - The path to the file.
* @returns {boolean} - Returns true if the file is known to be text, false otherwise.
*/
function isKnownTextMime(filepath) {
try {
const mimeLib = new MimeDetector();
const mime = mimeLib.getType(filepath);
if (mimeLib.badMimes.includes(mime)) return false;
if (mimeLib.badMimes.includes(mime))
return { valid: false, reason: "bad_mime" };
const type = mime.split("/")[0];
if (mimeLib.nonTextTypes.includes(type)) return false;
return true;
if (mimeLib.nonTextTypes.includes(type))
return { valid: false, reason: "non_text_mime" };
return { valid: true, reason: "valid_mime" };
} catch (e) {
return { valid: false, reason: "generic" };
}
}
/**
* Checks if a file is parseable as text by forcing it to be read as text in utf8 encoding.
* If the file looks too much like a binary file, it will return false.
* @param {string} filepath - The path to the file.
* @returns {boolean} - Returns true if the file is parseable as text, false otherwise.
*/
function parseableAsText(filepath) {
try {
const fd = fs.openSync(filepath, "r");
const buffer = Buffer.alloc(1024); // Read first 1KB of the file synchronously
const bytesRead = fs.readSync(fd, buffer, 0, 1024, 0);
fs.closeSync(fd);
const content = buffer.subarray(0, bytesRead).toString("utf8");
const nullCount = (content.match(/\0/g) || []).length;
const controlCount = (content.match(/[\x00-\x08\x0B\x0C\x0E-\x1F]/g) || [])
.length;
const threshold = bytesRead * 0.1;
return nullCount + controlCount < threshold;
} catch {
return false;
}

View file

@ -1,7 +1,6 @@
const MimeLib = require("mime");
const path = require("path");
class MimeDetector {
nonTextTypes = ["multipart", "image", "model", "audio", "video"];
nonTextTypes = ["multipart", "image", "model", "audio", "video", "font"];
badMimes = [
"application/octet-stream",
"application/zip",
@ -48,11 +47,6 @@ class MimeDetector {
);
}
// These are file types that are not detected by the mime library and need to be processed as text files.
// You should only add file types that are not detected by the mime library, are parsable as text, and are files
// with no extension. Otherwise, their extension should be added to the overrides array.
#specialTextFileTypes = ["dockerfile", "jenkinsfile", "dockerignore"];
/**
* Returns the MIME type of the file. If the file has no extension found, it will be processed as a text file.
* @param {string} filepath
@ -61,12 +55,6 @@ class MimeDetector {
getType(filepath) {
const parsedMime = this.lib.getType(filepath);
if (!!parsedMime) return parsedMime;
// If the mime could not be parsed, it could be a special file type like Dockerfile or Jenkinsfile
// which we can reliably process as text files.
const baseName = path.basename(filepath)?.toLowerCase();
if (this.#specialTextFileTypes.includes(baseName)) return "text/plain";
return null;
}
}

View file

@ -1,15 +1,66 @@
const { getEncoding } = require("js-tiktoken");
function tokenizeString(input = "") {
class TikTokenTokenizer {
static MAX_KB_ESTIMATE = 10;
static DIVISOR = 8;
constructor() {
if (TikTokenTokenizer.instance) {
this.log(
"Singleton instance already exists. Returning existing instance."
);
return TikTokenTokenizer.instance;
}
this.encoder = getEncoding("cl100k_base");
TikTokenTokenizer.instance = this;
this.log("Initialized new TikTokenTokenizer instance.");
}
log(text, ...args) {
console.log(`\x1b[35m[TikTokenTokenizer]\x1b[0m ${text}`, ...args);
}
/**
* Check if the input is too long to encode
* this is more of a rough estimate and a sanity check to prevent
* CPU issues from encoding too large of strings
* Assumes 1 character = 2 bytes in JS
* @param {string} input
* @returns {boolean}
*/
#isTooLong(input) {
const bytesEstimate = input.length * 2;
const kbEstimate = Math.floor(bytesEstimate / 1024);
return kbEstimate >= TikTokenTokenizer.MAX_KB_ESTIMATE;
}
/**
* Encode a string into tokens for rough token count estimation.
* @param {string} input
* @returns {number}
*/
tokenizeString(input = "") {
try {
const encoder = getEncoding("cl100k_base");
return encoder.encode(input);
if (this.#isTooLong(input)) {
this.log("Input will take too long to encode - estimating");
return Math.ceil(input.length / TikTokenTokenizer.DIVISOR);
}
return this.encoder.encode(input).length;
} catch (e) {
console.error("Could not tokenize string!");
return [];
this.log("Could not tokenize string! Estimating...", e.message, e.stack);
return Math.ceil(input?.length / TikTokenTokenizer.DIVISOR) || 0;
}
}
}
const tokenizer = new TikTokenTokenizer();
module.exports = {
tokenizeString,
/**
* Encode a string into tokens for rough token count estimation.
* @param {string} input
* @returns {number}
*/
tokenizeString: (input) => tokenizer.tokenizeString(input),
};

View file

@ -71,6 +71,21 @@ export default function AzureAiOptions({ settings }) {
</option>
</select>
</div>
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Model Type
</label>
<select
name="AzureOpenAiModelType"
defaultValue={settings?.AzureOpenAiModelType || "default"}
className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
required={true}
>
<option value="default">Default</option>
<option value="reasoning">Reasoning</option>
</select>
</div>
</div>
</div>
);

View file

@ -40,9 +40,11 @@ export default function UploadFile({
setFetchingUrl(false);
};
// Don't spam fetchKeys, wait 1s between calls at least.
const handleUploadSuccess = debounce(() => fetchKeys(true), 1000);
const handleUploadError = (_msg) => null; // stubbed.
// Queue all fetchKeys calls through the same debouncer to prevent spamming the server.
// either a success or error will trigger a fetchKeys call so the UI is not stuck loading.
const debouncedFetchKeys = debounce(() => fetchKeys(true), 1000);
const handleUploadSuccess = () => debouncedFetchKeys();
const handleUploadError = () => debouncedFetchKeys();
const onDrop = async (acceptedFiles, rejections) => {
const newAccepted = acceptedFiles.map((file) => {

View file

@ -46,9 +46,9 @@ export default function Tooltip({ legendColor, ...props }) {
{...props}
content={({ active, payload, label }) => {
return active && payload ? (
<div className="bg-white text-sm rounded-md border shadow-lg">
<div className="bg-theme-bg-primary text-sm rounded-md border shadow-lg">
<div className="border-b py-2 px-4">
<p className="text-elem text-gray-700 font-medium">{label}</p>
<p className="text-theme-bg-primary font-medium">{label}</p>
</div>
<div className="space-y-1 py-2 px-4">
{payload.map(({ value, name }, idx) => (
@ -58,7 +58,7 @@ export default function Tooltip({ legendColor, ...props }) {
>
<div className="flex items-center space-x-2">
<span
className="shrink-0 h-3 w-3 border-white rounded-md rounded-full border-2 shadow-md"
className="shrink-0 h-3 w-3 border-theme-bg-primary rounded-md rounded-full border-2 shadow-md"
style={{ backgroundColor: legendColor }}
/>
<p

View file

@ -75,8 +75,10 @@ export function Chartable({ props, workspace }) {
switch (chartType) {
case "area":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
<AreaChart
className="h-[350px]"
data={data}
@ -90,8 +92,10 @@ export function Chartable({ props, workspace }) {
);
case "bar":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
<BarChart
className="h-[350px]"
data={data}
@ -107,8 +111,10 @@ export function Chartable({ props, workspace }) {
);
case "line":
return (
<div className="bg-zinc-900 p-8 pb-12 rounded-xl text-white h-[500px] w-full">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 pb-12 rounded-xl text-white h-[500px] w-full light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
<LineChart
className="h-[400px]"
data={data}
@ -122,8 +128,10 @@ export function Chartable({ props, workspace }) {
);
case "composed":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
{showLegend && (
<Legend
categories={[value]}
@ -178,8 +186,10 @@ export function Chartable({ props, workspace }) {
);
case "scatter":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
{showLegend && (
<div className="flex justify-end">
<Legend
@ -224,8 +234,10 @@ export function Chartable({ props, workspace }) {
);
case "pie":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
<DonutChart
data={data}
category={value}
@ -248,8 +260,10 @@ export function Chartable({ props, workspace }) {
);
case "radar":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
{showLegend && (
<div className="flex justify-end">
<Legend
@ -282,8 +296,10 @@ export function Chartable({ props, workspace }) {
);
case "radialbar":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
{showLegend && (
<div className="flex justify-end">
<Legend
@ -317,8 +333,10 @@ export function Chartable({ props, workspace }) {
);
case "treemap":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
{showLegend && (
<div className="flex justify-end">
<Legend
@ -343,8 +361,10 @@ export function Chartable({ props, workspace }) {
);
case "funnel":
return (
<div className="bg-zinc-900 p-8 rounded-xl text-white">
<h3 className="text-lg font-medium">{title}</h3>
<div className="bg-theme-bg-primary p-8 rounded-xl text-white light:border light:border-theme-border-primary">
<h3 className="text-lg text-theme-text-primary font-medium">
{title}
</h3>
{showLegend && (
<div className="flex justify-end">
<Legend
@ -413,7 +433,7 @@ const customTooltip = (props) => {
const categoryPayload = payload?.[0];
if (!categoryPayload) return null;
return (
<div className="w-56 bg-zinc-400 rounded-lg border p-2 text-white">
<div className="w-56 bg-theme-bg-primary rounded-lg border p-2 text-white">
<div className="flex flex-1 space-x-2.5">
<div
className={`flex w-1.5 flex-col bg-${categoryPayload?.color}-500 rounded`}
@ -447,13 +467,13 @@ function DownloadGraph({ onClick }) {
<div className="p-1 rounded-full border-none">
{loading ? (
<CircleNotch
className="text-white/50 w-5 h-5 animate-spin"
className="text-theme-text-primary w-5 h-5 animate-spin"
aria-label="Downloading image..."
/>
) : (
<DownloadSimple
weight="bold"
className="text-white/50 w-5 h-5 hover:text-white"
className="text-theme-text-primary w-5 h-5 hover:text-theme-text-primary"
onClick={handleClick}
aria-label="Download graph image"
/>

View file

@ -84,7 +84,7 @@ export const ThoughtChainComponent = forwardRef(
<div className="flex-1 overflow-hidden">
{!isExpanded && !autoExpand ? (
<span
className="text-xs text-theme-text-secondary font-mono inline-block w-full"
className="text-theme-text-secondary font-mono inline-block w-full"
dangerouslySetInnerHTML={{
__html: DOMPurify.sanitize(
truncate(tagStrippedContent, THOUGHT_PREVIEW_LENGTH)
@ -93,7 +93,7 @@ export const ThoughtChainComponent = forwardRef(
/>
) : (
<span
className="text-xs text-theme-text-secondary font-mono inline-block w-full"
className="text-theme-text-secondary font-mono inline-block w-full"
dangerouslySetInnerHTML={{
__html: DOMPurify.sanitize(
renderMarkdown(tagStrippedContent)

View file

@ -836,6 +836,10 @@ does not extend the close button beyond the viewport. */
fill: #fff;
}
[data-theme="light"] .recharts-text > * {
fill: #000;
}
.recharts-legend-wrapper {
margin-bottom: 10px;
}

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Name der Arbeitsbereiche",
error: "Fehler",
@ -9,8 +58,10 @@ const TRANSLATIONS = {
save: "Änderungen speichern",
previous: "Vorherige Seite",
next: "Nächste Seite",
optional: null,
yes: null,
no: null,
},
settings: {
title: "Instanzeinstellungen",
system: "Allgemeine Einstellungen",
@ -41,7 +92,6 @@ const TRANSLATIONS = {
"back-to-workspaces": "zurück zum Arbeitsbereich",
open: "Einstellungen öffnen",
},
login: {
"multi-user": {
welcome: "Willkommen bei",
@ -65,7 +115,6 @@ const TRANSLATIONS = {
"back-to-login": "Zurück zur Anmeldung",
},
},
welcomeMessage: {
part1:
"Willkommen bei AnythingLLM, AnythingLLM ist ein Open-Source-KI-Tool von Mintplex Labs, das alles in einen trainierten Chatbot verwandelt, den Sie abfragen und mit dem Sie chatten können. AnythingLLM ist eine BYOK-Software (Bring-Your-Own-Keys), daher gibt es keine Abonnements, Gebühren oder Kosten für diese Software außerhalb der Dienste, die Sie damit nutzen möchten.",
@ -87,12 +136,10 @@ const TRANSLATIONS = {
starOnGithub: "Stern auf GitHub",
contact: "Kontaktieren Sie Mintplex Labs",
},
"new-workspace": {
title: "Neuer Arbeitsbereich",
placeholder: "Mein Arbeitsbereich",
},
"workspaces—settings": {
general: "Allgemeine Einstellungen",
chat: "Chat-Einstellungen",
@ -100,7 +147,6 @@ const TRANSLATIONS = {
members: "Mitglieder",
agent: "Agentenkonfiguration",
},
general: {
vector: {
title: "Vektoranzahl",
@ -148,7 +194,6 @@ const TRANSLATIONS = {
"Arbeitsbereich zu löschen. Dies entfernt alle Vektoreinbettungen in Ihrer Vektordatenbank.\n\nDie ursprünglichen Quelldateien bleiben unberührt. Diese Aktion ist irreversibel.",
},
},
chat: {
llm: {
title: "Arbeitsbereich-LLM-Anbieter",
@ -214,7 +259,6 @@ const TRANSLATIONS = {
hint: "Die meisten LLMs haben verschiedene akzeptable Bereiche gültiger Werte. Konsultieren Sie Ihren LLM-Anbieter für diese Informationen.",
},
},
"vector-workspace": {
identifier: "Vektordatenbank-Identifikator",
snippets: {
@ -242,7 +286,6 @@ const TRANSLATIONS = {
success: "Die Arbeitsbereich-Vektordatenbank wurde zurückgesetzt!",
},
},
agent: {
"performance-warning":
"Die Leistung von LLMs, die Werkzeugaufrufe nicht explizit unterstützen, hängt stark von den Fähigkeiten und der Genauigkeit des Modells ab. Einige Fähigkeiten können eingeschränkt oder nicht funktionsfähig sein.",
@ -262,7 +305,6 @@ const TRANSLATIONS = {
"Das spezifische LLM-Modell, das für den @agent-Agenten dieses Arbeitsbereichs verwendet wird.",
wait: "-- warte auf Modelle --",
},
skill: {
title: "Standard-Agentenfähigkeiten",
description:
@ -307,7 +349,6 @@ const TRANSLATIONS = {
custom: "dedizierte Agenten gibt es bald!",
},
},
recorded: {
title: "Arbeitsbereich-Chats",
description:
@ -322,7 +363,6 @@ const TRANSLATIONS = {
at: "Gesendet am",
},
},
appearance: {
title: "Erscheinungsbild",
description: "Passen Sie die Erscheinungseinstellungen Ihrer Plattform an.",
@ -355,7 +395,6 @@ const TRANSLATIONS = {
link: "Link",
},
},
api: {
title: "API-Schlüssel",
description:
@ -368,14 +407,12 @@ const TRANSLATIONS = {
created: "Erstellt",
},
},
llm: {
title: "LLM-Präferenz",
description:
"Dies sind die Anmeldeinformationen und Einstellungen für Ihren bevorzugten LLM-Chat- und Einbettungsanbieter. Es ist wichtig, dass diese Schlüssel aktuell und korrekt sind, sonst wird AnythingLLM nicht richtig funktionieren.",
provider: "LLM-Anbieter",
},
transcription: {
title: "Transkriptionsmodell-Präferenz",
description:
@ -388,7 +425,6 @@ const TRANSLATIONS = {
"warn-end":
"Das eingebaute Modell wird bei der ersten Verwendung automatisch heruntergeladen.",
},
embedding: {
title: "Einbettungspräferenz",
"desc-start":
@ -401,7 +437,6 @@ const TRANSLATIONS = {
"Bei Verwendung der nativen Einbettungs-Engine von AnythingLLM ist keine Einrichtung erforderlich.",
},
},
text: {
title: "Textsplitting & Chunking-Präferenzen",
"desc-start":
@ -417,14 +452,12 @@ const TRANSLATIONS = {
"Dies ist die maximale Länge der Zeichen, die in einem einzelnen Vektor vorhanden sein können.",
recommend: "Die maximale Länge des Einbettungsmodells beträgt",
},
overlap: {
title: "Textchunk-Überlappung",
description:
"Dies ist die maximale Überlappung von Zeichen, die während des Chunkings zwischen zwei benachbarten Textchunks auftritt.",
},
},
vector: {
title: "Vektordatenbank",
description:
@ -434,7 +467,6 @@ const TRANSLATIONS = {
description: "Für LanceDB ist keine Konfiguration erforderlich.",
},
},
embeddable: {
title: "Einbettbare Chat-Widgets",
description:
@ -446,7 +478,6 @@ const TRANSLATIONS = {
Active: "Aktive Domains",
},
},
"embed-chats": {
title: "Eingebettete Chats",
export: "Exportieren",
@ -460,7 +491,6 @@ const TRANSLATIONS = {
at: "Gesendet am",
},
},
multi: {
title: "Mehrbenutzer-Modus",
description:
@ -485,7 +515,6 @@ const TRANSLATIONS = {
password: "Instanz-Passwort",
},
},
event: {
title: "Ereignisprotokolle",
description:
@ -497,7 +526,6 @@ const TRANSLATIONS = {
occurred: "Aufgetreten am",
},
},
privacy: {
title: "Datenschutz & Datenverarbeitung",
description:

View file

@ -1,4 +1,62 @@
const TRANSLATIONS = {
onboarding: {
home: {
title: "Welcome to",
getStarted: "Get Started",
},
llm: {
title: "LLM Preference",
description:
"AnythingLLM can work with many LLM providers. This will be the service which handles chatting.",
},
userSetup: {
title: "User Setup",
description: "Configure your user settings.",
howManyUsers: "How many users will be using this instance?",
justMe: "Just me",
myTeam: "My team",
instancePassword: "Instance Password",
setPassword: "Would you like to set up a password?",
passwordReq: "Passwords must be at least 8 characters.",
passwordWarn:
"It's important to save this password because there is no recovery method.",
adminUsername: "Admin account username",
adminUsernameReq:
"Username must be at least 6 characters long and only contain lowercase letters, numbers, underscores, and hyphens with no spaces.",
adminPassword: "Admin account password",
adminPasswordReq: "Passwords must be at least 8 characters.",
teamHint:
"By default, you will be the only admin. Once onboarding is completed you can create and invite others to be users or admins. Do not lose your password as only admins can reset passwords.",
},
data: {
title: "Data Handling & Privacy",
description:
"We are committed to transparency and control when it comes to your personal data.",
settingsHint:
"These settings can be reconfigured at any time in the settings.",
},
survey: {
title: "Welcome to AnythingLLM",
description: "Help us make AnythingLLM built for your needs. Optional.",
email: "What's your email?",
useCase: "What will you use AnythingLLM for?",
useCaseWork: "For work",
useCasePersonal: "For personal use",
useCaseOther: "Other",
comment: "How did you hear about AnythingLLM?",
commentPlaceholder:
"Reddit, Twitter, GitHub, YouTube, etc. - Let us know how you found us!",
skip: "Skip Survey",
thankYou: "Thank you for your feedback!",
},
workspace: {
title: "Create your first workspace",
description:
"Create your first workspace and get started with AnythingLLM.",
},
},
common: {
"workspaces-name": "Workspaces Name",
error: "error",
@ -9,6 +67,9 @@ const TRANSLATIONS = {
save: "Save changes",
previous: "Previous Page",
next: "Next Page",
optional: "Optional",
yes: "Yes",
no: "No",
},
// Setting Sidebar menu items.

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Nombre de espacios de trabajo",
error: "error",
@ -9,8 +58,10 @@ const TRANSLATIONS = {
save: "Guardar cambios",
previous: "Página anterior",
next: "Página siguiente",
optional: null,
yes: null,
no: null,
},
settings: {
title: "Configuración de instancia",
system: "Preferencias del sistema",
@ -39,7 +90,6 @@ const TRANSLATIONS = {
contact: "Contactar Soporte",
"browser-extension": "Extensión del navegador",
},
login: {
"multi-user": {
welcome: "Bienvenido a",
@ -63,7 +113,6 @@ const TRANSLATIONS = {
"back-to-login": "Volver al inicio de sesión",
},
},
welcomeMessage: {
part1:
"Bienvenido a AnythingLLM, una herramienta de inteligencia artificial de código abierto creada por Mintplex Labs que convierte cualquier cosa en un chatbot entrenado con el que puedes consultar y conversar. AnythingLLM es un software BYOK (bring-your-own-keys), por lo que no hay suscripciones, tarifas ni cargos por este software, salvo por los servicios que deseas utilizar.",
@ -85,12 +134,10 @@ const TRANSLATIONS = {
starOnGithub: "Estrella en GitHub",
contact: "Contactar a Mintplex Labs",
},
"new-workspace": {
title: "Nuevo Espacio de Trabajo",
placeholder: "Mi Espacio de Trabajo",
},
"workspaces—settings": {
general: "Configuración general",
chat: "Configuración de chat",
@ -98,7 +145,6 @@ const TRANSLATIONS = {
members: "Miembros",
agent: "Configuración del agente",
},
general: {
vector: {
title: "Conteo de vectores",
@ -135,7 +181,6 @@ const TRANSLATIONS = {
"espacio de trabajo. Esto eliminará todas las incrustaciones de vectores en tu base de datos de vectores.\n\nLos archivos de origen originales permanecerán intactos. Esta acción es irreversible.",
},
},
chat: {
llm: {
title: "Proveedor LLM del espacio de trabajo",
@ -194,7 +239,6 @@ const TRANSLATIONS = {
hint: "La mayoría de los LLM tienen varios rangos aceptables de valores válidos. Consulta a tu proveedor de LLM para obtener esa información.",
},
},
"vector-workspace": {
identifier: "Identificador de la base de datos de vectores",
snippets: {
@ -223,7 +267,6 @@ const TRANSLATIONS = {
"¡La base de datos de vectores del espacio de trabajo fue restablecida!",
},
},
agent: {
"performance-warning":
"El rendimiento de los LLM que no admiten explícitamente la llamada de herramientas depende en gran medida de las capacidades y la precisión del modelo. Algunas habilidades pueden estar limitadas o no funcionar.",
@ -243,7 +286,6 @@ const TRANSLATIONS = {
"El modelo LLM específico que se utilizará para el agente @agent de este espacio de trabajo.",
wait: "-- esperando modelos --",
},
skill: {
title: "Habilidades predeterminadas del agente",
description:
@ -282,7 +324,6 @@ const TRANSLATIONS = {
},
},
},
recorded: {
title: "Chats del espacio de trabajo",
description:
@ -297,7 +338,6 @@ const TRANSLATIONS = {
at: "Enviado a",
},
},
appearance: {
title: "Apariencia",
description: "Personaliza la configuración de apariencia de tu plataforma.",
@ -330,7 +370,6 @@ const TRANSLATIONS = {
link: "Enlace",
},
},
api: {
title: "Claves API",
description:
@ -343,14 +382,12 @@ const TRANSLATIONS = {
created: "Creado",
},
},
llm: {
title: "Preferencia de LLM",
description:
"Estas son las credenciales y configuraciones para tu proveedor preferido de chat y incrustación de LLM. Es importante que estas claves estén actualizadas y correctas, de lo contrario AnythingLLM no funcionará correctamente.",
provider: "Proveedor de LLM",
},
transcription: {
title: "Preferencia de modelo de transcripción",
description:
@ -363,7 +400,6 @@ const TRANSLATIONS = {
"warn-end":
"El modelo incorporado se descargará automáticamente en el primer uso.",
},
embedding: {
title: "Preferencia de incrustación",
"desc-start":
@ -376,7 +412,6 @@ const TRANSLATIONS = {
"No se requiere configuración cuando se utiliza el motor de incrustación nativo de AnythingLLM.",
},
},
text: {
title: "Preferencias de división y fragmentación de texto",
"desc-start":
@ -392,14 +427,12 @@ const TRANSLATIONS = {
"Esta es la longitud máxima de caracteres que puede estar presente en un solo vector.",
recommend: "La longitud máxima del modelo de incrustación es",
},
overlap: {
title: "Superposición de fragmentos de texto",
description:
"Esta es la superposición máxima de caracteres que ocurre durante la fragmentación entre dos fragmentos de texto adyacentes.",
},
},
vector: {
title: "Base de datos de vectores",
description:
@ -409,7 +442,6 @@ const TRANSLATIONS = {
description: "No se necesita configuración para LanceDB.",
},
},
embeddable: {
title: "Widgets de chat incrustables",
description:
@ -421,7 +453,6 @@ const TRANSLATIONS = {
Active: "Dominios activos",
},
},
"embed-chats": {
title: "Incrustar chats",
export: "Exportar",
@ -435,7 +466,6 @@ const TRANSLATIONS = {
at: "Enviado a",
},
},
multi: {
title: "Modo multiusuario",
description:
@ -460,7 +490,6 @@ const TRANSLATIONS = {
password: "Contraseña de la instancia",
},
},
event: {
title: "Registros de eventos",
description:
@ -472,7 +501,6 @@ const TRANSLATIONS = {
occurred: "Ocurrido a",
},
},
privacy: {
title: "Privacidad y manejo de datos",
description:

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "نام فضای کار",
error: "خطا",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "ذخیره تغییرات",
previous: "صفحه قبلی",
next: "صفحه بعدی",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "تنظیمات سامانه",
system: "تنظیمات عمومی",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "تماس با پشتیبانی",
"browser-extension": "افزونه مرورگر",
},
// Page Definitions
login: {
"multi-user": {
welcome: "خوش آمدید به",
@ -64,7 +112,6 @@ const TRANSLATIONS = {
"back-to-login": "بازگشت به صفحه ورود",
},
},
welcomeMessage: {
part1:
"به AnythingLLM خوش آمدید. AnythingLLM یک ابزار هوش مصنوعی متن‌باز توسط Mintplex Labs است که هر چیزی را به یک ربات گفتگوی آموزش‌دیده تبدیل می‌کند که می‌توانید با آن گفتگو و پرس‌وجو کنید. AnythingLLM یک نرم‌افزار BYOK (آوردن کلیدهای خودتان) است، بنابراین هیچ اشتراک، هزینه یا مبلغی برای این نرم‌افزار خارج از سرویس‌هایی که می‌خواهید با آن استفاده کنید، وجود ندارد.",
@ -86,13 +133,10 @@ const TRANSLATIONS = {
starOnGithub: "ستاره در گیت‌هاب",
contact: "تماس با Mintplex Labs",
},
"new-workspace": {
title: "فضای کاری جدید",
placeholder: "فضای کاری من",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "تنظیمات عمومی",
chat: "تنظیمات گفتگو",
@ -100,8 +144,6 @@ const TRANSLATIONS = {
members: "اعضا",
agent: "پیکربندی عامل",
},
// General Appearance
general: {
vector: {
title: "تعداد بردارها",
@ -136,8 +178,6 @@ const TRANSLATIONS = {
"فضای کاری هستید. این کار تمام جاسازی‌های برداری را از پایگاه داده برداری شما حذف خواهد کرد.\n\nفایلهای اصلی منبع دست نخورده باقی خواهند ماند. این عمل برگشت‌ناپذیر است.",
},
},
// Chat Settings
chat: {
llm: {
title: "ارائه‌دهنده LLM فضای کاری",
@ -195,8 +235,6 @@ const TRANSLATIONS = {
hint: "اکثر LLMها محدوده‌های مختلفی از مقادیر معتبر را دارند. برای این اطلاعات به ارائه‌دهنده LLM خود مراجعه کنید.",
},
},
// Vector Database
"vector-workspace": {
identifier: "شناسه پایگاه داده برداری",
snippets: {
@ -223,8 +261,6 @@ const TRANSLATIONS = {
success: "پایگاه داده برداری فضای کاری بازنشانی شد!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"عملکرد LLMهایی که به طور صریح از فراخوانی ابزار پشتیبانی نمی‌کنند، به شدت به قابلیت‌ها و دقت مدل وابسته است. برخی توانایی‌ها ممکن است محدود یا غیرفعال باشند.",
@ -282,8 +318,6 @@ const TRANSLATIONS = {
},
},
},
// Workspace Chats
recorded: {
title: "گفتگوهای فضای کاری",
description:
@ -298,8 +332,6 @@ const TRANSLATIONS = {
at: "زمان ارسال",
},
},
// Appearance
appearance: {
title: "ظاهر",
description: "تنظیمات ظاهری پلتفرم خود را شخصی‌سازی کنید.",
@ -331,8 +363,6 @@ const TRANSLATIONS = {
link: "لینک",
},
},
// API Keys
api: {
title: "کلیدهای API",
description:
@ -345,14 +375,12 @@ const TRANSLATIONS = {
created: "تاریخ ایجاد",
},
},
llm: {
title: "ترجیحات مدل زبانی",
description:
"این‌ها اعتبارنامه‌ها و تنظیمات ارائه‌دهنده مدل زبانی و جاسازی انتخابی شما هستند. مهم است که این کلیدها به‌روز و صحیح باشند در غیر این صورت AnythingLLM به درستی کار نخواهد کرد.",
provider: "ارائه‌دهنده مدل زبانی",
},
transcription: {
title: "ترجیحات مدل رونویسی",
description:
@ -364,7 +392,6 @@ const TRANSLATIONS = {
"ما حداقل ۲ گیگابایت RAM و آپلود فایل‌های کمتر از ۱۰ مگابایت را توصیه می‌کنیم.",
"warn-end": "مدل داخلی در اولین استفاده به صورت خودکار دانلود خواهد شد.",
},
embedding: {
title: "ترجیحات جاسازی",
"desc-start":
@ -377,7 +404,6 @@ const TRANSLATIONS = {
"هنگام استفاده از موتور جاسازی داخلی AnythingLLM نیازی به تنظیمات نیست.",
},
},
text: {
title: "تقسیم متن و تکه‌بندی",
"desc-start":
@ -399,8 +425,6 @@ const TRANSLATIONS = {
"این حداکثر همپوشانی کاراکترها است که در هنگام تکه‌بندی بین دو بخش متن مجاور رخ می‌دهد.",
},
},
// Vector Database
vector: {
title: "پایگاه داده برداری",
description:
@ -410,8 +434,6 @@ const TRANSLATIONS = {
description: "برای LanceDB نیازی به پیکربندی نیست.",
},
},
// Embeddable Chat Widgets
embeddable: {
title: "جاسازی گفتگو",
description:
@ -423,7 +445,6 @@ const TRANSLATIONS = {
Active: "دامنه‌های فعال",
},
},
"embed-chats": {
title: "گفتگوهای جاسازی شده",
export: "خروجی‌گیری",
@ -437,7 +458,6 @@ const TRANSLATIONS = {
at: "زمان ارسال",
},
},
multi: {
title: "حالت چند کاربره",
description:
@ -462,8 +482,6 @@ const TRANSLATIONS = {
password: "رمز عبور نمونه",
},
},
// Event Logs
event: {
title: "گزارش رویدادها",
description:
@ -475,8 +493,6 @@ const TRANSLATIONS = {
occurred: "زمان وقوع",
},
},
// Privacy & Data-Handling
privacy: {
title: "حریم خصوصی و مدیریت داده‌ها",
description:

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Nom des espaces de travail",
error: "erreur",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "Enregistrer les modifications",
previous: "Page précédente",
next: "Page suivante",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "Paramètres de l'instance",
system: "Préférences système",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "Contacter le Support",
"browser-extension": "Extension de navigateur",
},
// Page Definitions
login: {
"multi-user": {
welcome: "Bienvenue à",
@ -65,7 +113,6 @@ const TRANSLATIONS = {
"back-to-login": "Retour à la connexion",
},
},
welcomeMessage: {
part1:
"Bienvenue sur AnythingLLM, un outil d'intelligence artificielle open-source créé par Mintplex Labs qui transforme n'importe quoi en un chatbot entraîné avec lequel vous pouvez interroger et discuter. AnythingLLM est un logiciel BYOK (apportez vos propres clés), il n'y a donc pas d'abonnement, de frais ou de charges pour ce logiciel en dehors des services que vous souhaitez utiliser.",
@ -87,13 +134,10 @@ const TRANSLATIONS = {
starOnGithub: "Étoile sur GitHub",
contact: "Contacter Mintplex Labs",
},
"new-workspace": {
title: "Nouveau Espace de Travail",
placeholder: "Mon Espace de Travail",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "Paramètres généraux",
chat: "Paramètres de chat",
@ -101,8 +145,6 @@ const TRANSLATIONS = {
members: "Membres",
agent: "Configuration de l'agent",
},
// General Appearance
general: {
vector: {
title: "Nombre de vecteurs",
@ -140,8 +182,6 @@ const TRANSLATIONS = {
"espace de travail. Cela supprimera toutes les intégrations vectorielles dans votre base de données vectorielle.\n\nLes fichiers source originaux resteront intacts. Cette action est irréversible.",
},
},
// Chat Settings
chat: {
llm: {
title: "Fournisseur LLM de l'espace de travail",
@ -200,8 +240,6 @@ const TRANSLATIONS = {
hint: "La plupart des LLM ont diverses plages acceptables de valeurs valides. Consultez votre fournisseur LLM pour cette information.",
},
},
// Vector Database
"vector-workspace": {
identifier: "Identifiant de la base de données vectorielle",
snippets: {
@ -230,8 +268,6 @@ const TRANSLATIONS = {
"La base de données vectorielle de l'espace de travail a été réinitialisée !",
},
},
// Agent Configuration
agent: {
"performance-warning":
"La performance des LLM qui ne supportent pas explicitement l'appel d'outils dépend fortement des capacités et de la précision du modèle. Certaines capacités peuvent être limitées ou non fonctionnelles.",
@ -251,7 +287,6 @@ const TRANSLATIONS = {
"Le modèle LLM spécifique qui sera utilisé pour l'agent @agent de cet espace de travail.",
wait: "-- en attente des modèles --",
},
skill: {
title: "Compétences par défaut de l'agent",
description:
@ -290,8 +325,6 @@ const TRANSLATIONS = {
},
},
},
// Workspace Chats
recorded: {
title: "Chats de l'espace de travail",
description:
@ -306,8 +339,6 @@ const TRANSLATIONS = {
at: "Envoyé à",
},
},
// Appearance
appearance: {
title: "Apparence",
description:
@ -341,8 +372,6 @@ const TRANSLATIONS = {
link: "Lien",
},
},
// API Keys
api: {
title: "Clés API",
description:
@ -355,14 +384,12 @@ const TRANSLATIONS = {
created: "Créé",
},
},
llm: {
title: "Préférence LLM",
description:
"Voici les identifiants et les paramètres de votre fournisseur LLM de chat et d'intégration préféré. Il est important que ces clés soient actuelles et correctes, sinon AnythingLLM ne fonctionnera pas correctement.",
provider: "Fournisseur LLM",
},
transcription: {
title: "Préférence du modèle de transcription",
description:
@ -375,7 +402,6 @@ const TRANSLATIONS = {
"warn-end":
"Le modèle intégré se téléchargera automatiquement lors de la première utilisation.",
},
embedding: {
title: "Préférence d'intégration",
"desc-start":
@ -388,7 +414,6 @@ const TRANSLATIONS = {
"Aucune configuration n'est nécessaire lors de l'utilisation du moteur d'intégration natif de AnythingLLM.",
},
},
text: {
title: "Préférences de division et de découpage du texte",
"desc-start":
@ -404,15 +429,12 @@ const TRANSLATIONS = {
"C'est la longueur maximale de caractères pouvant être présents dans un seul vecteur.",
recommend: "Longueur maximale du modèle d'intégration est",
},
overlap: {
title: "Chevauchement des segments de texte",
description:
"C'est le chevauchement maximal de caractères qui se produit pendant le découpage entre deux segments de texte adjacents.",
},
},
// Vector Database
vector: {
title: "Base de données vectorielle",
description:
@ -422,8 +444,6 @@ const TRANSLATIONS = {
description: "Aucune configuration n'est nécessaire pour LanceDB.",
},
},
// Embeddable Chat Widgets
embeddable: {
title: "Widgets de chat intégrables",
description:
@ -435,7 +455,6 @@ const TRANSLATIONS = {
Active: "Domaines actifs",
},
},
"embed-chats": {
title: "Chats intégrés",
export: "Exporter",
@ -449,7 +468,6 @@ const TRANSLATIONS = {
at: "Envoyé à",
},
},
multi: {
title: "Mode multi-utilisateurs",
description:
@ -474,8 +492,6 @@ const TRANSLATIONS = {
password: "Mot de passe de l'instance",
},
},
// Event Logs
event: {
title: "Journaux d'événements",
description:
@ -487,8 +503,6 @@ const TRANSLATIONS = {
occurred: "Survenu à",
},
},
// Privacy & Data-Handling
privacy: {
title: "Confidentialité et gestion des données",
description:

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "שם סביבת העבודה",
error: "שגיאה",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "שמור שינויים",
previous: "עמוד קודם",
next: "עמוד הבא",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "הגדרות מופע",
system: "הגדרות כלליות",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "צור קשר עם התמיכה",
"browser-extension": "תוסף דפדפן",
},
// Page Definitions
login: {
"multi-user": {
welcome: "ברוכים הבאים ל-",
@ -64,7 +112,6 @@ const TRANSLATIONS = {
"back-to-login": "חזרה להתחברות",
},
},
welcomeMessage: {
part1:
"ברוכים הבאים ל-AnythingLLM, AnythingLLM היא כלי AI קוד פתוח מאת Mintplex Labs שהופך כל דבר לצ'אטבוט מאומן שאפשר לשאול אותו ולקיים איתו שיחה. AnythingLLM הוא תוכנה מסוג BYOK (הביא את המפתחות שלך) כך שאין מנוי, עמלה או חיובים עבור התוכנה הזו מלבד השירותים שאתה רוצה להשתמש בהם.",
@ -85,13 +132,10 @@ const TRANSLATIONS = {
starOnGithub: "שים כוכב ב-GitHub",
contact: "צור קשר עם Mintplex Labs",
},
"new-workspace": {
title: "סביבת עבודה חדשה",
placeholder: "סביבת העבודה שלי",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "הגדרות כלליות",
chat: "הגדרות צ'אט",
@ -99,8 +143,6 @@ const TRANSLATIONS = {
members: "חברים",
agent: "קונפיגורציה של סוכן",
},
// General Appearance
general: {
vector: {
title: "ספירת וקטורים",
@ -135,8 +177,6 @@ const TRANSLATIONS = {
"סביבת העבודה שלך. זה ימחק את כל הטבעות הווקטוריות בבסיס הנתונים הווקטורי שלך.\n\nקבצי המקור המקוריים יישארו ללא שינוי. פעולה זו היא בלתי הפיכה.",
},
},
// Chat Settings
chat: {
llm: {
title: "ספק LLM של סביבת עבודה",
@ -193,8 +233,6 @@ const TRANSLATIONS = {
hint: "לרוב ה-LLM יש טווחים שונים של ערכים תקפים. התייעץ עם ספק ה-LLM שלך לקבלת מידע.",
},
},
// Vector Database
"vector-workspace": {
identifier: "מזהה בסיס נתונים וקטור",
snippets: {
@ -221,8 +259,6 @@ const TRANSLATIONS = {
success: "בסיס הנתונים הווקטורי של סביבת העבודה איפס!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"ביצועי LLM שאינם תומכים באופן מפורש בקריאות כלים תלויים מאוד ביכולות ובדיוק של הדגם. חלק מהיכולות עשויות להיות מוגבלות או לא פונקציונליות.",
@ -242,7 +278,6 @@ const TRANSLATIONS = {
"דגם LLM ספציפי שייעשה בו שימוש עבור סוכן @agent של סביבת העבודה הזו.",
wait: "-- מחכה לדגמים --",
},
skill: {
title: "כישורי סוכן ברירת מחדל",
description:
@ -279,8 +314,6 @@ const TRANSLATIONS = {
},
},
},
// Workspace Chats
recorded: {
title: "שיחות סביבת עבודה",
description:
@ -295,8 +328,6 @@ const TRANSLATIONS = {
at: "נשלח ב-",
},
},
// Appearance
appearance: {
title: "מראה",
description: "התאם אישית את הגדרות המראה של הפלטפורמה שלך.",
@ -327,8 +358,6 @@ const TRANSLATIONS = {
link: "קישור",
},
},
// API Keys
api: {
title: "מפתחות API",
description:
@ -341,14 +370,12 @@ const TRANSLATIONS = {
created: "נוצר",
},
},
llm: {
title: "העדפת LLM",
description:
"אלה אישורי ההרשאה וההגדרות עבור ספק צ'אט והטבעה LLM המועדף עליך. חשוב שאישורי ההרשאה יהיו עדכניים ונכונים אחרת AnythingLLM לא תוכל לפעול כראוי.",
provider: "ספק LLM",
},
transcription: {
title: "העדפת דגם תמלול",
description:
@ -360,7 +387,6 @@ const TRANSLATIONS = {
"אנו ממליצים על לפחות 2GB של זיכרון RAM והעלאת קבצים <10Mb.",
"warn-end": "הדגם המובנה יתורגם אוטומטית בפעם הראשונה שבה תשתמש בו.",
},
embedding: {
title: "העדפת הטבעה",
"desc-start":
@ -373,7 +399,6 @@ const TRANSLATIONS = {
"אין צורך בהגדרה בעת שימוש במנוע ההטבעה המקורי של AnythingLLM.",
},
},
text: {
title: "הגדרות חלוקת טקסט וחלוקה",
"desc-start":
@ -388,15 +413,12 @@ const TRANSLATIONS = {
description: "זהו אורך הדמויות המרבי שיכול להיות נוכח בקטור יחיד.",
recommend: "אורך מרבי של דגם ההטבעה הוא",
},
overlap: {
title: "חפיפה של קטע טקסט",
description:
"זו החפיפה המרבית של הדמויות המתרחשת במהלך החלוקה בין שני קטעי טקסט סמוכים.",
},
},
// Vector Database
vector: {
title: "בסיס נתונים וקטור",
description:
@ -406,8 +428,6 @@ const TRANSLATIONS = {
description: "אין צורך בקונפיגורציה עבור LanceDB.",
},
},
// Embeddable Chat Widgets
embeddable: {
title: "כלי צ'אט ניתנים להטמעה",
description:
@ -419,7 +439,6 @@ const TRANSLATIONS = {
Active: "תחומים פעילים",
},
},
"embed-chats": {
title: "הטמעת שיחות",
export: "ייצוא",
@ -432,7 +451,6 @@ const TRANSLATIONS = {
at: "נשלח ב-",
},
},
multi: {
title: "מצב משתמשים מרובים",
description:
@ -457,8 +475,6 @@ const TRANSLATIONS = {
password: "סיסמת מופע",
},
},
// Event Logs
event: {
title: "יומני אירועים",
description: "הצג את כל הפעולות והאירועים שקורים במופע זה לצורך ניטור.",
@ -469,8 +485,6 @@ const TRANSLATIONS = {
occurred: "התרחש ב-",
},
},
// Privacy & Data-Handling
privacy: {
title: "פרטיות וטיפול בנתונים",
description:

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Nome delle aree di lavoro",
error: "errore",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "Salva modifiche",
previous: "Pagina precedente",
next: "Pagina successiva",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "Impostazioni istanza",
system: "Impostazioni generali",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "Contatta il Supporto",
"browser-extension": "Estensione del browser",
},
// Page Definitions
login: {
"multi-user": {
welcome: "Benvenuto in",
@ -65,7 +113,6 @@ const TRANSLATIONS = {
"back-to-login": "Torna al Login",
},
},
welcomeMessage: {
part1:
"Benvenuti in AnythingLLM, AnythingLLM è uno strumento di intelligenza artificiale open source di Mintplex Labs che trasforma qualsiasi cosa in un chatbot addestrato con cui puoi effettuare query e chattare. AnythingLLM è un software BYOK (bring-your-own-keys), quindi non ci sono abbonamenti, commissioni o costi per questo software al di fuori dei servizi che vuoi utilizzare.",
@ -87,13 +134,10 @@ const TRANSLATIONS = {
starOnGithub: "Metti una stella su GitHub",
contact: "Contatta Mintplex Labs",
},
"new-workspace": {
title: "Nuova area di lavoro",
placeholder: "La mia area di lavoro",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "Impostazioni generali",
chat: "Impostazioni Chat",
@ -101,8 +145,6 @@ const TRANSLATIONS = {
members: "Membri",
agent: "Configurazione dell'agente",
},
// General Appearance
general: {
vector: {
title: "Contatore dei vettori",
@ -139,8 +181,6 @@ const TRANSLATIONS = {
"area di lavoro. Verranno rimossi tutti gli embeddings vettoriali nel tuo database vettoriale.\n\nI file sorgente originali rimarranno intatti. Questa azione è irreversibile.",
},
},
// Chat Settings
chat: {
llm: {
title: "LLM Provider dell'area di lavoro",
@ -198,8 +238,6 @@ const TRANSLATIONS = {
hint: "La maggior parte degli LLM ha vari intervalli accettabili di valori validi. Consulta il tuo fornitore LLM per queste informazioni.",
},
},
// Vector Database
"vector-workspace": {
identifier: "Identificatore del database vettoriale",
snippets: {
@ -228,8 +266,6 @@ const TRANSLATIONS = {
"Il database vettoriale dell'area di lavoro è stato reimpostato!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"Le prestazioni degli LLM che non supportano esplicitamente la chiamata degli strumenti dipendono in larga misura dalle capacità e dalla precisione del modello. Alcune capacità potrebbero essere limitate o non funzionali.",
@ -249,7 +285,6 @@ const TRANSLATIONS = {
"Il modello LLM specifico che verrà utilizzato per l'agente @agent di quest'area di lavoro.",
wait: "-- in attesa dei modelli --",
},
skill: {
title: "Abilità predefinite dell'agente",
description:
@ -288,8 +323,6 @@ const TRANSLATIONS = {
},
},
},
// Workspace Chats
recorded: {
title: "Chat dell'area di lavoro",
description:
@ -304,8 +337,6 @@ const TRANSLATIONS = {
at: "Inviato a",
},
},
// Appearance
appearance: {
title: "Aspetto",
description:
@ -339,8 +370,6 @@ const TRANSLATIONS = {
link: "Collegamento",
},
},
// API Keys
api: {
title: "Chiavi API",
description:
@ -353,14 +382,12 @@ const TRANSLATIONS = {
created: "Creato",
},
},
llm: {
title: "Preferenza LLM",
description:
"Queste sono le credenziali e le impostazioni per il tuo provider di chat e embedding LLM preferito. È importante che queste chiavi siano aggiornate e corrette, altrimenti AnythingLLM non funzionerà correttamente.",
provider: "Provider LLM",
},
transcription: {
title: "Preferenza del modello di trascrizione",
description:
@ -373,7 +400,6 @@ const TRANSLATIONS = {
"warn-end":
"Il modello integrato verrà scaricato automaticamente al primo utilizzo.",
},
embedding: {
title: "Preferenza di embedding",
"desc-start":
@ -386,7 +412,6 @@ const TRANSLATIONS = {
"Non è richiesta alcuna configurazione quando si utilizza il motore di embedding nativo di AnythingLLM.",
},
},
text: {
title: "Preferenze di suddivisione e suddivisione in blocchi del testo",
"desc-start":
@ -408,8 +433,6 @@ const TRANSLATIONS = {
"Questa è la sovrapposizione massima di caratteri che si verifica durante la suddivisione in blocchi tra due porzioni di testo adiacenti.",
},
},
// Vector Database
vector: {
title: "Database vettoriale",
description:
@ -419,8 +442,6 @@ const TRANSLATIONS = {
description: "Non è richiesta alcuna configurazione per LanceDB.",
},
},
// Embeddable Chat Widgets
embeddable: {
title: "Widget di chat incorporabili",
description:
@ -432,7 +453,6 @@ const TRANSLATIONS = {
Active: "Domini attivi",
},
},
"embed-chats": {
title: "Chat incorporate",
export: "Esporta",
@ -446,7 +466,6 @@ const TRANSLATIONS = {
at: "Inviato a",
},
},
multi: {
title: "Modalità multi-utente",
description:
@ -471,8 +490,6 @@ const TRANSLATIONS = {
password: "Password istanza",
},
},
// Event Logs
event: {
title: "Registro eventi",
description:
@ -484,8 +501,6 @@ const TRANSLATIONS = {
occurred: "Si è verificato alle",
},
},
// Privacy & Data-Handling
privacy: {
title: "Privacy e gestione dei dati",
description:

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "워크스페이스 이름",
error: "오류",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "저장",
previous: "이전",
next: "다음",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "인스턴스 설정",
system: "일반 설정",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "지원팀 연락",
"browser-extension": "브라우저 확장 프로그램",
},
// Page Definitions
login: {
"multi-user": {
welcome: "웰컴!",
@ -64,13 +112,10 @@ const TRANSLATIONS = {
"back-to-login": "로그인으로 돌아가기",
},
},
"new-workspace": {
title: "새 워크스페이스",
placeholder: "내 워크스페이스",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "일반 설정",
chat: "채팅 설정",
@ -78,7 +123,6 @@ const TRANSLATIONS = {
members: "구성원",
agent: "에이전트 구성",
},
welcomeMessage: {
part1:
"AnythingLLM에 오신 것을 환영합니다. AnythingLLM은 Mintplex Labs에서 개발한 오픈 소스 AI 도구로, 어떤 것이든 훈련된 챗봇으로 변환하여 쿼리하고 대화할 수 있습니다. AnythingLLM은 BYOK(Bring Your Own Key) 소프트웨어이므로 사용하려는 서비스 외에는 구독료나 기타 비용이 없습니다.",
@ -100,8 +144,6 @@ const TRANSLATIONS = {
starOnGithub: "GitHub에 별표 달기",
contact: "Mintplex Labs에 연락하기",
},
// General Appearance
general: {
vector: {
title: "벡터 수",
@ -135,8 +177,6 @@ const TRANSLATIONS = {
"워크스페이스 전체를 삭제합니다. 이 작업은 벡터 데이터베이스에 있는 모든 벡터 임베딩을 제거합니다.\n\n원본 소스 파일은 그대로 유지됩니다. 이 작업은 되돌릴 수 없습니다.",
},
},
// Chat Settings
chat: {
llm: {
title: "워크스페이스 LLM 제공자",
@ -191,8 +231,6 @@ const TRANSLATIONS = {
hint: "대부분의 LLM은 유효한 값의 다양한 허용 범위를 가지고 있습니다. 해당 정보는 LLM 제공자에게 문의하세요.",
},
},
// Vector Database
"vector-workspace": {
identifier: "벡터 데이터베이스 식별자",
snippets: {
@ -219,8 +257,6 @@ const TRANSLATIONS = {
success: "워크스페이스 벡터 데이터베이스가 재설정되었습니다!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"도구 호출을 명시적으로 지원하지 않는 LLM의 성능은 모델의 기능과 정확도에 크게 좌우됩니다. 일부 기능은 제한되거나 작동하지 않을 수 있습니다.",
@ -240,7 +276,6 @@ const TRANSLATIONS = {
"이 워크스페이스의 @agent 에이전트에 사용할 특정 LLM 모델입니다.",
wait: "-- 모델 기다리는 중 --",
},
skill: {
title: "기본 에이전트 스킬",
description:
@ -279,8 +314,6 @@ const TRANSLATIONS = {
},
},
},
// Workspace Chats
recorded: {
title: "워크스페이스 채팅",
description:
@ -295,8 +328,6 @@ const TRANSLATIONS = {
at: "보낸 시각",
},
},
// Appearance
appearance: {
title: "외관",
description: "플랫폼의 외관 설정을 수정합니다.",
@ -327,8 +358,6 @@ const TRANSLATIONS = {
link: "링크",
},
},
// API Keys
api: {
title: "API 키",
description:
@ -341,14 +370,12 @@ const TRANSLATIONS = {
created: "생성일",
},
},
llm: {
title: "LLM 기본 설정",
description:
"이것은 채팅과 임베딩을 하기 위한 선호하는 LLM 제공자의 인증입니다. 이 키가 현재 활성 상태이고 정확해야 AnythingLLM이 제대로 작동합니다.",
provider: "LLM 제공자",
},
transcription: {
title: "텍스트 변환 모델 기본 설정",
description:
@ -359,7 +386,6 @@ const TRANSLATIONS = {
"warn-recommend": "최소 2GB RAM과 10Mb 보다 작은 파일 업로드를 권장합니다.",
"warn-end": "내장된 모델은 첫 번째 사용 시 자동으로 다운로드됩니다.",
},
embedding: {
title: "임베딩 기본 설정",
"desc-start":
@ -372,7 +398,6 @@ const TRANSLATIONS = {
"AnythingLLM의 기본 임베딩 엔진을 사용할 때는 설정이 필요하지 않습니다.",
},
},
text: {
title: "텍스트 분할 및 청킹 기본 설정",
"desc-start":
@ -387,15 +412,12 @@ const TRANSLATIONS = {
description: "단일 벡터에 들어갈 수 있는 최대 문자 길이입니다.",
recommend: "임베드 모델 최대 길이는",
},
overlap: {
title: "텍스트 청크 겹침",
description:
"청킹 동안 두 인접 텍스트 청크 간에 겹칠 수 있는 최대 문자 수입니다.",
},
},
// Vector Database
vector: {
title: "벡터 데이터베이스",
description:
@ -405,8 +427,6 @@ const TRANSLATIONS = {
description: "LanceDB를 선택하면 설정이 필요 없습니다.",
},
},
// Embeddable Chat Widgets
embeddable: {
title: "임베드 가능한 채팅 위젯",
description:
@ -418,7 +438,6 @@ const TRANSLATIONS = {
Active: "활성 도메인",
},
},
"embed-chats": {
title: "임베드 채팅",
export: "내보내기",
@ -431,7 +450,6 @@ const TRANSLATIONS = {
at: "보낸 시각",
},
},
multi: {
title: "다중 사용자 모드",
description:
@ -456,8 +474,6 @@ const TRANSLATIONS = {
password: "인스턴스 비밀번호",
},
},
// Event Logs
event: {
title: "이벤트 로그",
description:
@ -469,8 +485,6 @@ const TRANSLATIONS = {
occurred: "발생 시각",
},
},
// Privacy & Data-Handling
privacy: {
title: "개인정보와 데이터 처리",
description:

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Werkruimten Naam",
error: "fout",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "Wijzigingen opslaan",
previous: "Vorige pagina",
next: "Volgende pagina",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "Instelling Instanties",
system: "Algemene Instellingen",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "Contact Ondersteuning",
"browser-extension": "Browser Extensie",
},
// Page Definitions
login: {
"multi-user": {
welcome: "Welkom bij",
@ -65,7 +113,6 @@ const TRANSLATIONS = {
"back-to-login": "Terug naar Inloggen",
},
},
welcomeMessage: {
part1:
"Welkom bij AnythingLLM, AnythingLLM is een open-source AI-tool van Mintplex Labs die alles omzet in een getrainde chatbot waarmee je kunt vragen en chatten. AnythingLLM is een BYOK (bring-your-own-keys) software, dus er is geen abonnement, vergoeding of kosten voor deze software buiten de diensten die je ermee wilt gebruiken.",
@ -87,13 +134,10 @@ const TRANSLATIONS = {
starOnGithub: "Ster op GitHub",
contact: "Contact Mintplex Labs",
},
"new-workspace": {
title: "Nieuwe Werkruimte",
placeholder: "Mijn Werkruimte",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "Algemene Instellingen",
chat: "Chat Instellingen",
@ -101,8 +145,6 @@ const TRANSLATIONS = {
members: "Leden",
agent: "Agent Configuratie",
},
// General Appearance
general: {
vector: {
title: "Vector Teller",
@ -138,8 +180,6 @@ const TRANSLATIONS = {
"werkruimte te verwijderen. Dit zal alle vector inbeddingen in je vector database verwijderen.\n\nDe originele bronbestanden blijven onaangetast. Deze actie is onomkeerbaar.",
},
},
// Chat Settings
chat: {
llm: {
title: "Werkruimte LLM Provider",
@ -197,8 +237,6 @@ const TRANSLATIONS = {
hint: "De meeste LLM's hebben verschillende acceptabele reeksen van geldige waarden. Raadpleeg je LLM-provider voor die informatie.",
},
},
// Vector Database
"vector-workspace": {
identifier: "Vector database-identificator",
snippets: {
@ -225,8 +263,6 @@ const TRANSLATIONS = {
success: "Werkruimte vector database is gereset!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"De prestaties van LLM's die geen tool-aanroep expliciet ondersteunen, zijn sterk afhankelijk van de capaciteiten en nauwkeurigheid van het model. Sommige vaardigheden kunnen beperkt of niet-functioneel zijn.",
@ -246,7 +282,6 @@ const TRANSLATIONS = {
"Het specifieke LLM-model dat voor het @agent-agent van deze werkruimte zal worden gebruikt.",
wait: "-- wachten op modellen --",
},
skill: {
title: "Standaard agentvaardigheden",
description:
@ -285,8 +320,6 @@ const TRANSLATIONS = {
},
},
},
// Workspace Chats
recorded: {
title: "Werkruimte Chats",
description:
@ -301,8 +334,6 @@ const TRANSLATIONS = {
at: "Verzonden Om",
},
},
// Appearance
appearance: {
title: "Verschijning",
description: "Pas de verschijningsinstellingen van je platform aan.",
@ -334,8 +365,6 @@ const TRANSLATIONS = {
link: "Link",
},
},
// API Keys
api: {
title: "API-sleutels",
description:
@ -348,14 +377,12 @@ const TRANSLATIONS = {
created: "Aangemaakt",
},
},
llm: {
title: "LLM Voorkeur",
description:
"Dit zijn de inloggegevens en instellingen voor je voorkeurs LLM-chat & inbeddingprovider. Het is belangrijk dat deze sleutels actueel en correct zijn, anders zal AnythingLLM niet goed werken.",
provider: "LLM Provider",
},
transcription: {
title: "Transcriptiemodel Voorkeur",
description:
@ -368,7 +395,6 @@ const TRANSLATIONS = {
"warn-end":
"Het ingebouwde model wordt automatisch gedownload bij het eerste gebruik.",
},
embedding: {
title: "Inbedding Voorkeur",
"desc-start":
@ -381,7 +407,6 @@ const TRANSLATIONS = {
"Er is geen instelling vereist bij gebruik van de ingebouwde inbeddingengine van AnythingLLM.",
},
},
text: {
title: "Tekst Splitsen & Chunking Voorkeuren",
"desc-start":
@ -397,15 +422,12 @@ const TRANSLATIONS = {
"Dit is de maximale lengte van tekens die aanwezig kan zijn in een enkele vector.",
recommend: "Inbed model maximale lengte is",
},
overlap: {
title: "Tekst Chunk Overlap",
description:
"Dit is de maximale overlap van tekens die optreedt tijdens het chunking tussen twee aangrenzende tekstchunks.",
},
},
// Vector Database
vector: {
title: "Vector Database",
description:
@ -415,8 +437,6 @@ const TRANSLATIONS = {
description: "Er is geen configuratie nodig voor LanceDB.",
},
},
// Embeddable Chat Widgets
embeddable: {
title: "Inbedbare Chat Widgets",
description:
@ -428,7 +448,6 @@ const TRANSLATIONS = {
Active: "Actieve Domeinen",
},
},
"embed-chats": {
title: "Inbedding Chats",
export: "Exporteren",
@ -442,7 +461,6 @@ const TRANSLATIONS = {
at: "Verzonden Om",
},
},
multi: {
title: "Multi-Gebruikersmodus",
description:
@ -467,8 +485,6 @@ const TRANSLATIONS = {
password: "Instantie wachtwoord",
},
},
// Event Logs
event: {
title: "Gebeurtenislogboeken",
description:
@ -480,8 +496,6 @@ const TRANSLATIONS = {
occurred: "Opgetreden Op",
},
},
// Privacy & Data-Handling
privacy: {
title: "Privacy & Gegevensverwerking",
description:

View file

@ -0,0 +1,157 @@
// This script is used to normalize the translations files to ensure they are all the same.
// This will take the en file and compare it to all other files and ensure they are all the same.
// If a non-en file is missing a key, it will be added to the file and set to null
import { resources } from "./resources.js";
import fs from "fs";
// Localized display-name helper for ISO language codes.
// NOTE(review): passing every supported resource locale as the locales
// argument lets Intl negotiate which locale the names are rendered in —
// confirm English output is the intended result.
const languageNames = new Intl.DisplayNames(Object.keys(resources), {
  type: "language",
});
// Returns a human-readable name for an ISO language code (e.g. "tr" -> "Turkish").
function langDisplayName(lang) {
  return languageNames.of(lang);
}
/**
 * Recursively verifies that translation object `a` (a locale) has exactly the
 * same key structure as `b` (the English reference). Leaf values are never
 * compared — only the shape of the objects. Mismatches are logged to the
 * console for the maintainer.
 *
 * @param {string} lang - Locale code being checked (used only for logging).
 * @param {*} a - Locale value/subtree under inspection.
 * @param {*} b - English reference value/subtree.
 * @param {string|null} subdir - Key path hint for log output.
 * @returns {boolean} true when the structures match, false otherwise.
 */
function compareStructures(lang, a, b, subdir = null) {
  // Mismatched primitive types cannot be structurally equal. null is exempt
  // because it is the "needs translation" placeholder and may stand in for
  // any English value.
  if (typeof a !== typeof b && a !== null && b !== null) {
    console.log("Invalid type comparison", [
      {
        lang,
        a: typeof a,
        b: typeof b,
        values: {
          a,
          b,
        },
        ...(!!subdir ? { subdir } : {}),
      },
    ]);
    return false;
  }

  // Need the truthy guard because typeof null === 'object'.
  if (a && typeof a === "object") {
    const keysA = Object.keys(a).sort();
    const keysB = Object.keys(b).sort();

    // Objects with a different number of keys are unequal.
    if (keysA.length !== keysB.length) {
      console.log("Keys are missing!", {
        [lang]: keysA,
        en: keysB,
        ...(!!subdir ? { subdir } : {}),
        diff: {
          added: keysB.filter((key) => !keysA.includes(key)),
          removed: keysA.filter((key) => !keysB.includes(key)),
        },
      });
      return false;
    }

    // Same count but different key names is also a mismatch.
    if (!keysA.every((k, i) => k === keysB[i])) {
      console.log("Keys are not equal!", {
        [lang]: keysA,
        en: keysB,
        ...(!!subdir ? { subdir } : {}),
      });
      return false;
    }

    // Identical key sets: recurse into every value.
    return keysA.every((key) => compareStructures(lang, a[key], b[key], key));
  }

  // Primitives (and null): values are intentionally ignored.
  return true;
}
/**
 * Produces a copy of `target` (a locale translation tree) whose structure
 * exactly mirrors `source` (the English reference). Existing translated
 * values are preserved; any key present in English but absent from the
 * locale is added with a null value, signalling "needs translation".
 *
 * @param {string} lang - Locale code (carried for parity with compareStructures).
 * @param {*} source - English reference value/subtree.
 * @param {*} target - Locale value/subtree to normalize (may be missing).
 * @param {string|null} subdir - Key path hint (currently unused in the body).
 * @returns {*} The normalized value: target's value, null, or a new object.
 */
function normalizeTranslations(lang, source, target, subdir = null) {
  // Primitive leaf in the English tree: keep the existing translation if one
  // exists, otherwise flag it as untranslated.
  const sourceIsObject = Boolean(source) && typeof source === "object";
  if (!sourceIsObject) return target ?? null;

  // Object node: start from the locale's existing section (if it is one) and
  // ensure every English key exists, recursing into nested sections.
  const result =
    target && typeof target === "object" ? { ...target } : {};
  Object.keys(source).forEach((key) => {
    result[key] = normalizeTranslations(lang, source[key], result[key], key);
  });
  return result;
}
/**
 * Maps an ISO language code to the locale directory name used on disk.
 * A few locales use filenames that do not follow the default
 * "hyphen becomes underscore" convention, so they are special-cased.
 *
 * @param {string} lang - ISO language code (e.g. "zh-tw", "pt", "de").
 * @returns {string} The on-disk locale folder name.
 */
function ISOToFilename(lang) {
  switch (lang) {
    case "zh-tw":
      return "zh_TW";
    case "pt":
      return "pt_BR";
    case "vi":
      return "vn";
    default:
      return lang.replace("-", "_");
  }
}
// Driver: normalize every non-English locale against the English schema,
// verify the result, and rewrite each locale's common.js on disk.
const failed = [];
const TRANSLATIONS = {};
for (const [lang, { common }] of Object.entries(resources)) {
  TRANSLATIONS[lang] = common;
}

// English is the canonical schema every other locale is compared against.
const PRIMARY = { ...TRANSLATIONS["en"] };
delete TRANSLATIONS["en"];

console.log(
  `The following translation files will be normalized against the English file: [${Object.keys(
    TRANSLATIONS
  ).join(",")}]`
);

// Normalize each non-English translation, then verify and persist it.
for (const [lang, translations] of Object.entries(TRANSLATIONS)) {
  const normalized = normalizeTranslations(lang, PRIMARY, translations);
  // Keep the in-memory resources in sync with what is written to disk.
  resources[lang].common = normalized;
  // Verify the structure now matches English exactly.
  const passed = compareStructures(lang, normalized, PRIMARY);
  console.log(`${langDisplayName(lang)} (${lang}): ${passed ? "✅" : "❌"}`);
  if (!passed) failed.push(lang);

  const langFilename = ISOToFilename(lang);
  fs.writeFileSync(
    `./${langFilename}/common.js`,
    `// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = ${JSON.stringify(normalized, null, 2)}
export default TRANSLATIONS;`
  );
}

if (failed.length !== 0) {
  // Bug fix: the failed-locale list was previously passed as Error's second
  // positional argument, where it was silently dropped. Surface it in the
  // message and attach it via the standard `cause` option.
  throw new Error(
    `Error verifying normalized translations for [${failed.join(
      ","
    )}]. Please check the logs.`,
    { cause: failed }
  );
}

console.log(
  `👍 All translation files have been normalized to match the English schema!`
);
process.exit(0);

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Nome dos Workspaces",
error: "erro",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "Salvar alterações",
previous: "Página Anterior",
next: "Próxima Página",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "Configurações da Instância",
system: "Configurações Gerais",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "Contato com Suporte",
"browser-extension": "Extensão do navegador",
},
// Page Definitions
login: {
"multi-user": {
welcome: "Bem-vindo ao",
@ -65,7 +113,6 @@ const TRANSLATIONS = {
"back-to-login": "Voltar ao Login",
},
},
welcomeMessage: {
part1:
"Bem-vindo ao AnythingLLM, AnythingLLM é uma ferramenta de IA de código aberto da Mintplex Labs que transforma qualquer coisa em um chatbot treinado que você pode consultar e conversar. AnythingLLM é um software BYOK (bring-your-own-keys | traga suas próprias chaves), portanto, não há assinatura, taxa ou cobranças para este software fora dos serviços que você deseja usar com ele.",
@ -87,13 +134,10 @@ const TRANSLATIONS = {
starOnGithub: "Dar estrela no GitHub",
contact: "Contato Mintplex Labs",
},
"new-workspace": {
title: "Novo Workspace",
placeholder: "Meu Workspace",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "Configurações Gerais",
chat: "Configurações de Chat",
@ -101,8 +145,6 @@ const TRANSLATIONS = {
members: "Membros",
agent: "Configuração do Agente",
},
// General Appearance
general: {
vector: {
title: "Contagem de Vetores",
@ -138,8 +180,6 @@ const TRANSLATIONS = {
"workspace. Isso removerá todas as incorporações vetoriais no seu banco de dados vetorial.\n\nOs arquivos de origem originais permanecerão intactos. Esta ação é irreversível.",
},
},
// Chat Settings
chat: {
llm: {
title: "Provedor de LLM do Workspace",
@ -197,8 +237,6 @@ const TRANSLATIONS = {
hint: "A maioria dos LLMs tem vários intervalos aceitáveis de valores válidos. Consulte seu provedor de LLM para essa informação.",
},
},
// Vector Database
"vector-workspace": {
identifier: "Identificador do Banco de Dados Vetorial",
snippets: {
@ -226,8 +264,6 @@ const TRANSLATIONS = {
"O banco de dados vetorial do workspace foi redefinido com sucesso!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"O desempenho dos LLMs que não suportam explicitamente a chamada de ferramentas depende muito das capacidades e da precisão do modelo. Algumas habilidades podem ser limitadas ou não funcionais.",
@ -247,7 +283,6 @@ const TRANSLATIONS = {
"O modelo de LLM específico que será usado para o agente @agent deste workspace.",
wait: "-- aguardando modelos --",
},
skill: {
title: "Habilidades padrão do agente",
description:
@ -286,8 +321,6 @@ const TRANSLATIONS = {
},
},
},
// Workspace Chats
recorded: {
title: "Chats do Workspace",
description:
@ -302,8 +335,6 @@ const TRANSLATIONS = {
at: "Enviado Em",
},
},
// Appearance
appearance: {
title: "Aparência",
description: "Personalize as configurações de aparência da sua plataforma.",
@ -336,8 +367,6 @@ const TRANSLATIONS = {
link: "Link",
},
},
// API Keys
api: {
title: "Chaves API",
description:
@ -350,14 +379,12 @@ const TRANSLATIONS = {
created: "Criado",
},
},
llm: {
title: "Preferência de LLM",
description:
"Estas são as credenciais e configurações para seu provedor preferido de chat e incorporação de LLM. É importante que essas chaves estejam atualizadas e corretas, caso contrário, o AnythingLLM não funcionará corretamente.",
provider: "Provedor de LLM",
},
transcription: {
title: "Preferência de Modelo de Transcrição",
description:
@ -370,7 +397,6 @@ const TRANSLATIONS = {
"warn-end":
"O modelo embutido será baixado automaticamente no primeiro uso.",
},
embedding: {
title: "Preferência de Incorporação",
"desc-start":
@ -383,7 +409,6 @@ const TRANSLATIONS = {
"Não é necessária configuração ao usar o mecanismo de incorporação nativo do AnythingLLM.",
},
},
text: {
title: "Preferências de Divisão e Fragmentação de Texto",
"desc-start":
@ -399,15 +424,12 @@ const TRANSLATIONS = {
"Este é o comprimento máximo de caracteres que pode estar presente em um único vetor.",
recommend: "O comprimento máximo do modelo de incorporação é",
},
overlap: {
title: "Sobreposição de Fragmento de Texto",
description:
"Esta é a sobreposição máxima de caracteres que ocorre durante a fragmentação entre dois fragmentos de texto adjacentes.",
},
},
// Vector Database
vector: {
title: "Banco de Dados Vetorial",
description:
@ -417,8 +439,6 @@ const TRANSLATIONS = {
description: "Não há configuração necessária para o LanceDB.",
},
},
// Embeddable Chat Widgets
embeddable: {
title: "Widgets de Chat Incorporáveis",
description:
@ -430,7 +450,6 @@ const TRANSLATIONS = {
Active: "Domínios Ativos",
},
},
"embed-chats": {
title: "Incorporar Chats",
export: "Exportar",
@ -444,7 +463,6 @@ const TRANSLATIONS = {
at: "Enviado Em",
},
},
multi: {
title: "Modo Multiusuário",
description:
@ -469,8 +487,6 @@ const TRANSLATIONS = {
password: "Senha da instância",
},
},
// Event Logs
event: {
title: "Logs de Eventos",
description:
@ -482,8 +498,6 @@ const TRANSLATIONS = {
occurred: "Ocorreu Em",
},
},
// Privacy & Data-Handling
privacy: {
title: "Privacidade e Tratamento de Dados",
description:

View file

@ -28,6 +28,7 @@ import Dutch from "./nl/common.js";
import Vietnamese from "./vn/common.js";
import TraditionalChinese from "./zh_TW/common.js";
import Farsi from "./fa/common.js";
import Turkish from "./tr/common.js";
export const defaultNS = "common";
export const resources = {
@ -73,4 +74,7 @@ export const resources = {
fa: {
common: Farsi,
},
tr: {
common: Turkish,
},
};

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Имя рабочих пространств",
error: "ошибка",
@ -9,8 +58,10 @@ const TRANSLATIONS = {
save: "Сохранить изменения",
previous: "Предыдущая страница",
next: "Следующая страница",
optional: null,
yes: null,
no: null,
},
settings: {
title: "Настройки экземпляра",
system: "Системные настройки",
@ -39,7 +90,6 @@ const TRANSLATIONS = {
      contact: "Связаться с Поддержкой",
"browser-extension": "Расширение браузера",
},
login: {
"multi-user": {
welcome: "Добро пожаловать в",
@ -63,7 +113,6 @@ const TRANSLATIONS = {
"back-to-login": "Вернуться к входу",
},
},
welcomeMessage: {
part1:
"Добро пожаловать в AnythingLLM, открытый инструмент искусственного интеллекта от Mintplex Labs, который превращает что угодно в обученный чат-бот, с которым вы можете общаться и задавать вопросы. AnythingLLM - это ПО BYOK (принеси свои собственные ключи), поэтому за использование этого ПО нет подписки, платы или других сборов, кроме тех, что вы хотите использовать.",
@ -85,12 +134,10 @@ const TRANSLATIONS = {
starOnGithub: "Звезда на GitHub",
contact: "Связаться с Mintplex Labs",
},
"new-workspace": {
title: "Новая Рабочая Область",
placeholder: "Моя Рабочая Область",
},
"workspaces—settings": {
general: "Общие настройки",
chat: "Настройки чата",

View file

@ -0,0 +1,511 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Çalışma Alanları Adı",
error: "hata",
success: "başarı",
user: "Kullanıcı",
selection: "Model Seçimi",
saving: "Kaydediliyor...",
save: "Değişiklikleri Kaydet",
previous: "Önceki Sayfa",
next: "Sonraki Sayfa",
optional: null,
yes: null,
no: null,
},
settings: {
title: "Instance Ayarları",
system: "Genel Ayarlar",
invites: "Davetler",
users: "Kullanıcılar",
workspaces: "Çalışma Alanları",
"workspace-chats": "Çalışma Alanı Sohbetleri",
customization: "Özelleştirme",
"api-keys": "Geliştirici API",
llm: "LLM",
transcription: "Transkripsiyon",
embedder: "Gömme Aracı",
"text-splitting": "Metin Bölme & Parçalama",
"voice-speech": "Ses & Konuşma",
"vector-database": "Vektör Veritabanı",
embeds: "Sohbet Gömme",
"embed-chats": "Gömme Sohbet Geçmişi",
security: "Güvenlik",
"event-logs": "Olay Kayıtları",
privacy: "Gizlilik & Veri",
"ai-providers": "Yapay Zeka Sağlayıcıları",
"agent-skills": "Ajan Becerileri",
admin: "Yönetici",
tools: "Araçlar",
"experimental-features": "Deneysel Özellikler",
contact: "Destekle İletişime Geçin",
"browser-extension": "Tarayıcı Uzantısı",
},
login: {
"multi-user": {
welcome: "Hoş geldiniz",
"placeholder-username": "Kullanıcı Adı",
"placeholder-password": "Şifre",
login: "Giriş Yap",
validating: "Doğrulanıyor...",
"forgot-pass": "Şifremi Unuttum",
reset: "Sıfırla",
},
"sign-in": {
start: "Hesabınıza",
end: "giriş yapın.",
},
"password-reset": {
title: "Şifre Sıfırlama",
description: "Şifrenizi sıfırlamak için gerekli bilgileri aşağıya girin.",
"recovery-codes": "Kurtarma Kodları",
"recovery-code": "Kurtarma Kodu {{index}}",
"back-to-login": "Girişe Geri Dön",
},
},
welcomeMessage: {
part1:
"AnythingLLM'e hoş geldiniz. AnythingLLM, Mintplex Labs tarafından geliştirilen açık kaynaklı bir yapay zeka aracıdır ve her şeyi, sorgulayabileceğiniz ve sohbet edebileceğiniz eğitimli bir chatbota dönüştürür. AnythingLLM, BYOK (kendi anahtarlarınızı getirin) yazılımıdır; bu nedenle, kullanmak istediğiniz hizmetler dışında herhangi bir abonelik, ücret ya da ek masraf yoktur.",
part2:
"AnythingLLM, OpenAi, GPT-4, LangChain, PineconeDB, ChromaDB ve benzeri güçlü yapay zeka ürünlerini zahmetsizce, düzenli bir paket içinde bir araya getirmenin en kolay yoludur; böylece verimliliğinizi 100 kat artırabilirsiniz.",
part3:
"AnythingLLM tamamen yerel olarak makinenizde çok az kaynakla çalışabilir—orada olduğunu bile fark etmezsiniz! GPU gerekmez. Bulut veya şirket içi (on-premises) kurulum da mevcuttur.\nYapay zeka araç ekosistemi her geçen gün daha da güçleniyor. AnythingLLM bu gücü kolayca kullanmanızı sağlar.",
githubIssue: "Github'da bir sorun oluşturun",
user1: "Nasıl başlarım?!",
part4:
"Bu çok basit. Tüm koleksiyonlar, 'Çalışma Alanları' (Workspaces) adını verdiğimiz gruplar halinde düzenlenir. Çalışma Alanları; dosyalar, belgeler, resimler, PDF'ler ve diğer dosyaların LLM'lerin anlayabileceği ve sohbette kullanabileceği biçime dönüştürüleceği gruplardır.\n\nİstediğiniz zaman dosya ekleyip kaldırabilirsiniz.",
createWorkspace: "İlk çalışma alanınızı oluşturun",
user2:
"Bu bir tür yapay zeka Dropbox'ı gibi mi? Peki sohbet etmek nasıl? Bir chatbot değil mi?",
part5:
"AnythingLLM, sıradan bir Dropbox'tan çok daha fazlasıdır.\n\nAnythingLLM, verilerinizle etkileşime geçmenin iki yolunu sunar:\n\n<i>Sorgu (Query):</i> Sohbetleriniz, çalışma alanınızdaki belgelere erişip onlardan elde ettiği verileri veya çıkarımları size sunar. Çalışma Alanınıza daha fazla belge eklemek, onu daha akıllı hâle getirir!\n\n<i>Konuşma (Conversational):</i> Belgeleriniz ve devam eden sohbet geçmişiniz, aynı anda LLM'in bilgi tabanına katkıda bulunur. Bu, gerçek zamanlı metin bilgileri, düzeltmeler veya LLM'nin yanlış anlayabileceği noktaların düzeltilmesi için mükemmeldir.\n\nSohbet esnasında, <i>iki mod arasında istediğiniz an</i> geçiş yapabilirsiniz!",
user3: "Vay, harika görünüyor. Hemen denemek istiyorum!",
part6: "İyi eğlenceler!",
starOnGithub: "GitHub'da Yıldız Verin",
contact: "Mintplex Labs ile İletişime Geçin",
},
"new-workspace": {
title: "Yeni Çalışma Alanı",
placeholder: "Benim Çalışma Alanım",
},
"workspaces—settings": {
general: "Genel Ayarlar",
chat: "Sohbet Ayarları",
vector: "Vektör Veritabanı",
members: "Üyeler",
agent: "Ajan Yapılandırması",
},
general: {
vector: {
title: "Vektör Sayısı",
description: "Vektör veritabanınızdaki toplam vektör sayısı.",
},
names: {
description:
"Bu, yalnızca çalışma alanınızın görüntü adını değiştirecektir.",
},
message: {
title: "Önerilen Sohbet Mesajları",
description:
"Çalışma alanı kullanıcılarınıza önerilecek sohbet mesajlarını özelleştirin.",
add: "Yeni mesaj ekle",
save: "Mesajları Kaydet",
heading: "Bana açıkla",
body: "AnythingLLM'nin faydalarını",
},
pfp: {
title: "Asistan Profil Görseli",
description:
"Bu çalışma alanı için asistanın profil resmini özelleştirin.",
image: "Çalışma Alanı Görseli",
remove: "Çalışma Alanı Görselini Kaldır",
},
delete: {
title: "Çalışma Alanını Sil",
description:
"Bu çalışma alanını ve tüm verilerini silin. Bu işlem, çalışma alanını tüm kullanıcılar için silecektir.",
delete: "Çalışma Alanını Sil",
deleting: "Çalışma Alanı Siliniyor...",
"confirm-start": "Tüm çalışma alanınızı silmek üzeresiniz",
"confirm-end":
". Bu, vektör veritabanınızdaki tüm vektör gömme verilerini kaldıracaktır.\n\nOrijinal kaynak dosyalar etkilenmeyecektir. Bu işlem geri alınamaz.",
},
},
chat: {
llm: {
title: "Çalışma Alanı LLM Sağlayıcısı",
description:
"Bu çalışma alanı için kullanılacak belirli LLM sağlayıcısı ve modeli. Varsayılan olarak sistem LLM sağlayıcısı ve ayarları kullanılır.",
search: "Tüm LLM sağlayıcılarını ara",
},
model: {
title: "Çalışma Alanı Sohbet Modeli",
description:
"Bu çalışma alanı için kullanılacak belirli sohbet modeli. Boş bırakılırsa, sistem LLM tercihi kullanılacaktır.",
wait: "-- modeller bekleniyor --",
},
mode: {
title: "Sohbet Modu",
chat: {
title: "Sohbet",
"desc-start": "LLM'nin genel bilgisiyle yanıtlar sunar",
and: "ve",
"desc-end": "bulunan belge bağlamını ekler.",
},
query: {
title: "Sorgu",
"desc-start": "yanıtları",
only: "sadece",
"desc-end": "belge bağlamı bulunduğunda sunar.",
},
},
history: {
title: "Sohbet Geçmişi",
"desc-start":
"Yanıta dahil edilecek önceki sohbetlerin sayısı (kısa süreli hafıza).",
recommend: "20 önerilir. ",
"desc-end":
"45'ten fazlası, mesaj boyutuna göre sürekli sohbet hatalarına yol açabilir.",
},
prompt: {
title: "Komut (Prompt)",
description:
"Bu çalışma alanında kullanılacak komut. Yapay zekanın yanıt üretmesi için bağlam ve talimatları tanımlayın. Uygun ve doğru yanıtlar almak için özenle hazırlanmış bir komut sağlamalısınız.",
},
refusal: {
title: "Sorgu Modu Ret Yanıtı",
"desc-start": "Eğer",
query: "sorgu",
"desc-end":
"modunda bağlam bulunamazsa, özel bir ret yanıtı döndürmek isteyebilirsiniz.",
},
temperature: {
title: "LLM Sıcaklığı",
"desc-start":
'Bu ayar, LLM yanıtlarının ne kadar "yaratıcı" olacağını kontrol eder.',
"desc-end":
"Sayı yükseldikçe yaratıcı yanıtlar artar. Bazı modeller için bu değer çok yüksek ayarlandığında anlamsız yanıtlar ortaya çıkabilir.",
hint: "Çoğu LLM'in farklı kabul edilebilir değer aralıkları vardır. Ayrıntılar için LLM sağlayıcınıza danışın.",
},
},
"vector-workspace": {
identifier: "Vektör veritabanı tanımlayıcısı",
snippets: {
title: "Maksimum Bağlam Parçacıkları",
description:
"Bu ayar, sohbet veya sorgu başına LLM'e gönderilecek maksimum bağlam parçacığı sayısını kontrol eder.",
recommend: "Önerilen: 4",
},
doc: {
title: "Belge benzerlik eşiği",
description:
"Bir kaynağın sohbetle ilişkili sayılabilmesi için gereken minimum benzerlik puanı. Sayı yükseldikçe, kaynağın sohbete benzerliği de o kadar yüksek olmalıdır.",
zero: "Kısıtlama yok",
low: "Düşük (benzerlik puanı ≥ .25)",
medium: "Orta (benzerlik puanı ≥ .50)",
high: "Yüksek (benzerlik puanı ≥ .75)",
},
reset: {
reset: "Vektör veritabanını sıfırla",
resetting: "Vektörler temizleniyor...",
confirm:
"Bu çalışma alanının vektör veritabanını sıfırlamak üzeresiniz. Bu işlem, hâlihazırda gömülü olan tüm vektör verilerini kaldıracaktır.\n\nOrijinal kaynak dosyalar etkilenmeyecektir. Bu işlem geri alınamaz.",
error: "Çalışma alanının vektör veritabanı sıfırlanamadı!",
success: "Çalışma alanının vektör veritabanı sıfırlandı!",
},
},
agent: {
"performance-warning":
      "Araç çağırmayı açıkça desteklemeyen LLM'lerin performansı, modelin yetenekleri ve doğruluğuna büyük ölçüde bağlıdır. Bazı beceriler kısıtlı veya işlevsiz olabilir.",
provider: {
title: "Çalışma Alanı Ajan LLM Sağlayıcısı",
description:
"Bu çalışma alanındaki @agent ajanı için kullanılacak spesifik LLM sağlayıcısı ve modeli.",
},
mode: {
chat: {
title: "Çalışma Alanı Ajan Sohbet Modeli",
description:
"Bu çalışma alanındaki @agent ajanı için kullanılacak spesifik sohbet modeli.",
},
title: "Çalışma Alanı Ajan Modeli",
description:
"Bu çalışma alanındaki @agent ajanı için kullanılacak spesifik LLM modeli.",
wait: "-- modeller bekleniyor --",
},
skill: {
title: "Varsayılan ajan becerileri",
description:
"Varsayılan ajanın doğal yeteneklerini, hazır oluşturulmuş bu becerilerle geliştirin. Bu yapılandırma tüm çalışma alanları için geçerlidir.",
rag: {
title: "RAG ve uzun vadeli hafıza",
description:
'Ajana, yerel belgelerinizi kullanarak soruları yanıtlatma veya bazı içerikleri "hatırlaması" için uzun vadeli hafıza kullanma izni verin.',
},
view: {
title: "Belgeleri görüntüleme & özetleme",
description:
"Ajana, çalışma alanında hâlihazırda gömülü olan dosyaları listeleyip özetleme izni verin.",
},
scrape: {
title: "Web sitelerini tarama",
description:
"Ajana, web sitelerini ziyaret edip içeriklerini tarama izni verin.",
},
generate: {
title: "Grafik oluşturma",
description:
"Varsayılan ajanın, sağlanan veya sohbette yer alan verilere göre çeşitli grafik türleri oluşturmasına izin verin.",
},
save: {
title: "Tarayıcıya dosya oluştur & kaydet",
description:
"Varsayılan ajanın, oluşturduğu dosyaları kaydetmesine ve tarayıcıda indirilebilir hale getirmesine izin verin.",
},
web: {
title: "Canlı web araması ve gezinme",
"desc-start":
"Ajanınızın, bir web arama (SERP) sağlayıcısına bağlanarak sorularınızı yanıtlamak için web üzerinde arama yapmasına izin verin.",
"desc-end":
"Ajan oturumlarında web araması, bu ayar etkinleştirilene kadar çalışmayacaktır.",
},
},
},
recorded: {
title: "Çalışma Alanı Sohbetleri",
description:
"Bunlar, kullanıcılar tarafından gönderilen ve oluşturulma tarihlerine göre sıralanan tüm kayıtlı sohbetler ve mesajlardır.",
export: "Dışa Aktar",
table: {
id: "Id",
by: "Gönderen",
workspace: "Çalışma Alanı",
prompt: "Komut (Prompt)",
response: "Yanıt",
at: "Gönderilme Zamanı",
},
},
appearance: {
title: "Görünüm",
description: "Platformunuzun görünüm ayarlarını özelleştirin.",
logo: {
title: "Logoyu Özelleştir",
description:
"Özel bir logo yükleyerek chatbot'unuzu kendinize ait hale getirin.",
add: "Özel bir logo ekle",
recommended: "Önerilen boyut: 800 x 200",
remove: "Kaldır",
replace: "Değiştir",
},
message: {
title: "Mesajları Özelleştir",
description:
"Kullanıcılarınıza gösterilen otomatik mesajları özelleştirin.",
new: "Yeni",
system: "sistem",
user: "kullanıcı",
message: "mesaj",
assistant: "AnythingLLM Sohbet Asistanı",
"double-click": "Düzenlemek için çift tıklayın...",
save: "Mesajları Kaydet",
},
icons: {
title: "Özel Altbilgi Simgeleri",
description:
"Kenar çubuğunun altında görüntülenen altbilgi simgelerini özelleştirin.",
icon: "Simge",
link: "Bağlantı",
},
},
api: {
title: "API Anahtarları",
description:
"API anahtarları, bu AnythingLLM örneğine programatik olarak erişmeye ve yönetmeye olanak tanır.",
link: "API dokümantasyonunu okuyun",
generate: "Yeni API Anahtarı Oluştur",
table: {
key: "API Anahtarı",
by: "Oluşturan",
created: "Oluşturulma Tarihi",
},
},
llm: {
title: "LLM Tercihi",
description:
"Bu, tercih ettiğiniz LLM sohbet ve gömme sağlayıcısının kimlik bilgileri ile ayarlarıdır. Bu anahtarların güncel ve doğru olması önemlidir; aksi takdirde AnythingLLM doğru çalışmayacaktır.",
provider: "LLM Sağlayıcısı",
},
transcription: {
title: "Transkripsiyon Model Tercihi",
description:
"Bu, tercih ettiğiniz transkripsiyon modeli sağlayıcısının kimlik bilgileri ve ayarlarıdır. Anahtarların güncel ve doğru olması önemlidir; aksi takdirde medya dosyaları ve sesler transkribe edilemez.",
provider: "Transkripsiyon Sağlayıcısı",
"warn-start":
"Sınırlı RAM veya CPU'ya sahip makinelerde yerel Whisper modelini kullanmak, medya dosyalarını işlerken AnythingLLM'nin duraksamasına neden olabilir.",
"warn-recommend":
"En az 2GB RAM öneriyoruz ve 10MB üzerinde dosya yüklememeye dikkat edin.",
"warn-end":
"Yerleşik model, ilk kullanımda otomatik olarak indirilecektir.",
},
embedding: {
title: "Gömme (Embedding) Tercihi",
"desc-start":
"Yerel olarak gömme mekanizmasını desteklemeyen bir LLM kullanıyorsanız, metinleri gömmek için ek kimlik bilgileri girmeniz gerekebilir.",
"desc-end":
"Gömme, metni vektörlere dönüştürme sürecidir. Dosyalarınızın ve komutlarınızın işlenebilmesi için AnythingLLM, bu kimlik bilgilerine ihtiyaç duyar.",
provider: {
title: "Embedding Sağlayıcısı",
description:
"AnythingLLM'nin yerel gömme motoru kullanıldığında ek bir kurulum gerekmez.",
},
},
text: {
title: "Metin Bölme & Parçalama Tercihleri",
"desc-start":
"Bazı durumlarda, yeni belgelerin vektör veritabanınıza eklenmeden önce hangi varsayılan yöntemle bölünüp parçalanacağını değiştirmek isteyebilirsiniz.",
"desc-end":
"Metin bölmenin nasıl çalıştığını ve olası yan etkilerini tam olarak bilmiyorsanız bu ayarı değiştirmemelisiniz.",
"warn-start": "Buradaki değişiklikler yalnızca",
"warn-center": "yeni eklenen belgeler",
"warn-end": "için geçerli olacak, mevcut belgelere uygulanmaz.",
size: {
title: "Metin Parça Boyutu",
description:
"Tek bir vektörde bulunabilecek maksimum karakter uzunluğunu ifade eder.",
recommend: "Gömme modelinin maksimum karakter uzunluğu",
},
overlap: {
title: "Metin Parçalama Örtüşmesi",
description:
"İki bitişik metin parçası arasındaki, parçalama sırasında oluşabilecek maksimum karakter örtüşme miktarını belirtir.",
},
},
vector: {
title: "Vektör Veritabanı",
description:
"AnythingLLM örneğinizin nasıl çalışacağını belirleyen kimlik bilgileri ve ayarları burada bulunur. Bu anahtarların güncel ve doğru olması önemlidir.",
provider: {
title: "Vektör Veritabanı Sağlayıcısı",
description: "LanceDB için ek bir yapılandırma gerekmez.",
},
},
embeddable: {
title: "Gömülebilir Sohbet Widget'ları",
description:
"Gömülebilir sohbet widget'ları, herkese açık olan ve tek bir çalışma alanına bağlı sohbet arayüzleridir. Bu sayede oluşturduğunuz çalışma alanlarını dünyaya açık hâle getirebilirsiniz.",
create: "Gömme oluştur",
table: {
workspace: "Çalışma Alanı",
chats: "Gönderilen Sohbetler",
Active: "Aktif Alan Adları",
},
},
"embed-chats": {
title: "Gömme Sohbetler",
export: "Dışa Aktar",
description:
"Yayımladığınız herhangi bir gömme sohbetten gelen tüm kayıtlı sohbetler ve mesajlar burada bulunur.",
table: {
embed: "Gömme",
sender: "Gönderen",
message: "Mesaj",
response: "Yanıt",
at: "Gönderilme Zamanı",
},
},
multi: {
title: "Çoklu Kullanıcı Modu",
description:
"Takımınızı desteklemek için örneğinizi yapılandırın ve Çoklu Kullanıcı Modunu etkinleştirin.",
enable: {
"is-enable": "Çoklu Kullanıcı Modu Etkin",
enable: "Çoklu Kullanıcı Modunu Etkinleştir",
description:
"Varsayılan olarak tek yönetici sizsiniz. Yönetici olarak yeni kullanıcılar veya yöneticiler için hesap oluşturmanız gerekir. Şifrenizi kaybetmeyin çünkü yalnızca bir Yönetici kullanıcı şifreleri sıfırlayabilir.",
username: "Yönetici hesap kullanıcı adı",
password: "Yönetici hesap şifresi",
},
password: {
title: "Şifre Koruması",
description:
"AnythingLLM örneğinizi bir şifre ile koruyun. Bu şifreyi unutmanız hâlinde kurtarma yöntemi yoktur, bu yüzden mutlaka güvende saklayın.",
},
instance: {
title: "Örneği Şifreyle Koru",
description:
"Varsayılan olarak tek yönetici sizsiniz. Yönetici olarak yeni kullanıcılar veya yöneticiler için hesap oluşturmanız gerekir. Şifrenizi kaybetmeyin çünkü yalnızca bir Yönetici kullanıcı şifreleri sıfırlayabilir.",
password: "Örnek Şifresi",
},
},
event: {
title: "Olay Kayıtları",
description:
"Bu örnek üzerinde gerçekleşen tüm eylem ve olayları izlemek için görüntüleyin.",
clear: "Olay Kayıtlarını Temizle",
table: {
type: "Olay Türü",
user: "Kullanıcı",
occurred: "Gerçekleşme Zamanı",
},
},
privacy: {
title: "Gizlilik & Veri İşleme",
description:
"Bağlantılı üçüncü taraf sağlayıcılarla ve AnythingLLM ile verilerinizin nasıl ele alındığını burada yapılandırabilirsiniz.",
llm: "LLM Seçimi",
embedding: "Gömme Tercihi",
vector: "Vektör Veritabanı",
anonymous: "Anonim Telemetri Etkin",
},
};
export default TRANSLATIONS;

View file

@ -1,4 +1,53 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
survey: {
email: null,
useCase: null,
useCaseWork: null,
useCasePersonal: null,
useCaseOther: null,
comment: null,
commentPlaceholder: null,
skip: null,
thankYou: null,
title: null,
description: null,
},
home: {
title: null,
getStarted: null,
},
llm: {
title: null,
description: null,
},
userSetup: {
title: null,
description: null,
howManyUsers: null,
justMe: null,
myTeam: null,
instancePassword: null,
setPassword: null,
passwordReq: null,
passwordWarn: null,
adminUsername: null,
adminUsernameReq: null,
adminPassword: null,
adminPasswordReq: null,
teamHint: null,
},
data: {
title: null,
description: null,
settingsHint: null,
},
workspace: {
title: null,
description: null,
},
},
common: {
"workspaces-name": "Tên không gian làm việc",
error: "Lỗi",
@ -9,9 +58,10 @@ const TRANSLATIONS = {
save: "Lưu thay đổi",
previous: "Trang trước",
next: "Trang tiếp theo",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "Cài đặt hệ thống",
system: "Cài đặt chung",
@ -40,8 +90,6 @@ const TRANSLATIONS = {
contact: "Liên hệ hỗ trợ",
"browser-extension": "Tiện ích trình duyệt",
},
// Page Definitions
login: {
"multi-user": {
welcome: "Chào mừng đến với",
@ -64,7 +112,6 @@ const TRANSLATIONS = {
"back-to-login": "Back to Đăng nhập",
},
},
welcomeMessage: {
part1:
"Chào mừng đến với AnythingLLM, AnythingLLM is an open-source AI tool by Mintplex Labs that turns anything into a trained chatbot you can query and chat with. AnythingLLM is a BYOK (bring-your-own-keys) software so there is no subscription, fee, or charges for this software outside of the services you want to use with it.",
@ -86,13 +133,10 @@ const TRANSLATIONS = {
starOnGithub: "Star on GitHub",
contact: "Contact Mintplex Labs",
},
"new-workspace": {
title: "Không gian làm việc mới",
placeholder: "Không gian làm việc của tôi",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "Cài đặt chung",
chat: "Chat Settings",
@ -100,8 +144,6 @@ const TRANSLATIONS = {
members: "Members",
agent: "Agent Configuration",
},
// General Giao diện
general: {
vector: {
title: "Vector Count",
@ -137,8 +179,6 @@ const TRANSLATIONS = {
"workspace. This will remove all vector embeddings in your vector database.\n\nThe original source files will remain untouched. This action is irreversible.",
},
},
// Chat Settings
chat: {
llm: {
title: "Workspace LLM Provider",
@ -196,8 +236,6 @@ const TRANSLATIONS = {
hint: "Most LLMs have various acceptable ranges of valid values. Consult your LLM provider for that information.",
},
},
// Cơ sở dữ liệu Vector
"vector-workspace": {
identifier: "Vector database identifier",
snippets: {
@ -224,8 +262,6 @@ const TRANSLATIONS = {
success: "Workspace vector database was reset!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"Performance of LLMs that do not explicitly support tool-calling is highly dependent on the model's capabilities and accuracy. Some abilities may be limited or non-functional.",
@ -245,7 +281,6 @@ const TRANSLATIONS = {
"The specific LLM model that will be used for this workspace's @agent agent.",
wait: "-- waiting for models --",
},
skill: {
title: "Default agent skills",
description:
@ -284,8 +319,6 @@ const TRANSLATIONS = {
},
},
},
// Hội thoại không gian làm việc
recorded: {
title: "Hội thoại không gian làm việc",
description:
@ -300,8 +333,6 @@ const TRANSLATIONS = {
at: "Sent At",
},
},
// Giao diện
appearance: {
title: "Giao diện",
description: "Customize the appearance settings of your platform.",
@ -332,8 +363,6 @@ const TRANSLATIONS = {
link: "Link",
},
},
// Khóa API
api: {
title: "Khóa API",
description:
@ -346,14 +375,12 @@ const TRANSLATIONS = {
created: "Created",
},
},
llm: {
title: "LLM Preference",
description:
"These are the credentials and settings for your preferred LLM chat & embedding provider. Its important these keys are current and correct or else AnythingLLM will not function properly.",
provider: "LLM Provider",
},
transcription: {
title: "Chuyển đổi giọng nói Model Preference",
description:
@ -366,7 +393,6 @@ const TRANSLATIONS = {
"warn-end":
"The built-in model will automatically download on the first use.",
},
embedding: {
title: "Tùy chọn nhúng",
"desc-start":
@ -379,7 +405,6 @@ const TRANSLATIONS = {
"There is no set up required when using AnythingLLM's native embedding engine.",
},
},
text: {
title: "Tùy chọn chia nhỏ và tách văn bản",
"desc-start":
@ -395,15 +420,12 @@ const TRANSLATIONS = {
"This is the maximum length of characters that can be present in a single vector.",
recommend: "Embed model maximum length is",
},
overlap: {
title: "Text Chunk Overlap",
description:
"This is the maximum overlap of characters that occurs during chunking between two adjacent text chunks.",
},
},
// Cơ sở dữ liệu Vector
vector: {
title: "Cơ sở dữ liệu Vector",
description:
@ -413,8 +435,6 @@ const TRANSLATIONS = {
description: "There is no configuration needed for LanceDB.",
},
},
// Tiện ích hội thoại nhúng
embeddable: {
title: "Tiện ích hội thoại nhúng",
description:
@ -426,7 +446,6 @@ const TRANSLATIONS = {
Active: "Active Domains",
},
},
"embed-chats": {
title: "Embed Chats",
export: "Export",
@ -440,7 +459,6 @@ const TRANSLATIONS = {
at: "Sent At",
},
},
multi: {
title: "Multi-Người dùng Mode",
description:
@ -465,8 +483,6 @@ const TRANSLATIONS = {
password: "Instance password",
},
},
// Nhật ký sự kiện
event: {
title: "Nhật ký sự kiện",
description:
@ -478,8 +494,6 @@ const TRANSLATIONS = {
occurred: "Occurred At",
},
},
// Quyền riêng tư & Dữ liệu-Handling
privacy: {
title: "Quyền riêng tư & Dữ liệu-Handling",
description:

View file

@ -1,5 +1,57 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
home: {
title: "欢迎使用",
getStarted: "开始",
},
llm: {
title: "LLM 偏好",
description:
"AnythingLLM 可以与多家 LLM 提供商合作。这将是处理聊天的服务。",
},
userSetup: {
title: "用户设置",
description: "配置你的用户设置。",
howManyUsers: "将有多少用户使用此实例?",
justMe: "只有我",
myTeam: "我的团队",
instancePassword: "实例密码",
setPassword: "你想要设置密码吗?",
passwordReq: "密码必须至少包含 8 个字符。",
passwordWarn: "保存此密码很重要,因为没有恢复方法。",
adminUsername: "管理员账户用户名",
adminUsernameReq:
"用户名必须至少为 6 个字符,并且只能包含小写字母、数字、下划线和连字符,不含空格。",
adminPassword: "管理员账户密码",
adminPasswordReq: "密码必须至少包含 8 个字符。",
teamHint:
"默认情况下,你将是唯一的管理员。完成入职后,你可以创建和邀请其他人成为用户或管理员。不要丢失你的密码,因为只有管理员可以重置密码。",
},
data: {
title: "数据处理与隐私",
description: "我们致力于在涉及你的个人数据时提供透明和控制。",
settingsHint: "这些设置可以随时在设置中重新配置。",
},
survey: {
title: "欢迎使用 AnythingLLM",
description: "帮助我们为你的需求打造 AnythingLLM。可选。",
email: "你的电子邮件是什么?",
useCase: "你将如何使用 AnythingLLM",
useCaseWork: "用于工作",
useCasePersonal: "用于个人使用",
useCaseOther: "其他",
comment: "你是如何听说 AnythingLLM 的?",
commentPlaceholder:
"RedditTwitterGitHubYouTube 等 - 让我们知道你是如何找到我们的!",
skip: "跳过调查",
thankYou: "感谢你的反馈!",
},
workspace: {
title: "创建你的第一个工作区",
description: "创建你的第一个工作区并开始使用 AnythingLLM。",
},
},
common: {
"workspaces-name": "工作区名称",
error: "错误",
@ -10,16 +62,17 @@ const TRANSLATIONS = {
saving: "保存中...",
previous: "上一页",
next: "下一页",
optional: null,
yes: null,
no: null,
},
// Setting Sidebar menu items.
settings: {
title: "设置",
system: "系统",
invites: "邀请",
users: "用户",
workspaces: "工作区",
"workspace-chats": "对话历史记录", // "workspace-chats" should be "对话历史记录", means "chat history",or "chat history records"
"workspace-chats": "对话历史记录",
customization: "外观",
"api-keys": "API 密钥",
llm: "LLM 首选项",
@ -41,8 +94,6 @@ const TRANSLATIONS = {
contact: "联系支持",
"browser-extension": "浏览器扩展",
},
// Page Definitions
login: {
"multi-user": {
welcome: "欢迎!",
@ -59,40 +110,36 @@ const TRANSLATIONS = {
},
"password-reset": {
title: "重置密码",
description: "请提供以下必要信息以重置的密码。",
description: "请提供以下必要信息以重置的密码。",
"recovery-codes": "恢复代码",
"recovery-code": "恢复代码 {{index}}",
"back-to-login": "返回登录",
},
},
welcomeMessage: {
part1:
"欢迎使用 AnythingLLM这是由 Mintplex Labs 开发的开源 AI 工具,可以将任何东西转换为可以查询和聊天的训练有素的聊天机器人。AnythingLLM 是一款 BYOK自带密钥软件因此除了想使用的服务外,此软件不收取订阅费、费用或其他费用。",
"欢迎使用 AnythingLLM这是由 Mintplex Labs 开发的开源 AI 工具,可以将任何东西转换为可以查询和聊天的训练有素的聊天机器人。AnythingLLM 是一款 BYOK自带密钥软件因此除了想使用的服务外,此软件不收取订阅费、费用或其他费用。",
part2:
"AnythingLLM 是将强大的 AI 产品(如 OpenAi、GPT-4、LangChain、PineconeDB、ChromaDB 等)整合在一个整洁的包中而无需繁琐操作的最简单方法,可以将的生产力提高 100 倍。",
"AnythingLLM 是将强大的 AI 产品(如 OpenAi、GPT-4、LangChain、PineconeDB、ChromaDB 等)整合在一个整洁的包中而无需繁琐操作的最简单方法,可以将的生产力提高 100 倍。",
part3:
"AnythingLLM 可以完全在您的本地计算机上运行,几乎没有开销,您甚至不会注意到它的存在!无需 GPU。也可以进行云端和本地安装。\nAI 工具生态系统每天都在变得更强大。AnythingLLM 使其易于使用。",
"AnythingLLM 可以完全在你的本地计算机上运行,几乎没有开销,你甚至不会注意到它的存在!无需 GPU。也可以进行云端和本地安装。\nAI 工具生态系统每天都在变得更强大。AnythingLLM 使其易于使用。",
githubIssue: "在 Github 上创建问题",
user1: "我该如何开始?!",
part4:
"很简单。所有集合都组织成我们称之为“工作区”的桶。工作区是文件、文档、图像、PDF 和其他文件的存储桶,这些文件将被转换为 LLM 可以理解和在对话中使用的内容。\n\n可以随时添加和删除文件。",
createWorkspace: "创建的第一个工作区",
"很简单。所有集合都组织成我们称之为“工作区”的桶。工作区是文件、文档、图像、PDF 和其他文件的存储桶,这些文件将被转换为 LLM 可以理解和在对话中使用的内容。\n\n可以随时添加和删除文件。",
createWorkspace: "创建的第一个工作区",
user2: "这像是一个 AI Dropbox 吗?那么聊天呢?它是一个聊天机器人,不是吗?",
part5:
"AnythingLLM 不仅仅是一个更智能的 Dropbox。\n\nAnythingLLM 提供了两种与您的数据交流的方式:\n\n<i>查询:</i> 您的聊天将返回在您的工作区中访问的文档中找到的数据或推论。向工作区添加更多文档会使其更智能!\n\n<i>对话:</i> 的文档和正在进行的聊天记录同时为 LLM 知识做出贡献。非常适合添加基于文本的实时信息或纠正 LLM 可能存在的误解。\n\n可以在聊天过程中 <i>切换模式!</i>",
"AnythingLLM 不仅仅是一个更智能的 Dropbox。\n\nAnythingLLM 提供了两种与你的数据交流的方式:\n\n<i>查询:</i> 你的聊天将返回在你的工作区中访问的文档中找到的数据或推论。向工作区添加更多文档会使其更智能!\n\n<i>对话:</i> 的文档和正在进行的聊天记录同时为 LLM 知识做出贡献。非常适合添加基于文本的实时信息或纠正 LLM 可能存在的误解。\n\n可以在聊天过程中 <i>切换模式!</i>",
user3: "哇,这听起来很棒,让我马上试试!",
part6: "玩得开心!",
starOnGithub: "在 GitHub 上加星",
contact: "联系 Mintplex Labs",
},
"new-workspace": {
title: "新工作区",
placeholder: "我的工作区",
},
// Workspace Settings menu items
"workspaces—settings": {
general: "通用设置",
chat: "聊天设置",
@ -100,8 +147,6 @@ const TRANSLATIONS = {
members: "成员",
agent: "代理配置",
},
// General Appearance
general: {
vector: {
title: "向量数量",
@ -112,7 +157,7 @@ const TRANSLATIONS = {
},
message: {
title: "建议的聊天消息",
description: "自定义将向的工作区用户建议的消息。",
description: "自定义将向的工作区用户建议的消息。",
add: "添加新消息",
save: "保存消息",
heading: "向我解释",
@ -129,13 +174,11 @@ const TRANSLATIONS = {
description: "删除此工作区及其所有数据。这将删除所有用户的工作区。",
delete: "删除工作区",
deleting: "正在删除工作区...",
"confirm-start": "即将删除整个",
"confirm-start": "即将删除整个",
"confirm-end":
"工作区。这将删除矢量数据库中的所有矢量嵌入。\n\n原始源文件将保持不变。此操作是不可逆转的。",
},
},
// Chat Settings
chat: {
llm: {
title: "工作区 LLM 提供者",
@ -174,24 +217,22 @@ const TRANSLATIONS = {
prompt: {
title: "聊天提示",
description:
"将在此工作区上使用的提示。定义 AI 生成响应的上下文和指令。应该提供精心设计的提示,以便人工智能可以生成相关且准确的响应。",
"将在此工作区上使用的提示。定义 AI 生成响应的上下文和指令。应该提供精心设计的提示,以便人工智能可以生成相关且准确的响应。",
},
refusal: {
title: "查询模式拒绝响应",
"desc-start": "当处于",
query: "查询",
"desc-end": "模式时,当未找到上下文时,可能希望返回自定义拒绝响应。",
"desc-end": "模式时,当未找到上下文时,可能希望返回自定义拒绝响应。",
},
temperature: {
title: "LLM 温度",
"desc-start": "此设置控制的 LLM 回答的“创意”程度",
"desc-start": "此设置控制的 LLM 回答的“创意”程度",
"desc-end":
"数字越高越有创意。对于某些模型,如果设置得太高,可能会导致响应不一致。",
hint: "大多数 LLM 都有各种可接受的有效值范围。请咨询的LLM提供商以获取该信息。",
hint: "大多数 LLM 都有各种可接受的有效值范围。请咨询的LLM提供商以获取该信息。",
},
},
// Vector Database Settings
"vector-workspace": {
identifier: "向量数据库标识符",
snippets: {
@ -213,13 +254,11 @@ const TRANSLATIONS = {
reset: "重置向量数据库",
resetting: "清除向量...",
confirm:
"将重置此工作区的矢量数据库。这将删除当前嵌入的所有矢量嵌入。\n\n原始源文件将保持不变。此操作是不可逆转的。",
"将重置此工作区的矢量数据库。这将删除当前嵌入的所有矢量嵌入。\n\n原始源文件将保持不变。此操作是不可逆转的。",
success: "向量数据库已重置。",
error: "无法重置工作区向量数据库!",
},
},
// Agent Configuration
agent: {
"performance-warning":
"不明确支持工具调用的 LLMs 的性能高度依赖于模型的功能和准确性。有些能力可能受到限制或不起作用。",
@ -243,7 +282,7 @@ const TRANSLATIONS = {
rag: {
title: "RAG 和长期记忆",
description:
'允许代理利用的本地文档来回答查询,或要求代理"记住"长期记忆检索的内容片段。',
'允许代理利用的本地文档来回答查询,或要求代理"记住"长期记忆检索的内容片段。',
},
view: {
title: "查看和总结文档",
@ -260,18 +299,16 @@ const TRANSLATIONS = {
save: {
title: "生成并保存文件到浏览器",
description:
"使默认代理能够生成并写入文件,这些文件可以保存并在的浏览器中下载。",
"使默认代理能够生成并写入文件,这些文件可以保存并在的浏览器中下载。",
},
web: {
title: "实时网络搜索和浏览",
"desc-start":
"通过连接到网络搜索SERP提供者使您的代理能够搜索网络以回答您的问题。",
"通过连接到网络搜索SERP提供者使你的代理能够搜索网络以回答你的问题。",
"desc-end": "在代理会话期间,网络搜索将不起作用,直到此设置完成。",
},
},
},
// Workspace Chat
recorded: {
title: "工作区聊天历史记录",
description: "这些是用户发送的所有聊天记录和消息,按创建日期排序。",
@ -285,13 +322,12 @@ const TRANSLATIONS = {
at: "发送时间",
},
},
appearance: {
title: "外观",
description: "自定义平台的外观设置。",
logo: {
title: "自定义图标",
description: "上传您的自定义图标,让您的聊天机器人成为您的。",
description: "上传你的自定义图标,让你的聊天机器人成为你的。",
add: "添加自定义图标",
recommended: "建议尺寸800 x 200",
remove: "移除",
@ -315,8 +351,6 @@ const TRANSLATIONS = {
link: "链接",
},
},
// API Keys
api: {
title: "API 密钥",
description: "API 密钥允许持有者以编程方式访问和管理此 AnythingLLM 实例。",
@ -328,42 +362,37 @@ const TRANSLATIONS = {
created: "创建",
},
},
// LLM Preferences
llm: {
title: "LLM 首选项",
description:
"这些是首选的 LLM 聊天和嵌入提供商的凭据和设置。重要的是,这些密钥是最新的和正确的,否则 AnythingLLM 将无法正常运行。",
"这些是首选的 LLM 聊天和嵌入提供商的凭据和设置。重要的是,这些密钥是最新的和正确的,否则 AnythingLLM 将无法正常运行。",
provider: "LLM 提供商",
},
transcription: {
title: "转录模型首选项",
description:
"这些是的首选转录模型提供商的凭据和设置。重要的是这些密钥是最新且正确的,否则媒体文件和音频将无法转录。",
"这些是的首选转录模型提供商的凭据和设置。重要的是这些密钥是最新且正确的,否则媒体文件和音频将无法转录。",
provider: "转录提供商",
"warn-start":
"在 RAM 或 CPU 有限的计算机上使用本地耳语模型可能会在处理媒体文件时停止 AnythingLLM。",
"warn-recommend": "我们建议至少 2GB RAM 并上传 <10Mb 的文件。",
"warn-end": "内置模型将在首次使用时自动下载。",
},
embedding: {
title: "嵌入首选项",
"desc-start":
"当使用本身不支持嵌入引擎的 LLM 时,可能需要额外指定用于嵌入文本的凭据。",
"当使用本身不支持嵌入引擎的 LLM 时,可能需要额外指定用于嵌入文本的凭据。",
"desc-end":
"嵌入是将文本转换为矢量的过程。需要这些凭据才能将的文件和提示转换为 AnythingLLM 可以用来处理的格式。",
"嵌入是将文本转换为矢量的过程。需要这些凭据才能将的文件和提示转换为 AnythingLLM 可以用来处理的格式。",
provider: {
title: "嵌入引擎提供商",
description: "使用 AnythingLLM 的本机嵌入引擎时不需要设置。",
},
},
text: {
title: "文本拆分和分块首选项",
"desc-start":
"有时,可能希望更改新文档在插入到矢量数据库之前拆分和分块的默认方式。",
"有时,可能希望更改新文档在插入到矢量数据库之前拆分和分块的默认方式。",
"desc-end": "只有在了解文本拆分的工作原理及其副作用时,才应修改此设置。",
"warn-start": "此处的更改仅适用于",
"warn-center": "新嵌入的文档",
@ -378,8 +407,6 @@ const TRANSLATIONS = {
description: "这是在两个相邻文本块之间分块期间发生的最大字符重叠。",
},
},
// Vector Database
vector: {
title: "向量数据库",
description:
@ -389,12 +416,10 @@ const TRANSLATIONS = {
description: "LanceDB 不需要任何配置。",
},
},
// Embeddable Chats
embeddable: {
title: "可嵌入的聊天小部件",
description:
"可嵌入的聊天小部件是与单个工作区绑定的面向公众的聊天界面。这些允许您构建工作区,然后您可以将其发布到全世界。",
"可嵌入的聊天小部件是与单个工作区绑定的面向公众的聊天界面。这些允许你构建工作区,然后你可以将其发布到全世界。",
create: "创建嵌入式对话",
table: {
workspace: "工作区",
@ -402,12 +427,10 @@ const TRANSLATIONS = {
Active: "活动域",
},
},
// Embeddable Chat History
"embed-chats": {
title: "嵌入的聊天历史纪录",
export: "导出",
description: "这些是发布的任何嵌入的所有记录的聊天和消息。",
description: "这些是发布的任何嵌入的所有记录的聊天和消息。",
table: {
embed: "嵌入",
sender: "发送者",
@ -416,32 +439,29 @@ const TRANSLATIONS = {
at: "发送时间",
},
},
multi: {
title: "多用户模式",
description: "通过激活多用户模式来设置您的实例以支持您的团队。",
description: "通过激活多用户模式来设置你的实例以支持你的团队。",
enable: {
"is-enable": "多用户模式已启用",
enable: "启用多用户模式",
description:
"默认情况下,您将是唯一的管理员。作为管理员,您需要为所有新用户或管理员创建账户。不要丢失您的密码,因为只有管理员用户可以重置密码。",
"默认情况下,你将是唯一的管理员。作为管理员,你需要为所有新用户或管理员创建账户。不要丢失你的密码,因为只有管理员用户可以重置密码。",
username: "管理员账户用户名",
password: "管理员账户密码",
},
password: {
title: "密码保护",
description:
"用密码保护您的AnythingLLM实例。如果您忘记了密码,那么没有恢复方法,所以请确保保存这个密码。",
"用密码保护你的AnythingLLM实例。如果你忘记了密码,那么没有恢复方法,所以请确保保存这个密码。",
},
instance: {
title: "实例密码保护",
description:
"默认情况下,您将是唯一的管理员。作为管理员,您需要为所有新用户或管理员创建账户。不要丢失您的密码,因为只有管理员用户可以重置密码。",
"默认情况下,你将是唯一的管理员。作为管理员,你需要为所有新用户或管理员创建账户。不要丢失你的密码,因为只有管理员用户可以重置密码。",
password: "实例密码",
},
},
// Event Logs
event: {
title: "事件日志",
description: "查看此实例上发生的所有操作和事件以进行监控。",
@ -452,12 +472,10 @@ const TRANSLATIONS = {
occurred: "发生时间",
},
},
// Privacy & Data-Handling
privacy: {
title: "隐私和数据处理",
description:
"这是对如何处理连接的第三方提供商和AnythingLLM的数据的配置。",
"这是对如何处理连接的第三方提供商和AnythingLLM的数据的配置。",
llm: "LLM 选择",
embedding: "嵌入首选项",
vector: "向量数据库",

View file

@ -1,4 +1,57 @@
// Anything with "null" requires a translation. Contribute to translation via a PR!
const TRANSLATIONS = {
onboarding: {
home: {
title: "歡迎使用",
getStarted: "開始使用",
},
llm: {
title: "LLM 偏好",
description:
"AnythingLLM 可以與多家 LLM 提供商合作。這將是處理聊天的服務。",
},
userSetup: {
title: "使用者設定",
description: "配置您的使用者設定。",
howManyUsers: "將有多少使用者使用此實例?",
justMe: "只有我",
myTeam: "我的團隊",
instancePassword: "實例密碼",
setPassword: "您想要設定密碼嗎?",
passwordReq: "密碼必須至少包含 8 個字元。",
passwordWarn: "保存此密碼很重要,因為沒有恢復方法。",
adminUsername: "管理員帳號使用者名稱",
adminUsernameReq:
"使用者名稱必須至少為 6 個字元,並且只能包含小寫字母、數字、底線和連字號,不含空格。",
adminPassword: "管理員帳號密碼",
adminPasswordReq: "密碼必須至少包含 8 個字元。",
teamHint:
"預設情況下,您將是唯一的管理員。完成入職後,您可以創建和邀請其他人成為使用者或管理員。不要遺失您的密碼,因為只有管理員可以重置密碼。",
},
data: {
title: "資料處理與隱私",
description: "我們致力於在涉及您的個人資料時提供透明和控制。",
settingsHint: "這些設定可以隨時在設定中重新配置。",
},
survey: {
title: "歡迎使用 AnythingLLM",
description: "幫助我們為您的需求打造 AnythingLLM。可選。",
email: "您的電子郵件是什麼?",
useCase: "您將如何使用 AnythingLLM",
useCaseWork: "用於工作",
useCasePersonal: "用於個人使用",
useCaseOther: "其他",
comment: "您是如何聽說 AnythingLLM 的?",
commentPlaceholder:
"RedditTwitterGitHubYouTube 等 - 讓我們知道您是如何找到我們的!",
skip: "跳過調查",
thankYou: "感謝您的反饋!",
},
workspace: {
title: "創建您的第一個工作區",
description: "創建您的第一個工作區並開始使用 AnythingLLM。",
},
},
common: {
"workspaces-name": "工作區名稱",
error: "錯誤",
@ -9,9 +62,10 @@ const TRANSLATIONS = {
save: "儲存修改",
previous: "上一頁",
next: "下一頁",
optional: null,
yes: null,
no: null,
},
// 設定側邊欄選單項目
settings: {
title: "系統設定",
system: "一般設定",
@ -40,8 +94,6 @@ const TRANSLATIONS = {
contact: "聯絡支援",
"browser-extension": "瀏覽器擴充功能",
},
// 頁面定義
login: {
"multi-user": {
welcome: "歡迎使用",
@ -64,7 +116,6 @@ const TRANSLATIONS = {
"back-to-login": "返回登入頁面",
},
},
welcomeMessage: {
part1:
"歡迎使用 AnythingLLMAnythingLLM 是由 Mintplex Labs 開發的開源 AI 工具它能將任何內容轉換成可供查詢和對話的訓練模型對話機器人。AnythingLLM 採用 BYOK自備金鑰軟體模式除了您想使用的服務之外本軟體不收取任何訂閱費、費用或其他費用。",
@ -85,13 +136,10 @@ const TRANSLATIONS = {
starOnGithub: "在 GitHub 上給我們星星",
contact: "聯絡 Mintplex Labs",
},
"new-workspace": {
title: "新增工作區",
placeholder: "我的工作區",
},
// 工作區設定選單項目
"workspaces—settings": {
general: "一般設定",
chat: "對話設定",
@ -99,8 +147,6 @@ const TRANSLATIONS = {
members: "成員管理",
agent: "智慧代理人設定",
},
// 一般外觀
general: {
vector: {
title: "向量計數",
@ -133,8 +179,6 @@ const TRANSLATIONS = {
"工作區。這將會移除向量資料庫中的所有向量嵌入。\n\n原始檔案將保持不變。此動作無法復原。",
},
},
// 對話設定
chat: {
llm: {
title: "工作區 LLM 提供者",
@ -188,8 +232,6 @@ const TRANSLATIONS = {
hint: "大多數 LLM 都有各種可接受的有效值範圍。請查詢您的 LLM 提供者以取得該資訊。",
},
},
// 向量資料庫
"vector-workspace": {
identifier: "向量資料庫識別碼",
snippets: {
@ -216,8 +258,6 @@ const TRANSLATIONS = {
success: "工作區向量資料庫已重設!",
},
},
// 智慧代理人設定
agent: {
"performance-warning":
"不直接支援工具呼叫的 LLM 的效能,高度取決於模型的功能和精確度。某些功能可能受限或無法使用。",
@ -234,7 +274,6 @@ const TRANSLATIONS = {
description: "此工作區 @agent 智慧代理人將使用的特定 LLM 模型。",
wait: "-- 等待模型中 --",
},
skill: {
title: "預設智慧代理人技能",
description:
@ -271,8 +310,6 @@ const TRANSLATIONS = {
},
},
},
// 工作區對話紀錄
recorded: {
title: "工作區對話紀錄",
description: "這些是所有已記錄的對話和訊息,依建立日期排序。",
@ -286,8 +323,6 @@ const TRANSLATIONS = {
at: "傳送時間",
},
},
// 外觀
appearance: {
title: "外觀",
description: "自訂平台的外觀設定。",
@ -317,8 +352,6 @@ const TRANSLATIONS = {
link: "連結",
},
},
// API 金鑰
api: {
title: "API 金鑰",
description:
@ -331,14 +364,12 @@ const TRANSLATIONS = {
created: "建立時間",
},
},
llm: {
title: "LLM 偏好設定",
description:
"這些是您偏好的 LLM 對話與嵌入提供者的憑證和設定。確保這些金鑰是最新且正確的,否則 AnythingLLM 將無法正常運作。",
provider: "LLM 提供者",
},
transcription: {
title: "語音轉錄模型偏好設定",
description:
@ -349,7 +380,6 @@ const TRANSLATIONS = {
"warn-recommend": "我們建議至少 2GB 的記憶體,並且上傳小於 10MB 的檔案。",
"warn-end": "內建模型將會在第一次使用時自動下載。",
},
embedding: {
title: "向量嵌入偏好設定",
"desc-start":
@ -361,7 +391,6 @@ const TRANSLATIONS = {
description: "使用 AnythingLLM 的原生嵌入引擎時,不需要任何設定。",
},
},
text: {
title: "文字分割與區塊化偏好設定",
"desc-start":
@ -376,14 +405,11 @@ const TRANSLATIONS = {
description: "這是單一向量中可包含的最大字元長度。",
recommend: "嵌入模型的最大長度為",
},
overlap: {
title: "文字區塊重疊",
description: "這是區塊化過程中,兩個相鄰文字區塊之間的最大字元重疊數。",
},
},
// 向量資料庫
vector: {
title: "向量資料庫",
description:
@ -393,8 +419,6 @@ const TRANSLATIONS = {
description: "使用 LanceDB 不需要任何設定。",
},
},
// 可嵌入對話小工具
embeddable: {
title: "可嵌入對話小工具",
description:
@ -406,7 +430,6 @@ const TRANSLATIONS = {
Active: "已啟用網域",
},
},
"embed-chats": {
title: "嵌入對話",
export: "匯出",
@ -419,7 +442,6 @@ const TRANSLATIONS = {
at: "傳送時間",
},
},
multi: {
title: "多使用者模式",
description: "透過啟用多使用者模式來設定您的系統,以支援您的團隊。",
@ -443,8 +465,6 @@ const TRANSLATIONS = {
password: "系統密碼",
},
},
// 事件記錄
event: {
title: "事件記錄",
description: "檢視此系統上發生的所有動作和事件,以進行監控。",
@ -455,8 +475,6 @@ const TRANSLATIONS = {
occurred: "發生時間",
},
},
// 隱私與資料處理
privacy: {
title: "隱私與資料處理",
description:

View file

@ -38,7 +38,7 @@ export default function DBConnection({ connection, onRemove, setHasChanges }) {
<button
type="button"
onClick={removeConfirmation}
className="border-none text-white/40 hover:text-red-500"
className="border-none text-white hover:text-red-500"
>
<X size={24} />
</button>

View file

@ -15,13 +15,9 @@ export default function Main() {
}
return (
<>
<UserMenu>
<div className="w-screen h-screen overflow-hidden bg-theme-bg-container flex">
{!isMobile && <Sidebar />}
<DefaultChatContainer />
</div>
</UserMenu>
</>
);
}

View file

@ -6,19 +6,17 @@ import { useNavigate } from "react-router-dom";
import Workspace from "@/models/workspace";
import { useTranslation } from "react-i18next";
const TITLE = "Create your first workspace";
const DESCRIPTION =
"Create your first workspace and get started with AnythingLLM.";
export default function CreateWorkspace({
setHeader,
setForwardBtn,
setBackBtn,
}) {
const { t } = useTranslation();
const [workspaceName, setWorkspaceName] = useState("");
const navigate = useNavigate();
const createWorkspaceRef = useRef();
const { t } = useTranslation();
const TITLE = t("onboarding.workspace.title");
const DESCRIPTION = t("onboarding.workspace.description");
useEffect(() => {
setHeader({ title: TITLE, description: DESCRIPTION });

View file

@ -40,10 +40,8 @@ import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom";
import { useTranslation } from "react-i18next";
const TITLE = "Data Handling & Privacy";
const DESCRIPTION =
"We are committed to transparency and control when it comes to your personal data.";
export const LLM_SELECTION_PRIVACY = {
openai: {
name: "OpenAI",
@ -406,12 +404,16 @@ export const FALLBACKS = {
};
export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {
const { t } = useTranslation();
const [llmChoice, setLLMChoice] = useState("openai");
const [loading, setLoading] = useState(true);
const [vectorDb, setVectorDb] = useState("pinecone");
const [embeddingEngine, setEmbeddingEngine] = useState("openai");
const navigate = useNavigate();
const TITLE = t("onboarding.data.title");
const DESCRIPTION = t("onboarding.data.description");
useEffect(() => {
setHeader({ title: TITLE, description: DESCRIPTION });
setForwardBtn({ showing: true, disabled: false, onClick: handleForward });
@ -515,7 +517,7 @@ export default function DataHandling({ setHeader, setForwardBtn, setBackBtn }) {
</div>
</div>
<p className="text-theme-text-secondary text-sm font-medium py-1">
These settings can be reconfigured at any time in the settings.
{t("onboarding.data.settingsHint")}
</p>
</div>
);

View file

@ -6,6 +6,7 @@ import RGroupImgLight from "./r_group-light.png";
import AnythingLLMLogo from "@/media/logo/anything-llm.png";
import { useNavigate } from "react-router-dom";
import { useTheme } from "@/hooks/useTheme";
import { useTranslation } from "react-i18next";
const IMG_SRCSET = {
light: {
@ -21,6 +22,7 @@ const IMG_SRCSET = {
export default function OnboardingHome() {
const navigate = useNavigate();
const { theme } = useTheme();
const { t } = useTranslation();
const srcSet = IMG_SRCSET?.[theme] || IMG_SRCSET.default;
return (
@ -39,7 +41,7 @@ export default function OnboardingHome() {
<div className="relative flex justify-center items-center m-auto">
<div className="flex flex-col justify-center items-center">
<p className="text-theme-text-primary font-thin text-[24px]">
Welcome to
{t("onboarding.home.title")}
</p>
<img
src={AnythingLLMLogo}
@ -50,7 +52,7 @@ export default function OnboardingHome() {
onClick={() => navigate(paths.onboarding.llmPreference())}
className="border-[2px] border-theme-text-primary animate-pulse light:animate-none w-full md:max-w-[350px] md:min-w-[300px] text-center py-3 bg-theme-button-primary hover:bg-theme-bg-secondary text-theme-text-primary font-semibold text-sm my-10 rounded-md "
>
Get started
{t("onboarding.home.getStarted")}
</button>
</div>
</div>

View file

@ -10,7 +10,6 @@ import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
import LocalAiLogo from "@/media/llmprovider/localai.png";
import TogetherAILogo from "@/media/llmprovider/togetherai.png";
import FireworksAILogo from "@/media/llmprovider/fireworksai.jpeg";
import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
import MistralLogo from "@/media/llmprovider/mistral.jpeg";
import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
import PerplexityLogo from "@/media/llmprovider/perplexity.png";
@ -57,10 +56,7 @@ import System from "@/models/system";
import paths from "@/utils/paths";
import showToast from "@/utils/toast";
import { useNavigate } from "react-router-dom";
const TITLE = "LLM Preference";
const DESCRIPTION =
"AnythingLLM can work with many LLM providers. This will be the service which handles chatting.";
import { useTranslation } from "react-i18next";
const LLMS = [
{
@ -253,6 +249,7 @@ export default function LLMPreference({
setForwardBtn,
setBackBtn,
}) {
const { t } = useTranslation();
const [searchQuery, setSearchQuery] = useState("");
const [filteredLLMs, setFilteredLLMs] = useState([]);
const [selectedLLM, setSelectedLLM] = useState(null);
@ -262,6 +259,9 @@ export default function LLMPreference({
const isHosted = window.location.hostname.includes("useanything.com");
const navigate = useNavigate();
const TITLE = t("onboarding.llm.title");
const DESCRIPTION = t("onboarding.llm.description");
useEffect(() => {
async function fetchKeys() {
const _settings = await System.keys();

View file

@ -6,9 +6,7 @@ import paths from "@/utils/paths";
import { CheckCircle } from "@phosphor-icons/react";
import React, { useState, useEffect, useRef } from "react";
import { useNavigate } from "react-router-dom";
const TITLE = "Welcome to AnythingLLM";
const DESCRIPTION = "Help us make AnythingLLM built for your needs. Optional.";
import { useTranslation } from "react-i18next";
async function sendQuestionnaire({ email, useCase, comment }) {
if (import.meta.env.DEV) {
@ -44,11 +42,15 @@ async function sendQuestionnaire({ email, useCase, comment }) {
}
export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
const { t } = useTranslation();
const [selectedOption, setSelectedOption] = useState("");
const formRef = useRef(null);
const navigate = useNavigate();
const submitRef = useRef(null);
const TITLE = t("onboarding.survey.title");
const DESCRIPTION = t("onboarding.survey.description");
function handleForward() {
if (!!window?.localStorage?.getItem(COMPLETE_QUESTIONNAIRE)) {
navigate(paths.onboarding.createWorkspace());
@ -109,7 +111,9 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
<div className="w-full flex items-center justify-center px-1 md:px-8 py-4">
<div className="w-auto flex flex-col gap-y-1 items-center">
<CheckCircle size={60} className="text-green-500" />
<p className="text-white text-lg">Thank you for your feedback!</p>
<p className="text-white text-lg">
{t("onboarding.survey.thankYou")}
</p>
<a
href={paths.mailToMintplex()}
className="text-sky-400 underline text-xs"
@ -130,7 +134,7 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
htmlFor="email"
className="text-theme-text-primary text-base font-medium"
>
What's your email?{" "}
{t("onboarding.survey.email")}{" "}
</label>
<input
name="email"
@ -146,7 +150,7 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
className="text-theme-text-primary text-base font-medium"
htmlFor="use_case"
>
What will you use AnythingLLM for?{" "}
{t("onboarding.survey.useCase")}{" "}
</label>
<div className="mt-2 gap-y-3 flex flex-col">
<label
@ -172,7 +176,7 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
}`}
></div>
<div className="text-theme-text-primary text-sm font-medium font-['Plus Jakarta Sans'] leading-tight">
For work
{t("onboarding.survey.useCaseWork")}
</div>
</label>
<label
@ -198,7 +202,7 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
}`}
></div>
<div className="text-theme-text-primary text-sm font-medium font-['Plus Jakarta Sans'] leading-tight">
For my personal use
{t("onboarding.survey.useCasePersonal")}
</div>
</label>
<label
@ -224,7 +228,7 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
}`}
></div>
<div className="text-theme-text-primary text-sm font-medium font-['Plus Jakarta Sans'] leading-tight">
Other
{t("onboarding.survey.useCaseOther")}
</div>
</label>
</div>
@ -232,16 +236,16 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
<div className="mt-8">
<label htmlFor="comment" className="text-white text-base font-medium">
Any comments for the team?{" "}
{t("onboarding.survey.comment")}{" "}
<span className="text-neutral-400 text-base font-light">
(Optional)
({t("common.optional")})
</span>
</label>
<textarea
name="comment"
rows={5}
className="mt-2 bg-theme-settings-input-bg text-white text-sm rounded-lg focus:outline-primary-button active:outline-primary-button placeholder:text-theme-settings-input-placeholder outline-none block w-full p-2.5"
placeholder="If you have any questions or comments right now, you can leave them here and we will get back to you. You can also email team@mintplexlabs.com"
placeholder={t("onboarding.survey.commentPlaceholder")}
wrap="soft"
autoComplete="off"
/>
@ -259,7 +263,7 @@ export default function Survey({ setHeader, setForwardBtn, setBackBtn }) {
onClick={skipSurvey}
className="text-white text-base font-medium text-opacity-30 hover:text-opacity-100 mt-8"
>
Skip Survey
{t("onboarding.survey.skip")}
</button>
</div>
</form>

View file

@ -5,11 +5,10 @@ import debounce from "lodash.debounce";
import paths from "@/utils/paths";
import { useNavigate } from "react-router-dom";
import { AUTH_TIMESTAMP, AUTH_TOKEN, AUTH_USER } from "@/utils/constants";
const TITLE = "User Setup";
const DESCRIPTION = "Configure your user settings.";
import { useTranslation } from "react-i18next";
export default function UserSetup({ setHeader, setForwardBtn, setBackBtn }) {
const { t } = useTranslation();
const [selectedOption, setSelectedOption] = useState("");
const [singleUserPasswordValid, setSingleUserPasswordValid] = useState(false);
const [multiUserLoginValid, setMultiUserLoginValid] = useState(false);
@ -18,6 +17,9 @@ export default function UserSetup({ setHeader, setForwardBtn, setBackBtn }) {
const justMeSubmitRef = useRef(null);
const navigate = useNavigate();
const TITLE = t("onboarding.userSetup.title");
const DESCRIPTION = t("onboarding.userSetup.description");
function handleForward() {
if (selectedOption === "just_me" && enablePassword) {
justMeSubmitRef.current?.click();
@ -56,7 +58,7 @@ export default function UserSetup({ setHeader, setForwardBtn, setBackBtn }) {
<div className="w-full flex items-center justify-center flex-col gap-y-6">
<div className="flex flex-col border rounded-lg border-white/20 light:border-theme-sidebar-border p-8 items-center gap-y-4 w-full max-w-[600px]">
<div className=" text-white text-sm font-semibold md:-ml-44">
How many people will be using your instance?
{t("onboarding.userSetup.howManyUsers")}
</div>
<div className="flex flex-col md:flex-row gap-6 w-full justify-center">
<button
@ -67,7 +69,9 @@ export default function UserSetup({ setHeader, setForwardBtn, setBackBtn }) {
: "text-theme-text-primary border-theme-sidebar-border"
} min-w-[230px] h-11 p-4 rounded-[10px] border-2 justify-center items-center gap-[100px] inline-flex hover:border-sky-400/70 hover:text-sky-400 transition-all duration-300`}
>
<div className="text-center text-sm font-bold">Just me</div>
<div className="text-center text-sm font-bold">
{t("onboarding.userSetup.justMe")}
</div>
</button>
<button
onClick={() => setSelectedOption("my_team")}
@ -77,7 +81,9 @@ export default function UserSetup({ setHeader, setForwardBtn, setBackBtn }) {
: "text-theme-text-primary border-theme-sidebar-border"
} min-w-[230px] h-11 p-4 rounded-[10px] border-2 justify-center items-center gap-[100px] inline-flex hover:border-sky-400/70 hover:text-sky-400 transition-all duration-300`}
>
<div className="text-center text-sm font-bold">My team</div>
<div className="text-center text-sm font-bold">
{t("onboarding.userSetup.myTeam")}
</div>
</button>
</div>
</div>
@ -108,6 +114,7 @@ const JustMe = ({
justMeSubmitRef,
navigate,
}) => {
const { t } = useTranslation();
const [itemSelected, setItemSelected] = useState(false);
const [password, setPassword] = useState("");
const handleSubmit = async (e) => {
@ -162,7 +169,7 @@ const JustMe = ({
<div className="w-full flex items-center justify-center flex-col gap-y-6">
<div className="flex flex-col border rounded-lg border-white/20 light:border-theme-sidebar-border p-8 items-center gap-y-4 w-full max-w-[600px]">
<div className=" text-white text-sm font-semibold md:-ml-56">
Would you like to set up a password?
{t("onboarding.userSetup.setPassword")}
</div>
<div className="flex flex-col md:flex-row gap-6 w-full justify-center">
<button
@ -173,7 +180,9 @@ const JustMe = ({
: "text-theme-text-primary border-theme-sidebar-border"
} min-w-[230px] h-11 p-4 rounded-[10px] border-2 justify-center items-center gap-[100px] inline-flex hover:border-sky-400/70 hover:text-sky-400 transition-all duration-300`}
>
<div className="text-center text-sm font-bold">Yes</div>
<div className="text-center text-sm font-bold">
{t("common.yes")}
</div>
</button>
<button
onClick={handleNo}
@ -183,7 +192,9 @@ const JustMe = ({
: "text-theme-text-primary border-theme-sidebar-border"
} min-w-[230px] h-11 p-4 rounded-[10px] border-2 justify-center items-center gap-[100px] inline-flex hover:border-sky-400/70 hover:text-sky-400 transition-all duration-300`}
>
<div className="text-center text-sm font-bold">No</div>
<div className="text-center text-sm font-bold">
{t("common.no")}
</div>
</button>
</div>
{enablePassword && (
@ -192,7 +203,7 @@ const JustMe = ({
htmlFor="name"
className="block mb-3 text-sm font-medium text-white"
>
Instance Password
{t("onboarding.userSetup.instancePassword")}
</label>
<input
name="password"
@ -205,12 +216,9 @@ const JustMe = ({
onChange={handlePasswordChange}
/>
<div className="mt-4 text-white text-opacity-80 text-xs font-base -mb-2">
Passwords must be at least 8 characters.
{t("onboarding.userSetup.passwordReq")}
<br />
<i>
It's important to save this password because there is no
recovery method.
</i>{" "}
<i>{t("onboarding.userSetup.passwordWarn")}</i>{" "}
</div>
<button
type="submit"
@ -226,6 +234,7 @@ const JustMe = ({
};
const MyTeam = ({ setMultiUserLoginValid, myTeamSubmitRef, navigate }) => {
const { t } = useTranslation();
const [username, setUsername] = useState("");
const [password, setPassword] = useState("");
@ -275,7 +284,7 @@ const MyTeam = ({ setMultiUserLoginValid, myTeamSubmitRef, navigate }) => {
htmlFor="name"
className="block mb-3 text-sm font-medium text-white"
>
Admin account username
{t("common.adminUsername")}
</label>
<input
name="username"
@ -289,16 +298,14 @@ const MyTeam = ({ setMultiUserLoginValid, myTeamSubmitRef, navigate }) => {
/>
</div>
<p className=" text-white text-opacity-80 text-xs font-base">
Username must be at least 6 characters long and only contain
lowercase letters, numbers, underscores, and hyphens with no
spaces.
{t("onboarding.userSetup.adminUsernameReq")}
</p>
<div className="mt-4">
<label
htmlFor="name"
className="block mb-3 text-sm font-medium text-white"
>
Admin account password
{t("onboarding.userSetup.adminPassword")}
</label>
<input
name="password"
@ -312,16 +319,14 @@ const MyTeam = ({ setMultiUserLoginValid, myTeamSubmitRef, navigate }) => {
/>
</div>
<p className=" text-white text-opacity-80 text-xs font-base">
Password must be at least 8 characters long.
{t("onboarding.userSetup.adminPasswordReq")}
</p>
</div>
</div>
</div>
<div className="flex w-full justify-between items-center px-6 py-4 space-x-6 border-t rounded-b border-theme-sidebar-border">
<div className="text-theme-text-secondary text-opacity-80 text-xs font-base">
By default, you will be the only admin. Once onboarding is completed
you can create and invite others to be users or admins. Do not lose
your password as only admins can reset passwords.
{t("onboarding.userSetup.teamHint")}
</div>
</div>
<button

View file

@ -24,6 +24,8 @@ function supportedModel(provider, model = "") {
"o1-preview-2024-09-12",
"o1-mini",
"o1-mini-2024-09-12",
"o3-mini",
"o3-mini-2025-01-31",
].includes(model) === false
);
}

View file

@ -442,6 +442,7 @@ const SystemSettings = {
AzureOpenAiModelPref: process.env.OPEN_MODEL_PREF,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
AzureOpenAiTokenLimit: process.env.AZURE_OPENAI_TOKEN_LIMIT || 4096,
AzureOpenAiModelType: process.env.AZURE_OPENAI_MODEL_TYPE || "default",
// Anthropic Keys
AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,

View file

@ -25,6 +25,8 @@ class AzureOpenAiLLM {
}
);
this.model = modelPreference ?? process.env.OPEN_MODEL_PREF;
this.isOTypeModel =
process.env.AZURE_OPENAI_MODEL_TYPE === "reasoning" || false;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
@ -34,7 +36,7 @@ class AzureOpenAiLLM {
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.#log(
`Initialized. Model "${this.model}" @ ${this.promptWindowLimit()} tokens. API-Version: ${this.apiVersion}`
`Initialized. Model "${this.model}" @ ${this.promptWindowLimit()} tokens.\nAPI-Version: ${this.apiVersion}.\nModel Type: ${this.isOTypeModel ? "reasoning" : "default"}`
);
}
@ -55,6 +57,13 @@ class AzureOpenAiLLM {
}
streamingEnabled() {
// Streaming of reasoning models is not supported
if (this.isOTypeModel) {
this.#log(
"Streaming will be disabled. AZURE_OPENAI_MODEL_TYPE is set to 'reasoning'."
);
return false;
}
return "streamGetChatCompletion" in this;
}
@ -110,7 +119,7 @@ class AzureOpenAiLLM {
attachments = [], // This is the specific attachment for only this prompt
}) {
const prompt = {
role: "system",
role: this.isOTypeModel ? "user" : "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
@ -131,7 +140,7 @@ class AzureOpenAiLLM {
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.getChatCompletions(this.model, messages, {
temperature,
...(this.isOTypeModel ? {} : { temperature }),
})
);
@ -161,7 +170,7 @@ class AzureOpenAiLLM {
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
await this.openai.streamChatCompletions(this.model, messages, {
temperature,
...(this.isOTypeModel ? {} : { temperature }),
n: 1,
}),
messages

View file

@ -2,10 +2,12 @@ const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { v4: uuidv4 } = require("uuid");
const { MODEL_MAP } = require("../modelMap");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
class DeepSeekLLM {
constructor(embedder = null, modelPreference = null) {
@ -27,6 +29,11 @@ class DeepSeekLLM {
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.log("Initialized with model:", this.model);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
#appendContext(contextTexts = []) {
@ -71,6 +78,21 @@ class DeepSeekLLM {
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
/**
* Parses and prepends reasoning from the response and returns the full text response.
* @param {Object} response
* @returns {string}
*/
#parseReasoningFromResponse({ message }) {
let textResponse = message?.content;
if (
!!message?.reasoning_content &&
message.reasoning_content.trim().length > 0
)
textResponse = `<think>${message.reasoning_content}</think>${textResponse}`;
return textResponse;
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
@ -90,13 +112,15 @@ class DeepSeekLLM {
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
!result?.output?.hasOwnProperty("choices") ||
result?.output?.choices?.length === 0
)
return null;
throw new Error(
`Invalid response body returned from DeepSeek: ${JSON.stringify(result.output)}`
);
return {
textResponse: result.output.choices[0].message.content,
textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
@ -127,8 +151,143 @@ class DeepSeekLLM {
return measuredStreamRequest;
}
// TODO: This is a copy of the generic handleStream function in responses.js
// to specifically handle the DeepSeek reasoning model `reasoning_content` field.
// When or if ever possible, we should refactor this to be in the generic function.
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
const { uuid = uuidv4(), sources = [] } = responseProps;
let hasUsageMetrics = false;
let usage = {
completion_tokens: 0,
};
return new Promise(async (resolve) => {
let fullText = "";
let reasoningText = "";
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
const reasoningToken = message?.delta?.reasoning_content;
if (
chunk.hasOwnProperty("usage") && // exists
!!chunk.usage && // is not null
Object.values(chunk.usage).length > 0 // has values
) {
if (chunk.usage.hasOwnProperty("prompt_tokens")) {
usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
}
if (chunk.usage.hasOwnProperty("completion_tokens")) {
hasUsageMetrics = true; // to stop estimating counter
usage.completion_tokens = Number(chunk.usage.completion_tokens);
}
}
// Reasoning models will always return the reasoning text before the token text.
if (reasoningToken) {
// If the reasoning text is empty (''), we need to initialize it
// and send the first chunk of reasoning text.
if (reasoningText.length === 0) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `<think>${reasoningToken}`,
close: false,
error: false,
});
reasoningText += `<think>${reasoningToken}`;
continue;
} else {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: reasoningToken,
close: false,
error: false,
});
reasoningText += reasoningToken;
}
}
// If the reasoning text is not empty, but the reasoning token is empty
// and the token text is not empty we need to close the reasoning text and begin sending the token text.
if (!!reasoningText && !reasoningToken && token) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `</think>`,
close: false,
error: false,
});
fullText += `${reasoningText}</think>`;
reasoningText = "";
}
if (token) {
fullText += token;
// If we never saw a usage metric, we can estimate them by number of completion chunks
if (!hasUsageMetrics) usage.completion_tokens++;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
// LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
// Either way, the key `finish_reason` must be present to determine ending chunk.
if (
message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
message.finish_reason !== "" &&
message.finish_reason !== null
) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
break; // Break streaming when a valid finish_reason is first encountered
}
}
} catch (e) {
console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
writeResponseChunk(response, {
uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
stream?.endMeasurement(usage);
resolve(fullText); // Return what we currently have - if anything.
}
});
}
async embedTextInput(textInput) {

View file

@ -66,10 +66,13 @@ const MODEL_MAP = {
"o1-preview-2024-09-12": 128_000,
"o1-mini": 128_000,
"o1-mini-2024-09-12": 128_000,
"o3-mini": 200_000,
"o3-mini-2025-01-31": 200_000,
},
deepseek: {
"deepseek-chat": 128_000,
"deepseek-coder": 128_000,
"deepseek-reasoner": 128_000,
},
xai: {
"grok-beta": 131_072,

View file

@ -31,8 +31,8 @@ class OpenAiLLM {
* Check if the model is an o1 model.
* @returns {boolean}
*/
get isO1Model() {
return this.model.startsWith("o1");
get isOTypeModel() {
return this.model.startsWith("o");
}
#appendContext(contextTexts = []) {
@ -48,7 +48,8 @@ class OpenAiLLM {
}
streamingEnabled() {
if (this.isO1Model) return false;
// o3-mini is the only o-type model that supports streaming
if (this.isOTypeModel && this.model !== "o3-mini") return false;
return "streamGetChatCompletion" in this;
}
@ -68,7 +69,7 @@ class OpenAiLLM {
async isValidChatCompletionModel(modelName = "") {
const isPreset =
modelName.toLowerCase().includes("gpt") ||
modelName.toLowerCase().includes("o1");
modelName.toLowerCase().startsWith("o");
if (isPreset) return true;
const model = await this.openai.models
@ -117,7 +118,7 @@ class OpenAiLLM {
// in order to combat this, we can use the "user" role as a replacement for now
// https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
const prompt = {
role: this.isO1Model ? "user" : "system",
role: this.isOTypeModel ? "user" : "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
@ -141,7 +142,7 @@ class OpenAiLLM {
.create({
model: this.model,
messages,
temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
temperature: this.isOTypeModel ? 1 : temperature, // o1 models only accept temperature 1
})
.catch((e) => {
throw new Error(e.message);
@ -177,7 +178,7 @@ class OpenAiLLM {
model: this.model,
stream: true,
messages,
temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
temperature: this.isOTypeModel ? 1 : temperature, // o1 models only accept temperature 1
}),
messages
// runPromptTokenCalculation: true - We manually count the tokens because OpenAI does not provide them in the stream

View file

@ -167,6 +167,18 @@ class OpenRouterLLM {
return content.flat();
}
/**
* Parses and prepends reasoning from the response and returns the full text response.
* @param {Object} response
* @returns {string}
*/
#parseReasoningFromResponse({ message }) {
let textResponse = message?.content;
if (!!message?.reasoning && message.reasoning.trim().length > 0)
textResponse = `<think>${message.reasoning}</think>${textResponse}`;
return textResponse;
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
@ -200,6 +212,9 @@ class OpenRouterLLM {
model: this.model,
messages,
temperature,
// This is an OpenRouter specific option that allows us to get the reasoning text
// before the token text.
include_reasoning: true,
})
.catch((e) => {
throw new Error(e.message);
@ -207,13 +222,15 @@ class OpenRouterLLM {
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
!result?.output?.hasOwnProperty("choices") ||
result?.output?.choices?.length === 0
)
return null;
throw new Error(
`Invalid response body returned from OpenRouter: ${result.output?.error?.message || "Unknown error"} ${result.output?.error?.code || "Unknown code"}`
);
return {
textResponse: result.output.choices[0].message.content,
textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
@ -236,6 +253,9 @@ class OpenRouterLLM {
stream: true,
messages,
temperature,
// This is an OpenRouter specific option that allows us to get the reasoning text
// before the token text.
include_reasoning: true,
}),
messages
// We have to manually count the tokens
@ -262,6 +282,7 @@ class OpenRouterLLM {
return new Promise(async (resolve) => {
let fullText = "";
let reasoningText = "";
let lastChunkTime = null; // null when first token is still not received.
// Establish listener to early-abort a streaming response
@ -313,8 +334,55 @@ class OpenRouterLLM {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
const reasoningToken = message?.delta?.reasoning;
lastChunkTime = Number(new Date());
// Reasoning models will always return the reasoning text before the token text.
// can be null or ''
if (reasoningToken) {
// If the reasoning text is empty (''), we need to initialize it
// and send the first chunk of reasoning text.
if (reasoningText.length === 0) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `<think>${reasoningToken}`,
close: false,
error: false,
});
reasoningText += `<think>${reasoningToken}`;
continue;
} else {
// If the reasoning text is not empty, we need to append the reasoning text
// to the existing reasoning text.
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: reasoningToken,
close: false,
error: false,
});
reasoningText += reasoningToken;
}
}
// If the reasoning text is not empty, but the reasoning token is empty
// and the token text is not empty we need to close the reasoning text and begin sending the token text.
if (!!reasoningText && !reasoningToken && token) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `</think>`,
close: false,
error: false,
});
fullText += `${reasoningText}</think>`;
reasoningText = "";
}
if (token) {
fullText += token;
writeResponseChunk(response, {

View file

@ -1,6 +1,8 @@
const { v4: uuidv4 } = require("uuid");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
@ -137,8 +139,137 @@ class PerplexityLLM {
return measuredStreamRequest;
}
enrichToken(token, citations) {
if (Array.isArray(citations) && citations.length !== 0) {
return token.replace(/\[(\d+)\]/g, (match, index) => {
const citationIndex = parseInt(index) - 1;
return citations[citationIndex]
? `[[${index}](${citations[citationIndex]})]`
: match;
});
}
return token;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
const timeoutThresholdMs = 800;
const { uuid = uuidv4(), sources = [] } = responseProps;
let hasUsageMetrics = false;
let pplxCitations = []; // Array of links
let usage = {
completion_tokens: 0,
};
return new Promise(async (resolve) => {
let fullText = "";
let lastChunkTime = null;
const handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
const timeoutCheck = setInterval(() => {
if (lastChunkTime === null) return;
const now = Number(new Date());
const diffMs = now - lastChunkTime;
if (diffMs >= timeoutThresholdMs) {
console.log(
`Perplexity stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
);
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
clearInterval(timeoutCheck);
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
}
}, 500);
// Now handle the chunks from the streamed response and append to fullText.
try {
for await (const chunk of stream) {
lastChunkTime = Number(new Date());
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
if (Array.isArray(chunk.citations) && chunk.citations.length !== 0) {
pplxCitations = chunk.citations;
}
// If we see usage metrics in the chunk, we can use them directly
// instead of estimating them, but we only want to assign values if
// the response object is the exact same key:value pair we expect.
if (
chunk.hasOwnProperty("usage") && // exists
!!chunk.usage && // is not null
Object.values(chunk.usage).length > 0 // has values
) {
if (chunk.usage.hasOwnProperty("prompt_tokens")) {
usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
}
if (chunk.usage.hasOwnProperty("completion_tokens")) {
hasUsageMetrics = true; // to stop estimating counter
usage.completion_tokens = Number(chunk.usage.completion_tokens);
}
}
if (token) {
let enrichedToken = this.enrichToken(token, pplxCitations);
fullText += enrichedToken;
if (!hasUsageMetrics) usage.completion_tokens++;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: enrichedToken,
close: false,
error: false,
});
}
if (message?.finish_reason) {
console.log("closing");
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
clearInterval(timeoutCheck);
resolve(fullText);
break; // Break streaming when a valid finish_reason is first encountered
}
}
} catch (e) {
console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
writeResponseChunk(response, {
uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
stream?.endMeasurement(usage);
clearInterval(timeoutCheck);
resolve(fullText); // Return what we currently have - if anything.
}
});
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations

View file

@ -1,4 +1,14 @@
const MODELS = {
"sonar-reasoning-pro": {
id: "sonar-reasoning-pro",
name: "sonar-reasoning-pro",
maxLength: 127072,
},
"sonar-reasoning": {
id: "sonar-reasoning",
name: "sonar-reasoning",
maxLength: 127072,
},
"sonar-pro": {
id: "sonar-pro",
name: "sonar-pro",

View file

@ -1,4 +1,6 @@
| Model | Parameter Count | Context Length | Model Type |
| :---------------------------------- | :-------------- | :------------- | :-------------- |
| `sonar-reasoning-pro` | 8B | 127,072 | Chat Completion |
| `sonar-reasoning` | 8B | 127,072 | Chat Completion |
| `sonar-pro` | 8B | 200,000 | Chat Completion |
| `sonar` | 8B | 127,072 | Chat Completion |

View file

@ -7,7 +7,7 @@ class NativeEmbedder {
// This is a folder that Mintplex Labs hosts for those who cannot capture the HF model download
// endpoint for various reasons. This endpoint is not guaranteed to be active or maintained
// and may go offline at any time at Mintplex Labs's discretion.
#fallbackHost = "https://cdn.useanything.com/support/models/";
#fallbackHost = "https://cdn.anythingllm.com/support/models/";
constructor() {
// Model Card: https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2

View file

@ -1,4 +1,5 @@
const { maximumChunkLength } = require("../../helpers");
const { Ollama } = require("ollama");
class OllamaEmbedder {
constructor() {
@ -7,21 +8,27 @@ class OllamaEmbedder {
if (!process.env.EMBEDDING_MODEL_PREF)
throw new Error("No embedding model was set.");
this.basePath = `${process.env.EMBEDDING_BASE_PATH}/api/embeddings`;
this.basePath = process.env.EMBEDDING_BASE_PATH;
this.model = process.env.EMBEDDING_MODEL_PREF;
// Limit of how many strings we can process in a single pass to stay with resource or network limits
this.maxConcurrentChunks = 1;
this.embeddingMaxChunkLength = maximumChunkLength();
this.client = new Ollama({ host: this.basePath });
this.log(
`initialized with model ${this.model} at ${this.basePath}. num_ctx: ${this.embeddingMaxChunkLength}`
);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
/**
* Checks if the Ollama service is alive by pinging the base path.
* @returns {Promise<boolean>} - A promise that resolves to true if the service is alive, false otherwise.
*/
async #isAlive() {
return await fetch(process.env.EMBEDDING_BASE_PATH, {
method: "HEAD",
})
return await fetch(this.basePath)
.then((res) => res.ok)
.catch((e) => {
this.log(e.message);
@ -40,6 +47,13 @@ class OllamaEmbedder {
* This function takes an array of text chunks and embeds them using the Ollama API.
* chunks are processed sequentially to avoid overwhelming the API with too many requests
* or running out of resources on the endpoint running the ollama instance.
*
* We will use the num_ctx option to set the maximum context window to the max chunk length defined by the user in the settings
* so that the maximum context window is used and content is not truncated.
*
* We also assume the default keep alive option. This could cause issues with models being unloaded and reloaded
* on load memory machines, but that is simply a user-end issue we cannot control. If the LLM and embedder are
* constantly being loaded and unloaded, the user should use another LLM or Embedder to avoid this issue.
* @param {string[]} textChunks - An array of text chunks to embed.
* @returns {Promise<Array<number[]>>} - A promise that resolves to an array of embeddings.
*/
@ -48,7 +62,6 @@ class OllamaEmbedder {
throw new Error(
`Ollama service could not be reached. Is Ollama running?`
);
this.log(
`Embedding ${textChunks.length} chunks of text with ${this.model}.`
);
@ -58,15 +71,17 @@ class OllamaEmbedder {
for (const chunk of textChunks) {
try {
const res = await fetch(this.basePath, {
method: "POST",
body: JSON.stringify({
const res = await this.client.embeddings({
model: this.model,
prompt: chunk,
}),
options: {
// Always set the num_ctx to the max chunk length defined by the user in the settings
// so that the maximum context window is used and content is not truncated.
num_ctx: this.embeddingMaxChunkLength,
},
});
const { embedding } = await res.json();
const { embedding } = res;
if (!Array.isArray(embedding) || embedding.length === 0)
throw new Error("Ollama returned an empty embedding for chunk!");

View file

@ -9,7 +9,7 @@ class NativeEmbeddingReranker {
// This is a folder that Mintplex Labs hosts for those who cannot capture the HF model download
// endpoint for various reasons. This endpoint is not guaranteed to be active or maintained
// and may go offline at any time at Mintplex Labs's discretion.
#fallbackHost = "https://cdn.useanything.com/support/models/";
#fallbackHost = "https://cdn.anythingllm.com/support/models/";
constructor() {
// An alternative model to the mixedbread-ai/mxbai-rerank-xsmall-v1 model (speed on CPU is much slower for this model @ 18docs = 6s)
@ -87,7 +87,7 @@ class NativeEmbeddingReranker {
// Attempt to load the model and tokenizer in this order:
// 1. From local file system cache
// 2. Download and cache from remote host (hf.co)
// 3. Download and cache from fallback host (cdn.useanything.com)
// 3. Download and cache from fallback host (cdn.anythingllm.com)
await this.#getPreTrainedModel();
await this.#getPreTrainedTokenizer();
}

View file

@ -1,4 +1,6 @@
const { SystemSettings } = require("../../../../models/systemSettings");
const { TokenManager } = require("../../../helpers/tiktoken");
const tiktoken = new TokenManager();
const webBrowsing = {
name: "web-browsing",
@ -12,6 +14,11 @@ const webBrowsing = {
aibitat.function({
super: aibitat,
name: this.name,
countTokens: (string) =>
tiktoken
.countFromString(string)
.toString()
.replace(/\B(?=(\d{3})+(?!\d))/g, ","),
description:
"Searches for a given query using a search engine to get better results for the user query.",
examples: [
@ -89,6 +96,18 @@ const webBrowsing = {
return await this[engine](query);
},
/**
* Utility function to truncate a string to a given length for debugging
* calls to the API while keeping the actual values mostly intact
* @param {string} str - The string to truncate
* @param {number} length - The length to truncate the string to
* @returns {string} The truncated string
*/
middleTruncate(str, length = 5) {
if (str.length <= length) return str;
return `${str.slice(0, length)}...${str.slice(-length)}`;
},
/**
* Use Google Custom Search Engines
* Free to set up, easy to use, 100 calls/day
@ -115,7 +134,12 @@ const webBrowsing = {
}"`
);
const data = await fetch(searchURL)
.then((res) => res.json())
.then((res) => {
if (res.ok) return res.json();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ key: this.middleTruncate(process.env.AGENT_GSE_KEY, 5), cx: this.middleTruncate(process.env.AGENT_GSE_CTX, 5), q: query })}`
);
})
.then((searchResult) => searchResult?.items || [])
.then((items) => {
return items.map((item) => {
@ -127,16 +151,20 @@ const webBrowsing = {
});
})
.catch((e) => {
console.log(e);
this.super.handlerProps.log(
`${this.name}: Google Search Error: ${e.message}`
);
return [];
});
if (data.length === 0)
return `No information was found online for the search query.`;
const result = JSON.stringify(data);
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
`${this.caller}: I found ${data.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(data);
return result;
},
/**
@ -173,11 +201,17 @@ const webBrowsing = {
"X-SearchApi-Source": "AnythingLLM",
},
})
.then((res) => res.json())
.then((res) => {
if (res.ok) return res.json();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ auth: this.middleTruncate(process.env.AGENT_SEARCHAPI_API_KEY, 5), q: query })}`
);
})
.then((data) => {
return { response: data, error: null };
})
.catch((e) => {
this.super.handlerProps.log(`SearchApi Error: ${e.message}`);
return { response: null, error: e.message };
});
if (error)
@ -199,10 +233,12 @@ const webBrowsing = {
if (data.length === 0)
return `No information was found online for the search query.`;
const result = JSON.stringify(data);
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
`${this.caller}: I found ${data.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(data);
return result;
},
/**
@ -235,11 +271,17 @@ const webBrowsing = {
redirect: "follow",
}
)
.then((res) => res.json())
.then((res) => {
if (res.ok) return res.json();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ auth: this.middleTruncate(process.env.AGENT_SERPER_DEV_KEY, 5), q: query })}`
);
})
.then((data) => {
return { response: data, error: null };
})
.catch((e) => {
this.super.handlerProps.log(`Serper.dev Error: ${e.message}`);
return { response: null, error: e.message };
});
if (error)
@ -259,10 +301,12 @@ const webBrowsing = {
if (data.length === 0)
return `No information was found online for the search query.`;
const result = JSON.stringify(data);
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
`${this.caller}: I found ${data.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(data);
return result;
},
_bingWebSearch: async function (query) {
if (!process.env.AGENT_BING_SEARCH_API_KEY) {
@ -289,7 +333,12 @@ const webBrowsing = {
process.env.AGENT_BING_SEARCH_API_KEY,
},
})
.then((res) => res.json())
.then((res) => {
if (res.ok) return res.json();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ auth: this.middleTruncate(process.env.AGENT_BING_SEARCH_API_KEY, 5), q: query })}`
);
})
.then((data) => {
const searchResults = data.webPages?.value || [];
return searchResults.map((result) => ({
@ -299,16 +348,20 @@ const webBrowsing = {
}));
})
.catch((e) => {
console.log(e);
this.super.handlerProps.log(
`Bing Web Search Error: ${e.message}`
);
return [];
});
if (searchResponse.length === 0)
return `No information was found online for the search query.`;
const result = JSON.stringify(searchResponse);
this.super.introspect(
`${this.caller}: I found ${searchResponse.length} results - looking over them now.`
`${this.caller}: I found ${searchResponse.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(searchResponse);
return result;
},
_serplyEngine: async function (
query,
@ -353,20 +406,24 @@ const webBrowsing = {
"X-User-Agent": device_type,
},
})
.then((res) => res.json())
.then((res) => {
if (res.ok) return res.json();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ auth: this.middleTruncate(process.env.AGENT_SERPLY_API_KEY, 5), q: query })}`
);
})
.then((data) => {
if (data?.message === "Unauthorized") {
return {
response: null,
error:
"Unauthorized. Please double check your AGENT_SERPLY_API_KEY",
};
}
if (data?.message === "Unauthorized")
throw new Error(
"Unauthorized. Please double check your AGENT_SERPLY_API_KEY"
);
return { response: data, error: null };
})
.catch((e) => {
this.super.handlerProps.log(`Serply Error: ${e.message}`);
return { response: null, error: e.message };
});
if (error)
return `There was an error searching for content. ${error}`;
@ -382,10 +439,12 @@ const webBrowsing = {
if (data.length === 0)
return `No information was found online for the search query.`;
const result = JSON.stringify(data);
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
`${this.caller}: I found ${data.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(data);
return result;
},
_searXNGEngine: async function (query) {
let searchURL;
@ -421,11 +480,19 @@ const webBrowsing = {
"User-Agent": "anything-llm",
},
})
.then((res) => res.json())
.then((res) => {
if (res.ok) return res.json();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ url: searchURL.toString() })}`
);
})
.then((data) => {
return { response: data, error: null };
})
.catch((e) => {
this.super.handlerProps.log(
`SearXNG Search Error: ${e.message}`
);
return { response: null, error: e.message };
});
if (error)
@ -444,10 +511,12 @@ const webBrowsing = {
if (data.length === 0)
return `No information was found online for the search query.`;
const result = JSON.stringify(data);
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
`${this.caller}: I found ${data.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(data);
return result;
},
_tavilySearch: async function (query) {
if (!process.env.AGENT_TAVILY_API_KEY) {
@ -474,11 +543,19 @@ const webBrowsing = {
query: query,
}),
})
.then((res) => res.json())
.then((res) => {
if (res.ok) return res.json();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ auth: this.middleTruncate(process.env.AGENT_TAVILY_API_KEY, 5), q: query })}`
);
})
.then((data) => {
return { response: data, error: null };
})
.catch((e) => {
this.super.handlerProps.log(
`Tavily Search Error: ${e.message}`
);
return { response: null, error: e.message };
});
@ -497,10 +574,12 @@ const webBrowsing = {
if (data.length === 0)
return `No information was found online for the search query.`;
const result = JSON.stringify(data);
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
`${this.caller}: I found ${data.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(data);
return result;
},
_duckDuckGoEngine: async function (query) {
this.super.introspect(
@ -512,15 +591,23 @@ const webBrowsing = {
const searchURL = new URL("https://html.duckduckgo.com/html");
searchURL.searchParams.append("q", query);
const response = await fetch(searchURL.toString());
const response = await fetch(searchURL.toString())
.then((res) => {
if (res.ok) return res.text();
throw new Error(
`${res.status} - ${res.statusText}. params: ${JSON.stringify({ url: searchURL.toString() })}`
);
})
.catch((e) => {
this.super.handlerProps.log(
`DuckDuckGo Search Error: ${e.message}`
);
return null;
});
if (!response.ok) {
return `There was an error searching DuckDuckGo. Status: ${response.status}`;
}
const html = await response.text();
if (!response) return `There was an error searching DuckDuckGo.`;
const html = response;
const data = [];
const results = html.split('<div class="result results_links');
// Skip first element since it's before the first result
@ -556,11 +643,11 @@ const webBrowsing = {
return `No information was found online for the search query.`;
}
const result = JSON.stringify(data);
this.super.introspect(
`${this.caller}: I found ${data.length} results - looking over them now.`
`${this.caller}: I found ${data.length} results - reviewing the results now. (~${this.countTokens(result)} tokens)`
);
return JSON.stringify(data);
return result;
},
});
},

View file

@ -145,7 +145,7 @@ async function openAiModels(apiKey = null) {
.filter(
(model) =>
(model.id.includes("gpt") && !model.id.startsWith("ft:")) ||
model.id.includes("o1")
model.id.startsWith("o") // o1, o1-mini, o3, etc
)
.filter(
(model) =>

View file

@ -1,10 +1,36 @@
const { getEncodingNameForModel, getEncoding } = require("js-tiktoken");
/**
* @class TokenManager
*
* @notice
* We cannot do estimation of tokens here like we do in the collector
* because we need to know the model to do it.
* Other issues are we also do reverse tokenization here for the chat history during cannonballing.
* So here we are stuck doing the actual tokenization and encoding until we figure out what to do with prompt overflows.
*/
class TokenManager {
static instance = null;
static currentModel = null;
constructor(model = "gpt-3.5-turbo") {
if (TokenManager.instance && TokenManager.currentModel === model) {
this.log("Returning existing instance for model:", model);
return TokenManager.instance;
}
this.model = model;
this.encoderName = this.#getEncodingFromModel(model);
this.encoder = getEncoding(this.encoderName);
TokenManager.instance = this;
TokenManager.currentModel = model;
this.log("Initialized new TokenManager instance for model:", model);
return this;
}
log(text, ...args) {
console.log(`\x1b[35m[TokenManager]\x1b[0m ${text}`, ...args);
}
#getEncodingFromModel(model) {
@ -15,9 +41,11 @@ class TokenManager {
}
}
// Pass in an empty array of disallowedSpecials to handle all tokens as text and to be tokenized.
// https://github.com/openai/tiktoken/blob/9e79899bc248d5313c7dd73562b5e211d728723d/tiktoken/core.py#L91C20-L91C38
// Returns number[]
/**
* Pass in an empty array of disallowedSpecials to handle all tokens as text and to be tokenized.
* @param {string} input
* @returns {number[]}
*/
tokensFromString(input = "") {
try {
const tokens = this.encoder.encode(String(input), undefined, []);
@ -28,17 +56,31 @@ class TokenManager {
}
}
/**
* Converts an array of tokens back to a string.
* @param {number[]} tokens
* @returns {string}
*/
bytesFromTokens(tokens = []) {
const bytes = this.encoder.decode(tokens);
return bytes;
}
// Returns number
/**
* Counts the number of tokens in a string.
* @param {string} input
* @returns {number}
*/
countFromString(input = "") {
const tokens = this.tokensFromString(input);
return tokens.length;
}
/**
* Estimates the number of tokens in a string or array of strings.
* @param {string | string[]} input
* @returns {number}
*/
statsFrom(input) {
if (typeof input === "string") return this.countFromString(input);

View file

@ -35,6 +35,15 @@ const KEY_MAPPING = {
envKey: "EMBEDDING_MODEL_PREF",
checks: [isNotEmpty],
},
AzureOpenAiModelType: {
envKey: "AZURE_OPENAI_MODEL_TYPE",
checks: [
(input) =>
["default", "reasoning"].includes(input)
? null
: "Invalid model type. Must be one of: default, reasoning.",
],
},
// Anthropic Settings
AnthropicApiKey: {