diff --git a/server/utils/AiProviders/anthropic/index.js b/server/utils/AiProviders/anthropic/index.js
index 4da56bf9a..ad3694d88 100644
--- a/server/utils/AiProviders/anthropic/index.js
+++ b/server/utils/AiProviders/anthropic/index.js
@@ -66,13 +66,6 @@ class AnthropicLLM {
     return validModels.includes(modelName);
   }
 
-  // Moderation can be done with Anthropic, but its not really "exact" so we skip it
-  // https://docs.anthropic.com/claude/docs/content-moderation
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   constructPrompt({
     systemPrompt = "",
     contextTexts = [],
diff --git a/server/utils/AiProviders/azureOpenAi/index.js b/server/utils/AiProviders/azureOpenAi/index.js
index 3678567e3..231d9c04c 100644
--- a/server/utils/AiProviders/azureOpenAi/index.js
+++ b/server/utils/AiProviders/azureOpenAi/index.js
@@ -72,11 +72,6 @@ class AzureOpenAiLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented by Azure OpenAI so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = [], { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
diff --git a/server/utils/AiProviders/cohere/index.js b/server/utils/AiProviders/cohere/index.js
index 6357d503e..f57d2bc5c 100644
--- a/server/utils/AiProviders/cohere/index.js
+++ b/server/utils/AiProviders/cohere/index.js
@@ -102,11 +102,6 @@ class CohereLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index ef1845801..e49f6b8b9 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -112,12 +112,6 @@ class GeminiLLM {
     return validModels.includes(modelName);
   }
 
-  // Moderation cannot be done with Gemini.
-  // Not implemented so must be stubbed
-  async isSafe(_input = "") {
-    return { safe: true, reasons: [] };
-  }
-
   constructPrompt({
     systemPrompt = "",
     contextTexts = [],
diff --git a/server/utils/AiProviders/genericOpenAi/index.js b/server/utils/AiProviders/genericOpenAi/index.js
index 8d0c0f34a..7c027c434 100644
--- a/server/utils/AiProviders/genericOpenAi/index.js
+++ b/server/utils/AiProviders/genericOpenAi/index.js
@@ -83,11 +83,6 @@ class GenericOpenAiLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.chat.completions
       .create({
diff --git a/server/utils/AiProviders/groq/index.js b/server/utils/AiProviders/groq/index.js
index ba0812194..067c60c7a 100644
--- a/server/utils/AiProviders/groq/index.js
+++ b/server/utils/AiProviders/groq/index.js
@@ -85,11 +85,6 @@ class GroqLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/AiProviders/huggingface/index.js b/server/utils/AiProviders/huggingface/index.js
index d25725c2a..ec256a292 100644
--- a/server/utils/AiProviders/huggingface/index.js
+++ b/server/utils/AiProviders/huggingface/index.js
@@ -79,11 +79,6 @@ class HuggingFaceLLM {
     ];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.createChatCompletion({
       model: this.model,
diff --git a/server/utils/AiProviders/koboldCPP/index.js b/server/utils/AiProviders/koboldCPP/index.js
index 151636484..f29b65879 100644
--- a/server/utils/AiProviders/koboldCPP/index.js
+++ b/server/utils/AiProviders/koboldCPP/index.js
@@ -79,11 +79,6 @@ class KoboldCPPLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.chat.completions
       .create({
diff --git a/server/utils/AiProviders/liteLLM/index.js b/server/utils/AiProviders/liteLLM/index.js
index 28d0b71dc..884049876 100644
--- a/server/utils/AiProviders/liteLLM/index.js
+++ b/server/utils/AiProviders/liteLLM/index.js
@@ -78,11 +78,6 @@ class LiteLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.chat.completions
       .create({
diff --git a/server/utils/AiProviders/lmStudio/index.js b/server/utils/AiProviders/lmStudio/index.js
index 6345fb04e..bdafd0d63 100644
--- a/server/utils/AiProviders/lmStudio/index.js
+++ b/server/utils/AiProviders/lmStudio/index.js
@@ -76,11 +76,6 @@ class LMStudioLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
diff --git a/server/utils/AiProviders/localAi/index.js b/server/utils/AiProviders/localAi/index.js
index 8bc4d047d..be3f2516f 100644
--- a/server/utils/AiProviders/localAi/index.js
+++ b/server/utils/AiProviders/localAi/index.js
@@ -66,11 +66,6 @@ class LocalAiLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/AiProviders/mistral/index.js b/server/utils/AiProviders/mistral/index.js
index c2546299f..92cc63f5a 100644
--- a/server/utils/AiProviders/mistral/index.js
+++ b/server/utils/AiProviders/mistral/index.js
@@ -62,10 +62,6 @@ class MistralLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_ = "") {
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/AiProviders/native/index.js b/server/utils/AiProviders/native/index.js
index 0cbd87476..630cc9ea1 100644
--- a/server/utils/AiProviders/native/index.js
+++ b/server/utils/AiProviders/native/index.js
@@ -117,11 +117,6 @@ class NativeLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const model = await this.#llamaClient({ temperature });
     const response = await model.call(messages);
diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js
index 609a7601b..5c9f24f1e 100644
--- a/server/utils/AiProviders/ollama/index.js
+++ b/server/utils/AiProviders/ollama/index.js
@@ -99,11 +99,6 @@ class OllamaAILLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const model = this.#ollamaClient({ temperature });
     const textResponse = await model
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 9e55074c3..1c79aac12 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -86,37 +86,6 @@ class OpenAiLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(input = "") {
-    const { flagged = false, categories = {} } = await this.openai.moderations
-      .create({ input })
-      .then((res) => {
-        if (!res.hasOwnProperty("results"))
-          throw new Error("OpenAI moderation: No results!");
-        if (res.results.length === 0)
-          throw new Error("OpenAI moderation: No results length!");
-        return res.results[0];
-      })
-      .catch((error) => {
-        throw new Error(
-          `OpenAI::CreateModeration failed with: ${error.message}`
-        );
-      });
-
-    if (!flagged) return { safe: true, reasons: [] };
-    const reasons = Object.keys(categories)
-      .map((category) => {
-        const value = categories[category];
-        if (value === true) {
-          return category.replace("/", " or ");
-        } else {
-          return null;
-        }
-      })
-      .filter((reason) => !!reason);
-
-    return { safe: false, reasons };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/AiProviders/openRouter/index.js b/server/utils/AiProviders/openRouter/index.js
index 301ed7046..53e71c973 100644
--- a/server/utils/AiProviders/openRouter/index.js
+++ b/server/utils/AiProviders/openRouter/index.js
@@ -124,11 +124,6 @@ class OpenRouterLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/AiProviders/perplexity/index.js b/server/utils/AiProviders/perplexity/index.js
index d821b2e37..438306638 100644
--- a/server/utils/AiProviders/perplexity/index.js
+++ b/server/utils/AiProviders/perplexity/index.js
@@ -75,11 +75,6 @@ class PerplexityLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/AiProviders/textGenWebUI/index.js b/server/utils/AiProviders/textGenWebUI/index.js
index 71d505777..484a35c8b 100644
--- a/server/utils/AiProviders/textGenWebUI/index.js
+++ b/server/utils/AiProviders/textGenWebUI/index.js
@@ -76,11 +76,6 @@ class TextGenWebUILLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     const result = await this.openai.chat.completions
       .create({
diff --git a/server/utils/AiProviders/togetherAi/index.js b/server/utils/AiProviders/togetherAi/index.js
index cdfef3397..5d25edf9e 100644
--- a/server/utils/AiProviders/togetherAi/index.js
+++ b/server/utils/AiProviders/togetherAi/index.js
@@ -73,11 +73,6 @@ class TogetherAiLLM {
     return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
   }
 
-  async isSafe(_input = "") {
-    // Not implemented so must be stubbed
-    return { safe: true, reasons: [] };
-  }
-
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!(await this.isValidChatCompletionModel(this.model)))
       throw new Error(
diff --git a/server/utils/chats/embed.js b/server/utils/chats/embed.js
index 8488aedd5..adbe7e9bd 100644
--- a/server/utils/chats/embed.js
+++ b/server/utils/chats/embed.js
@@ -33,20 +33,6 @@ async function streamChatWithForEmbed(
     model: chatModel ?? embed.workspace?.chatModel,
   });
   const VectorDb = getVectorDbClass();
-  const { safe, reasons = [] } = await LLMConnector.isSafe(message);
-  if (!safe) {
-    writeResponseChunk(response, {
-      id: uuid,
-      type: "abort",
-      textResponse: null,
-      sources: [],
-      close: true,
-      error: `This message was moderated and will not be allowed. Violations for ${reasons.join(
-        ", "
-      )} found.`,
-    });
-    return;
-  }
 
   const messageLimit = 20;
   const hasVectorizedSpace = await VectorDb.hasNamespace(embed.workspace.slug);
diff --git a/server/utils/chats/index.js b/server/utils/chats/index.js
index f3e0baae2..2068d3511 100644
--- a/server/utils/chats/index.js
+++ b/server/utils/chats/index.js
@@ -56,19 +56,6 @@ async function chatWithWorkspace(
     model: workspace?.chatModel,
   });
   const VectorDb = getVectorDbClass();
-  const { safe, reasons = [] } = await LLMConnector.isSafe(message);
-  if (!safe) {
-    return {
-      id: uuid,
-      type: "abort",
-      textResponse: null,
-      sources: [],
-      close: true,
-      error: `This message was moderated and will not be allowed. Violations for ${reasons.join(
-        ", "
-      )} found.`,
-    };
-  }
 
   const messageLimit = workspace?.openAiHistory || 20;
   const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug);
diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js
index 770e6cb6b..a3f4f3b1d 100644
--- a/server/utils/chats/stream.js
+++ b/server/utils/chats/stream.js
@@ -53,20 +53,6 @@ async function streamChatWithWorkspace(
     model: workspace?.chatModel,
   });
   const VectorDb = getVectorDbClass();
-  const { safe, reasons = [] } = await LLMConnector.isSafe(message);
-  if (!safe) {
-    writeResponseChunk(response, {
-      id: uuid,
-      type: "abort",
-      textResponse: null,
-      sources: [],
-      close: true,
-      error: `This message was moderated and will not be allowed. Violations for ${reasons.join(
-        ", "
-      )} found.`,
-    });
-    return;
-  }
 
   const messageLimit = workspace?.openAiHistory || 20;
   const hasVectorizedSpace = await VectorDb.hasNamespace(workspace.slug);
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 302ec9589..439400696 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -1,3 +1,50 @@
+/**
+ * @typedef {Object} BaseLLMProvider - A basic LLM provider object
+ * @property {Function} streamingEnabled - Checks if streaming is enabled for chat completions.
+ * @property {Function} promptWindowLimit - Returns the token limit for the current model.
+ * @property {Function} isValidChatCompletionModel - Validates if the provided model is suitable for chat completion.
+ * @property {Function} constructPrompt - Constructs a formatted prompt for the chat completion request.
+ * @property {Function} getChatCompletion - Gets a chat completion response from the provider.
+ * @property {Function} streamGetChatCompletion - Streams a chat completion response from the provider.
+ * @property {Function} handleStream - Handles the streaming response.
+ * @property {Function} embedTextInput - Embeds the provided text input using the specified embedder.
+ * @property {Function} embedChunks - Embeds multiple chunks of text using the specified embedder.
+ * @property {Function} compressMessages - Compresses chat messages to fit within the token limit.
+ */
+
+/**
+ * @typedef {Object} BaseVectorDatabaseProvider
+ * @property {string} name - The name of the Vector Database instance.
+ * @property {Function} connect - Connects to the Vector Database client.
+ * @property {Function} totalVectors - Returns the total number of vectors in the database.
+ * @property {Function} namespaceCount - Returns the count of vectors in a given namespace.
+ * @property {Function} similarityResponse - Performs a similarity search on a given namespace.
+ * @property {Function} namespace - Retrieves the specified namespace collection.
+ * @property {Function} hasNamespace - Checks if a namespace exists.
+ * @property {Function} namespaceExists - Verifies if a namespace exists in the client.
+ * @property {Function} deleteVectorsInNamespace - Deletes all vectors in a specified namespace.
+ * @property {Function} deleteDocumentFromNamespace - Deletes a document from a specified namespace.
+ * @property {Function} addDocumentToNamespace - Adds a document to a specified namespace.
+ * @property {Function} performSimilaritySearch - Performs a similarity search in the namespace.
+ */
+
+/**
+ * @typedef {Object} BaseEmbedderProvider
+ * @property {string} model - The model used for embedding.
+ * @property {number} maxConcurrentChunks - The maximum number of chunks processed concurrently.
+ * @property {number} embeddingMaxChunkLength - The maximum length of each chunk for embedding.
+ * @property {Function} embedTextInput - Embeds a single text input.
+ * @property {Function} embedChunks - Embeds multiple chunks of text.
+ */
+
+/**
+ * Gets the system's current vector database provider.
+ * @returns {BaseVectorDatabaseProvider}
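+ * @example
+ * // Usage sketch — method shapes follow the BaseVectorDatabaseProvider typedef above; the slug is illustrative:
+ * // const VectorDb = getVectorDbClass();
+ * // const exists = await VectorDb.hasNamespace("my-workspace");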
+ */
 function getVectorDbClass() {
   const vectorSelection = process.env.VECTOR_DB || "lancedb";
   switch (vectorSelection) {
@@ -30,6 +77,16 @@ function getVectorDbClass() {
   }
 }
 
+/**
+ * Returns the LLM provider with its embedder attached, selected via the passed provider or the system's LLM_PROVIDER setting.
+ * @param {{provider: string | null, model: string | null} | null} params - Initialization params for the LLM provider
+ * @returns {BaseLLMProvider}
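+ * @example
+ * // Illustrative sketch — the provider/model strings are assumptions, not defaults:
+ * // const llm = getLLMProvider({ provider: "openai", model: "gpt-3.5-turbo" });
+ * // const messages = llm.constructPrompt({ systemPrompt: "", contextTexts: [], chatHistory: [], userPrompt: "Hello!" });
+ * // const reply = await llm.getChatCompletion(messages, { temperature: 0.7 });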
+ */
 function getLLMProvider({ provider = null, model = null } = {}) {
   const LLMSelection = provider ?? process.env.LLM_PROVIDER ?? "openai";
   const embedder = getEmbeddingEngineSelection();
@@ -99,6 +156,14 @@ function getLLMProvider({ provider = null, model = null } = {}) {
   }
 }
 
+/**
+ * Returns the embedder provider on its own, as defined by the current system settings.
+ * @returns {BaseEmbedderProvider}
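+ * @example
+ * // Sketch only — embedTextInput comes from the BaseEmbedderProvider typedef above; the input string is illustrative:
+ * // const embedder = getEmbeddingEngineSelection();
+ * // const vector = await embedder.embedTextInput("Hello, world!");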
+ */
 function getEmbeddingEngineSelection() {
   const { NativeEmbedder } = require("../EmbeddingEngines/native");
   const engineSelection = process.env.EMBEDDING_ENGINE;