diff --git a/.vscode/settings.json b/.vscode/settings.json
index eecaa83fd..110c4fa6e 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -15,19 +15,24 @@
     "epub",
     "GROQ",
     "hljs",
+    "huggingface",
     "inferencing",
+    "koboldcpp",
     "Langchain",
     "lmstudio",
+    "localai",
     "mbox",
     "Milvus",
     "Mintplex",
     "moderations",
     "Ollama",
+    "Oobabooga",
     "openai",
     "opendocument",
     "openrouter",
     "Qdrant",
     "Serper",
+    "textgenwebui",
     "togetherai",
     "vectordbs",
     "Weaviate",
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 400eef02d..ef260dec1 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -15,6 +15,15 @@ const ENABLED_PROVIDERS = [
   "azure",
   "koboldcpp",
   "togetherai",
+  "openrouter",
+  "mistral",
+  "perplexity",
+  "textgenwebui",
+  // TODO: More agent support.
+  // "generic-openai", // Need to support text-input for agent model input for this to be enabled.
+  // "cohere",         // Has tool calling and will need to build explicit support
+  // "huggingface"     // Can be done but already has issues with no-chat templated. Needs to be tested.
+  // "gemini",         // Too rate limited and broken in several ways to use for agents.
+  // "gemini",         // Too rate limited and broken in several ways to use for agents.
 ];
 const WARN_PERFORMANCE = [
   "lmstudio",
@@ -23,6 +32,8 @@ const WARN_PERFORMANCE = [
   "koboldcpp",
   "ollama",
   "localai",
+  "openrouter",
+  "generic-openai",
 ];
 
 const LLM_DEFAULT = {
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 3413bd359..f21c4aa45 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -753,6 +753,16 @@ ${this.getHistory({ to: route.to })
         return new Providers.KoboldCPPProvider({});
       case "localai":
         return new Providers.LocalAIProvider({ model: config.model });
+      case "openrouter":
+        return new Providers.OpenRouterProvider({ model: config.model });
+      case "mistral":
+        return new Providers.MistralProvider({ model: config.model });
+      case "generic-openai":
+        return new Providers.GenericOpenAiProvider({ model: config.model });
+      case "perplexity":
+        return new Providers.PerplexityProvider({ model: config.model });
+      case "textgenwebui":
+        return new Providers.TextWebGenUiProvider({});
 
       default:
         throw new Error(
diff --git a/server/utils/agents/aibitat/providers/genericOpenAi.js b/server/utils/agents/aibitat/providers/genericOpenAi.js
new file mode 100644
index 000000000..3521bc7d0
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/genericOpenAi.js
@@ -0,0 +1,121 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The provider for any generic OpenAI-compatible endpoint.
+ * Since we cannot promise that a generic endpoint supports native tool
+ * calling (it almost certainly does not), we wrap it in UnTooled, which
+ * is often more reliable anyway.
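+ *
+ * @example
+ * // A minimal usage sketch (inside an async context); assumes GENERIC_OPEN_AI_BASE_PATH
+ * // points at an OpenAI-compatible server and `messages` is an OpenAI-style chat array.
+ * const provider = new GenericOpenAiProvider({ model: "gpt-3.5-turbo" });
+ * const { result } = await provider.complete(messages, []);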
+ */
+class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    super();
+    const { model = "gpt-3.5-turbo" } = config;
+    const client = new OpenAI({
+      baseURL: process.env.GENERIC_OPEN_AI_BASE_PATH,
+      apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("Generic OpenAI chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("Generic OpenAI chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions A list of function definitions the model may call, if any.
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = GenericOpenAiProvider;
diff --git a/server/utils/agents/aibitat/providers/groq.js b/server/utils/agents/aibitat/providers/groq.js
index 3b87ba510..720d87437 100644
--- a/server/utils/agents/aibitat/providers/groq.js
+++ b/server/utils/agents/aibitat/providers/groq.js
@@ -4,6 +4,9 @@ const { RetryError } = require("../error.js");
 
 /**
  * The provider for the Groq provider.
+ * OpenAI-style tool calling with Groq is currently fast but unreliable.
+ * We should probably migrate this provider to UnTooled to improve
+ * coherence.
  */
 class GroqProvider extends Provider {
   model;
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 6f8a2da0b..14748b2ec 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -7,6 +7,11 @@ const TogetherAIProvider = require("./togetherai.js");
 const AzureOpenAiProvider = require("./azure.js");
 const KoboldCPPProvider = require("./koboldcpp.js");
 const LocalAIProvider = require("./localai.js");
+const OpenRouterProvider = require("./openrouter.js");
+const MistralProvider = require("./mistral.js");
+const GenericOpenAiProvider = require("./genericOpenAi.js");
+const PerplexityProvider = require("./perplexity.js");
+const TextWebGenUiProvider = require("./textgenwebui.js");
 
 module.exports = {
   OpenAIProvider,
@@ -18,4 +23,9 @@ module.exports = {
   AzureOpenAiProvider,
   KoboldCPPProvider,
   LocalAIProvider,
+  OpenRouterProvider,
+  MistralProvider,
+  GenericOpenAiProvider,
+  PerplexityProvider,
+  TextWebGenUiProvider,
 };
diff --git a/server/utils/agents/aibitat/providers/mistral.js b/server/utils/agents/aibitat/providers/mistral.js
new file mode 100644
index 000000000..cdc2a5e75
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/mistral.js
@@ -0,0 +1,122 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The provider for the Mistral API.
+ * Mistral limits which models can call tools, and even for those models the
+ * names change and do not match the docs. Even with the right model, calls
+ * still fail because the API is not truly OpenAI compatible, so it is easier
+ * to wrap this with UnTooled, which is plain text and works far more reliably.
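+ *
+ * @example
+ * // A minimal usage sketch (inside an async context); assumes MISTRAL_API_KEY is set
+ * // and `messages` is an OpenAI-style chat message array.
+ * const provider = new MistralProvider({ model: "mistral-medium" });
+ * const { result } = await provider.complete(messages, []);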
+ */
+class MistralProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    super();
+    const { model = "mistral-medium" } = config;
+    const client = new OpenAI({
+      baseURL: "https://api.mistral.ai/v1",
+      apiKey: process.env.MISTRAL_API_KEY,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("LMStudio chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("LMStudio chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions A list of function definitions the model may call, if any.
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = MistralProvider;
diff --git a/server/utils/agents/aibitat/providers/openrouter.js b/server/utils/agents/aibitat/providers/openrouter.js
new file mode 100644
index 000000000..81297ae28
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/openrouter.js
@@ -0,0 +1,123 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The provider for the OpenRouter API.
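+ *
+ * @example
+ * // A minimal usage sketch (inside an async context); assumes OPENROUTER_API_KEY is set
+ * // and `messages` is an OpenAI-style chat message array.
+ * const provider = new OpenRouterProvider({ model: "openrouter/auto" });
+ * const { result } = await provider.complete(messages, []);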
+ */
+class OpenRouterProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    const { model = "openrouter/auto" } = config;
+    super();
+    const client = new OpenAI({
+      baseURL: "https://openrouter.ai/api/v1",
+      apiKey: process.env.OPENROUTER_API_KEY,
+      maxRetries: 3,
+      defaultHeaders: {
+        "HTTP-Referer": "https://useanything.com",
+        "X-Title": "AnythingLLM",
+      },
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("OpenRouter chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("OpenRouter chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   * Stubbed since OpenRouter has no cost basis.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = OpenRouterProvider;
diff --git a/server/utils/agents/aibitat/providers/perplexity.js b/server/utils/agents/aibitat/providers/perplexity.js
new file mode 100644
index 000000000..29970fd06
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/perplexity.js
@@ -0,0 +1,118 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The provider for the Perplexity API.
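+ *
+ * @example
+ * // A minimal usage sketch (inside an async context); assumes PERPLEXITY_API_KEY is set
+ * // and `messages` is an OpenAI-style chat message array.
+ * const provider = new PerplexityProvider({ model: "sonar-small-online" });
+ * const { result } = await provider.complete(messages, []);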
+ */
+class PerplexityProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    super();
+    const { model = "sonar-small-online" } = config;
+    const client = new OpenAI({
+      baseURL: "https://api.perplexity.ai",
+      apiKey: process.env.PERPLEXITY_API_KEY ?? null,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("Perplexity chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("Perplexity chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions A list of function definitions the model may call, if any.
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = PerplexityProvider;
diff --git a/server/utils/agents/aibitat/providers/textgenwebui.js b/server/utils/agents/aibitat/providers/textgenwebui.js
new file mode 100644
index 000000000..767577d42
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/textgenwebui.js
@@ -0,0 +1,118 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The provider for Oobabooga's text-generation-webui.
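+ *
+ * @example
+ * // A minimal usage sketch (inside an async context); assumes TEXT_GEN_WEB_UI_BASE_PATH
+ * // points at a running text-generation-webui server and `messages` is a chat array.
+ * const provider = new TextWebGenUiProvider();
+ * const { result } = await provider.complete(messages, []);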
+ */
+class TextWebGenUiProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(_config = {}) {
+    super();
+    const client = new OpenAI({
+      baseURL: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
+      apiKey: null,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = null; // text-generation-webui does not have a model pref.
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("Oobabooga chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("Oobabooga chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions A list of function definitions the model may call, if any.
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   * Stubbed since KoboldCPP has no cost basis.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = TextWebGenUiProvider;
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 768ad8199..7851deb7b 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -115,6 +115,29 @@ class AgentHandler {
         if (!process.env.GEMINI_API_KEY)
           throw new Error("Gemini API key must be provided to use agents.");
         break;
+      case "openrouter":
+        if (!process.env.OPENROUTER_API_KEY)
+          throw new Error("OpenRouter API key must be provided to use agents.");
+        break;
+      case "mistral":
+        if (!process.env.MISTRAL_API_KEY)
+          throw new Error("Mistral API key must be provided to use agents.");
+        break;
+      case "generic-openai":
+        if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
+          throw new Error("API base path must be provided to use agents.");
+        break;
+      case "perplexity":
+        if (!process.env.PERPLEXITY_API_KEY)
+          throw new Error("Perplexity API key must be provided to use agents.");
+        break;
+      case "textgenwebui":
+        if (!process.env.TEXT_GEN_WEB_UI_BASE_PATH)
+          throw new Error(
+            "TextWebGenUI API base path must be provided to use agents."
+          );
+        break;
+
       default:
         throw new Error("No provider found to power agent cluster.");
     }
@@ -142,6 +165,16 @@ class AgentHandler {
         return "gemini-pro";
       case "localai":
         return null;
+      case "openrouter":
+        return "openrouter/auto";
+      case "mistral":
+        return "mistral-medium";
+      case "generic-openai":
+        return "gpt-3.5-turbo";
+      case "perplexity":
+        return "sonar-small-online";
+      case "textgenwebui":
+        return null;
       default:
         return "unknown";
     }