diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 5fd9c8e33..d0b0b4893 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -24,6 +24,7 @@ const ENABLED_PROVIDERS = [
   "bedrock",
   "fireworksai",
   "deepseek",
+  "litellm",
   "apipie",
   // TODO: More agent support.
   // "cohere",         // Has tool calling and will need to build explicit support
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index cabedb7f8..0c7481982 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -785,6 +785,8 @@ ${this.getHistory({ to: route.to })
         return new Providers.FireworksAIProvider({ model: config.model });
       case "deepseek":
         return new Providers.DeepSeekProvider({ model: config.model });
+      case "litellm":
+        return new Providers.LiteLLMProvider({ model: config.model });
       case "apipie":
         return new Providers.ApiPieProvider({ model: config.model });
 
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index 5e64e8f26..afaefa1c9 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -130,6 +130,22 @@ class Provider {
           apiKey: process.env.FIREWORKS_AI_LLM_API_KEY,
           ...config,
         });
+      case "apipie":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://apipie.ai/v1",
+          },
+          apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+          ...config,
+        });
+      case "deepseek":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://api.deepseek.com/v1",
+          },
+          apiKey: process.env.DEEPSEEK_API_KEY ?? null,
+          ...config,
+        });
 
       // OSS Model Runners
       // case "anythingllm_ollama":
@@ -174,22 +190,15 @@ class Provider {
           apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? "not-used",
           ...config,
         });
-      case "deepseek":
+      case "litellm":
         return new ChatOpenAI({
           configuration: {
-            baseURL: "https://api.deepseek.com/v1",
+            baseURL: process.env.LITE_LLM_BASE_PATH,
           },
-          apiKey: process.env.DEEPSEEK_API_KEY ?? null,
-          ...config,
-        });
-      case "apipie":
-        return new ChatOpenAI({
-          configuration: {
-            baseURL: "https://apipie.ai/v1",
-          },
-          apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
+          apiKey: process.env.LITE_LLM_API_KEY ?? null,
           ...config,
         });
+
       default:
         throw new Error(`Unsupported provider ${provider} for this task.`);
     }
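
LiteLLM slots into this helper for the same reason the relocated `deepseek` and `apipie` cases do: it exposes an OpenAI-compatible endpoint, so the LangChain `ChatOpenAI` wrapper only needs its `baseURL` pointed at the proxy. A minimal sketch of the equivalent call with the raw `openai` SDK, assuming a LiteLLM proxy at `http://localhost:4000` (the address and model name are illustrative assumptions; in the diff they come from `LITE_LLM_BASE_PATH` and `LITE_LLM_MODEL_PREF`):

```js
// Sketch only: base URL and model are hypothetical placeholders.
const OpenAI = require("openai");

const client = new OpenAI({
  baseURL: "http://localhost:4000", // hypothetical LiteLLM proxy address
  apiKey: process.env.LITE_LLM_API_KEY ?? "not-used",
});

client.chat.completions
  .create({
    model: "gpt-4o", // whatever model the proxy is configured to route
    messages: [{ role: "user", content: "ping" }],
  })
  .then((res) => console.log(res.choices[0].message.content));
```
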
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 507bf181b..f5ae66420 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -15,6 +15,7 @@ const TextWebGenUiProvider = require("./textgenwebui.js");
 const AWSBedrockProvider = require("./bedrock.js");
 const FireworksAIProvider = require("./fireworksai.js");
 const DeepSeekProvider = require("./deepseek.js");
+const LiteLLMProvider = require("./litellm.js");
 const ApiPieProvider = require("./apipie.js");
 
 module.exports = {
@@ -35,5 +36,6 @@ module.exports = {
   TextWebGenUiProvider,
   AWSBedrockProvider,
   FireworksAIProvider,
+  LiteLLMProvider,
   ApiPieProvider,
 };
diff --git a/server/utils/agents/aibitat/providers/litellm.js b/server/utils/agents/aibitat/providers/litellm.js
new file mode 100644
index 000000000..ad489c269
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/litellm.js
@@ -0,0 +1,112 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for LiteLLM.
+ */
+class LiteLLMProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    super();
+    const { model = null } = config;
+    const client = new OpenAI({
+      baseURL: process.env.LITE_LLM_BASE_PATH,
+      apiKey: process.env.LITE_LLM_API_KEY ?? null,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model || process.env.LITE_LLM_MODEL_PREF;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("LiteLLM chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("LiteLLM chat: No result choices returned!");
+        return result.choices[0].message.content;
+      })
+      .catch((error) => {
+        this.providerLog(`LiteLLM chat failed: ${error.message}`);
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions An optional list of tool definitions the model may call.
+   * @returns The completion.
+   */
+  async complete(messages, functions = []) {
+    try {
+      let completion;
+      if (functions.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The Deduplicator inherited via UnTooled mostly exists to prevent the agent
+      // from calling the exact same function over and over in a loop within a single chat
+      // exchange, _but_ previously used tools should still be callable in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      this.providerLog(`LiteLLM chat failed: ${error.message}`);
+      throw error;
+    }
+  }
+
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = LiteLLMProvider;
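
For context on how aibitat consumes this class, here is a hedged usage sketch; the messages and empty tool list are made up for illustration. `complete()` resolves with `{ result, cost }`, plus a `functionCall` payload when a valid tool call is detected:

```js
// Illustrative only - aibitat normally constructs providers through its
// own factory (see the switch in aibitat/index.js above).
const LiteLLMProvider = require("./litellm.js");

(async () => {
  const provider = new LiteLLMProvider({ model: "gpt-4o" }); // hypothetical; falls back to LITE_LLM_MODEL_PREF
  const { result, functionCall } = await provider.complete(
    [{ role: "user", content: "What is 2 + 2?" }],
    [] // no tool definitions: falls through to the plain chat-completion branch
  );
  console.log(functionCall ?? result);
})();
```
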
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index ffa65c753..ab86e51ff 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -166,6 +166,12 @@ class AgentHandler {
         if (!process.env.DEEPSEEK_API_KEY)
           throw new Error("DeepSeek API Key must be provided to use agents.");
         break;
+      case "litellm":
+        if (!process.env.LITE_LLM_BASE_PATH)
+          throw new Error(
+            "LiteLLM API base path and key must be provided to use agents."
+          );
+        break;
       case "apipie":
         if (!process.env.APIPIE_LLM_API_KEY)
           throw new Error("ApiPie API Key must be provided to use agents.");
@@ -216,6 +222,8 @@ class AgentHandler {
         return null;
       case "deepseek":
         return "deepseek-chat";
+      case "litellm":
+        return null;
       case "apipie":
         return null;
       default:
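
Taken together, only `LITE_LLM_BASE_PATH` is hard-required by the guard above; the API key and model preference are optional, and `providerDefault` intentionally returns `null` so no bogus default model is assumed. A small boot-time check mirroring that logic, as a sketch (the variable names come from the diff, the warning wording is mine):

```js
// Mirrors the AgentHandler guard: the base path is mandatory, while the key
// and model preference just produce softer fallbacks elsewhere in the code.
const required = ["LITE_LLM_BASE_PATH"];
const optional = ["LITE_LLM_API_KEY", "LITE_LLM_MODEL_PREF"];

for (const name of required) {
  if (!process.env[name])
    throw new Error(`${name} must be provided to use LiteLLM agents.`);
}
for (const name of optional) {
  if (!process.env[name])
    console.warn(`[litellm] ${name} not set; relying on defaults.`);
}
```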