From 063401378819c68fac2352cd9b5adfdfa742448a Mon Sep 17 00:00:00 2001
From: Sean Hatfield <seanhatfield5@gmail.com>
Date: Wed, 6 Mar 2024 14:48:38 -0800
Subject: [PATCH] [FEAT] Groq LLM support (#865)

* Groq LLM support complete

* update useGetProvidersModels for groq models

* Add definitions
update comments and error log reports
add example envs
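
Groq's endpoint is OpenAI-compatible, so the provider reuses the stock
openai (v3) Node client and only overrides the base path. A minimal
standalone sketch of the same call pattern (assumes GROQ_API_KEY is set;
the main() wrapper and prompt are illustrative only, not part of this patch):

    const { Configuration, OpenAIApi } = require("openai");

    async function main() {
      // Point the v3 SDK at Groq instead of api.openai.com.
      const config = new Configuration({
        basePath: "https://api.groq.com/openai/v1",
        apiKey: process.env.GROQ_API_KEY,
      });
      const openai = new OpenAIApi(config);
      const { data } = await openai.createChatCompletion({
        model: "llama2-70b-4096",
        messages: [{ role: "user", content: "Say hello." }],
      });
      console.log(data.choices[0].message.content);
    }
    main();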

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>
---
 .vscode/settings.json                         |   3 +
 docker/.env.example                           |   4 +
 .../LLMSelection/GroqAiOptions/index.jsx      |  41 ++++
 frontend/src/hooks/useGetProvidersModels.js   |   1 +
 frontend/src/media/llmprovider/groq.png       | Bin 0 -> 1450 bytes
 .../GeneralSettings/LLMPreference/index.jsx   |  14 +-
 .../Steps/DataHandling/index.jsx              |   9 +
 .../Steps/LLMPreference/index.jsx             |  12 +-
 server/.env.example                           |   4 +
 server/models/systemSettings.js               |  15 +-
 server/utils/AiProviders/groq/index.js        | 207 ++++++++++++++++++
 server/utils/helpers/index.js                 |   3 +
 server/utils/helpers/updateENV.js             |  11 +
 13 files changed, 320 insertions(+), 4 deletions(-)
 create mode 100644 frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
 create mode 100644 frontend/src/media/llmprovider/groq.png
 create mode 100644 server/utils/AiProviders/groq/index.js

diff --git a/.vscode/settings.json b/.vscode/settings.json
index 096f1c9f3..02e25ceed 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -4,12 +4,15 @@
     "Astra",
     "Dockerized",
     "Embeddable",
+    "GROQ",
     "hljs",
+    "inferencing",
     "Langchain",
     "Milvus",
     "Mintplex",
     "Ollama",
     "openai",
+    "openrouter",
     "Qdrant",
     "vectordbs",
     "Weaviate",
diff --git a/docker/.env.example b/docker/.env.example
index ba33bd5c0..ae4913dc4 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -61,6 +61,10 @@ GID='1000'
 # HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
 # HUGGING_FACE_LLM_TOKEN_LIMIT=8000
 
+# LLM_PROVIDER='groq'
+# GROQ_API_KEY=gsk_abcxyz
+# GROQ_MODEL_PREF=llama2-70b-4096
+
 ###########################################
 ######## Embedding API SELECTION ##########
 ###########################################
diff --git a/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
new file mode 100644
index 000000000..cc6fbbcc0
--- /dev/null
+++ b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
@@ -0,0 +1,41 @@
+export default function GroqAiOptions({ settings }) {
+  return (
+    <div className="flex gap-x-4">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Groq API Key
+        </label>
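+        {/* The server reports only whether a key exists, so render a masked placeholder rather than the saved key. */}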
+        <input
+          type="password"
+          name="GroqApiKey"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Groq API Key"
+          defaultValue={settings?.GroqApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
+
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Chat Model Selection
+        </label>
+        <select
+          name="GroqModelPref"
+          defaultValue={settings?.GroqModelPref || "llama2-70b-4096"}
+          required={true}
+          className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          {["llama2-70b-4096", "mixtral-8x7b-32768"].map((model) => {
+            return (
+              <option key={model} value={model}>
+                {model}
+              </option>
+            );
+          })}
+        </select>
+      </div>
+    </div>
+  );
+}
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index 1f8cce988..b40ca01dd 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -19,6 +19,7 @@ const PROVIDER_DEFAULT_MODELS = {
   localai: [],
   ollama: [],
   togetherai: [],
+  groq: ["llama2-70b-4096", "mixtral-8x7b-32768"],
   native: [],
 };
 
diff --git a/frontend/src/media/llmprovider/groq.png b/frontend/src/media/llmprovider/groq.png
new file mode 100644
index 0000000000000000000000000000000000000000..31564145e1068131f3cf0a49766efbd68a0b4d54
GIT binary patch
literal 1450
zcmb7^do<Ju6vwquMnn^p%KJw&#h4<ayvE=+jJKvd^2l&Jj>tQT$Rm%*&tNegE6tFx
zX~(OSS86M*Ff-*bWJY9AB4p4GJG+1FUwh7;bMO7$d%x%2KR;Pc4mL7Uho!{C#AI+F
z)<vYff2ZV5QSCf5b4DZ>C%l{0cSnPN$C?LbJuzsrmhz*qR>*$4wYt2$y}dOzvyq>&
zo)P_{qe&3q`RD6y;l$`#fWt;PNp#!cIN&Y%CK2u8;F{Ovd()hO4f|a*!*N&(xA^jf
z^3Rles?r(!-?BNJ5YNSJ!lO)dG|ma>xACQ5QGoxX5DA%2OicJ#s)R4HLuc6_j|jx0
z;O7)!mAT@M)~Uxdo)zTtdcmdtSd+ESw}f(9$38TNdkzn8R2Jn?jgJb>(uZWORTk$=
zBT@1`oHOpek&s)%o~ZGd`S&W66sobV?>Q~&3ac{WHPfVvd+D-6T6JiIY@JD{sYekt
z1a8zYf4aCts~1A4$todQY7yjB#)0Ku3n(^28dnh_x7q0(SbbeDr+WH%Gf24QMe`Tf
z0&qE+M2%%+{!Q(>_o1Ui)#3Puyj%mY-%l_`YDj8d+T?MNwg}}ksa~Bl3zi_Faq-1H
zCsmj@+;q$&6NP8gy~gcIGS$7?B})_@n?%ZkBP|B~rO029cEiObRKB%BIg~jPJeRg*
zhwo|U-d^e+9db65(}A7RbiR70%yu-hxG>EDN9+!CI?M#qfWG+4as2xaUZ1c6G0$V$
z_f$JL7Rvtt_7jn?;NA9-SbZyNOcYPPzDW2;PZ8#>8azpQoRe;J8nF-N?v-We8LF$d
z5BANgPH}gse2M^`FK0t}(%)MzCw~zOY$23fldH%M^97>bh&7e6{Yi5@wh)rLWgl0)
zwaj^_oO}BP_x447>E+Va1f)9L%)rsGAWL`fJHxqvNNU^68cX17s(q_iqDYb~s`aIk
zy-6=X<pd?TnfXITyfj=!FBjLSg}Mv{0rrOgDMdgs3q#1msBf`S)%GO|dmu=rKH5wg
zqy9mY;tS5|IxbVIAZW$w5}j5B4Tc2I&qzwaT+tw2!w)R6b@K32${<$r#aVaQJP2tu
z4#*!RXQSKmQ&TN_loxD`<Rq~@N+(AaC?mdkSWKtO*KCu}J6Oz4p=H>+sw$8^h-(oB
zpyN{1OEZ?{p1C$f+=Jz$nJFp^tpq&-F*S`A<mQ5bK>(*c0_bS-0GReH@J(5|l1SIk
zlb9^Pzwdj?3d_e%yyuAH-dS~rL3s8xW>#4G^h;FN%T~+COa3>s+1$gu=lMy}Z~In)
zgfvMol0Eozac0k5=H+rvCe$VM$nN97@TSjkJjO6^Xxq&X&W%wpX4a{4@#2EiH)IGy
z<N4ljhm)zWD+UKN)s6@&c(1a_(8rwVO)nhWBQf7A{5TH7PVOygN(EL49kW*g(eN2U
z$Ni2A`tTZp+T&Sdk;Y`UZlkh_BA5|78iAG@EhMTcKKX<)^xU<-^^)_kwu6rkP|O)+
zMXVYL4!|vXQd%RCa#BCo2NOW%lKuHVoiO1Zv5Tc$XK8KJ#=c>=?bw?>p-%^>lIPY2
zV!qI2!{>Svz3FRiQ!m@XE9yUU+OA$X<ro>WVV4b@3fRzumHCa+Y}Ccj!O>4$HFRXE
zZBT5<`L3Jtb;7_M%;xnN?IxvM7wTO4e%X)XntK<_Sft!Kd*c>k?Fnkx1MJU?E*j@2
z!r6lkL4oz0ZG%_E1mdkp--dRAVrP;+MKg{R5<Xeh_yz1MP71d%DdJDncqJk%_oGVi
z513J3;T4H2Mv|F#WJ~=(C-=Ncc~sb#VZYM6bm;sAT4-0o{{+)N81v4Wo?0<j<?D?K
WSFKd7nO{Y?6vJ6LU>h*Lseb|K=B&j4

literal 0
HcmV?d00001

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index ac4c1e3d1..c7b6fb7bd 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -16,6 +16,7 @@ import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
+import GroqLogo from "@/media/llmprovider/groq.png";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@@ -28,11 +29,12 @@ import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
 import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
 import MistralOptions from "@/components/LLMSelection/MistralOptions";
 import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
+import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
+import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
+import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { MagnifyingGlass } from "@phosphor-icons/react";
-import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
-import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
 
 export default function GeneralLLMPreference() {
   const [saving, setSaving] = useState(false);
@@ -173,6 +175,14 @@ export default function GeneralLLMPreference() {
       options: <OpenRouterOptions settings={settings} />,
       description: "A unified interface for LLMs.",
     },
+    {
+      name: "Groq",
+      value: "groq",
+      logo: GroqLogo,
+      options: <GroqAiOptions settings={settings} />,
+      description:
+        "The fastest LLM inferencing available for real-time AI applications.",
+    },
     {
       name: "Native",
       value: "native",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 5beec3c17..af3b3a9d0 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -13,6 +13,7 @@ import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
+import GroqLogo from "@/media/llmprovider/groq.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
 import AstraDBLogo from "@/media/vectordbs/astraDB.png";
 import ChromaLogo from "@/media/vectordbs/chroma.png";
@@ -127,6 +128,14 @@ const LLM_SELECTION_PRIVACY = {
     ],
     logo: OpenRouterLogo,
   },
+  groq: {
+    name: "Groq",
+    description: [
+      "Your chats will not be used for training",
+      "Your prompts and document text used in response creation are visible to Groq",
+    ],
+    logo: GroqLogo,
+  },
 };
 
 const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 433914ae6..33883dc7f 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -13,6 +13,7 @@ import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import HuggingFaceLogo from "@/media/llmprovider/huggingface.png";
 import PerplexityLogo from "@/media/llmprovider/perplexity.png";
 import OpenRouterLogo from "@/media/llmprovider/openrouter.jpeg";
+import GroqLogo from "@/media/llmprovider/groq.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
@@ -25,12 +26,13 @@ import MistralOptions from "@/components/LLMSelection/MistralOptions";
 import HuggingFaceOptions from "@/components/LLMSelection/HuggingFaceOptions";
 import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
 import PerplexityOptions from "@/components/LLMSelection/PerplexityOptions";
+import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
+import GroqAiOptions from "@/components/LLMSelection/GroqAiOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
 import showToast from "@/utils/toast";
 import { useNavigate } from "react-router-dom";
-import OpenRouterOptions from "@/components/LLMSelection/OpenRouterOptions";
 
 const TITLE = "LLM Preference";
 const DESCRIPTION =
@@ -147,6 +149,14 @@ export default function LLMPreference({
       options: <OpenRouterOptions settings={settings} />,
       description: "A unified interface for LLMs.",
     },
+    {
+      name: "Groq",
+      value: "groq",
+      logo: GroqLogo,
+      options: <GroqAiOptions settings={settings} />,
+      description:
+        "The fastest LLM inferencing available for real-time AI applications.",
+    },
     {
       name: "Native",
       value: "native",
diff --git a/server/.env.example b/server/.env.example
index 0ca826e89..88e60182c 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -58,6 +58,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
 # HUGGING_FACE_LLM_TOKEN_LIMIT=8000
 
+# LLM_PROVIDER='groq'
+# GROQ_API_KEY=gsk_abcxyz
+# GROQ_MODEL_PREF=llama2-70b-4096
+
 ###########################################
 ######## Embedding API SELECTION ##########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 31d5c59a8..b06fe1230 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -219,12 +219,25 @@ const SystemSettings = {
             AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
           }
         : {}),
+
+      ...(llmProvider === "groq"
+        ? {
+            GroqApiKey: !!process.env.GROQ_API_KEY,
+            GroqModelPref: process.env.GROQ_MODEL_PREF,
+
+            // For embedding credentials when groq is selected.
+            OpenAiKey: !!process.env.OPEN_AI_KEY,
+            AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+            AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+            AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+          }
+        : {}),
       ...(llmProvider === "native"
         ? {
             NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
             NativeLLMTokenLimit: process.env.NATIVE_LLM_MODEL_TOKEN_LIMIT,
 
-            // For embedding credentials when ollama is selected.
+            // For embedding credentials when native is selected.
             OpenAiKey: !!process.env.OPEN_AI_KEY,
             AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
             AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
diff --git a/server/utils/AiProviders/groq/index.js b/server/utils/AiProviders/groq/index.js
new file mode 100644
index 000000000..1b15fe1fe
--- /dev/null
+++ b/server/utils/AiProviders/groq/index.js
@@ -0,0 +1,207 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const { chatPrompt } = require("../../chats");
+const { handleDefaultStreamResponse } = require("../../helpers/chat/responses");
+
+class GroqLLM {
+  constructor(embedder = null, modelPreference = null) {
+    const { Configuration, OpenAIApi } = require("openai");
+    if (!process.env.GROQ_API_KEY) throw new Error("No Groq API key was set.");
+
+    const config = new Configuration({
+      basePath: "https://api.groq.com/openai/v1",
+      apiKey: process.env.GROQ_API_KEY,
+    });
+
+    this.openai = new OpenAIApi(config);
+    this.model =
+      modelPreference || process.env.GROQ_MODEL_PREF || "llama2-70b-4096";
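+    // Budget the context window: ~15% for chat history, ~15% for the system prompt, ~70% for the user prompt.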
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = !embedder ? new NativeEmbedder() : embedder;
+    this.defaultTemp = 0.7;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamChat" in this && "streamGetChatCompletion" in this;
+  }
+
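+  // Context window sizes for the Groq-hosted models; unknown models fall back to the smallest known window.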
+  promptWindowLimit() {
+    switch (this.model) {
+      case "llama2-70b-4096":
+        return 4096;
+      case "mixtral-8x7b-32768":
+        return 32_768;
+      default:
+        return 4096;
+    }
+  }
+
+  async isValidChatCompletionModel(modelName = "") {
+    const validModels = ["llama2-70b-4096", "mixtral-8x7b-32768"];
+    const isPreset = validModels.some((model) => modelName === model);
+    if (isPreset) return true;
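+    // Not a known preset; ask the Groq API directly whether the model exists.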
+
+    const model = await this.openai
+      .retrieveModel(modelName)
+      .then((res) => res.data)
+      .catch(() => null);
+    return !!model;
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
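+    // Order matters: system prompt (with injected context) first, then prior turns, then the new user message.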
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `Groq chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const textResponse = await this.openai
+      .createChatCompletion({
+        model: this.model,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        n: 1,
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      })
+      .then((json) => {
+        const res = json.data;
+        if (!res.hasOwnProperty("choices"))
+          throw new Error("GroqAI chat: No results!");
+        if (res.choices.length === 0)
+          throw new Error("GroqAI chat: No results length!");
+        return res.choices[0].message.content;
+      })
+      .catch((error) => {
+        throw new Error(
+          `GroqAI::createChatCompletion failed with: ${error.message}`
+        );
+      });
+
+    return textResponse;
+  }
+
+  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `GroqAI:streamChat: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
+        n: 1,
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      },
+      { responseType: "stream" }
+    );
+    return streamRequest;
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `GroqAI:chatCompletion: ${this.model} is not valid for chat completion!`
+      );
+
+    const { data } = await this.openai
+      .createChatCompletion({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.response.data.error.message);
+      });
+
+    if (!data.hasOwnProperty("choices")) return null;
+    return data.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!(await this.isValidChatCompletionModel(this.model)))
+      throw new Error(
+        `GroqAI:streamChatCompletion: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.createChatCompletion(
+      {
+        model: this.model,
+        stream: true,
+        messages,
+        temperature,
+      },
+      { responseType: "stream" }
+    );
+    return streamRequest;
+  }
+
+  handleStream(response, stream, responseProps) {
+    return handleDefaultStreamResponse(response, stream, responseProps);
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  GroqLLM,
+};
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index a31a3e4f9..783609725 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -73,6 +73,9 @@ function getLLMProvider(modelPreference = null) {
     case "huggingface":
       const { HuggingFaceLLM } = require("../AiProviders/huggingface");
       return new HuggingFaceLLM(embedder, modelPreference);
+    case "groq":
+      const { GroqLLM } = require("../AiProviders/groq");
+      return new GroqLLM(embedder, modelPreference);
     default:
       throw new Error("ENV: No LLM_PROVIDER value found in environment!");
   }
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 1ca936820..575f80ab9 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -259,6 +259,16 @@ const KEY_MAPPING = {
     checks: [isNotEmpty],
   },
 
+  // Groq Options
+  GroqApiKey: {
+    envKey: "GROQ_API_KEY",
+    checks: [isNotEmpty],
+  },
+  GroqModelPref: {
+    envKey: "GROQ_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+
   // System Settings
   AuthToken: {
     envKey: "AUTH_TOKEN",
@@ -336,6 +346,7 @@ function supportedLLM(input = "") {
     "huggingface",
     "perplexity",
     "openrouter",
+    "groq",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }