From 5bc96bca887e7a7f0affb7d64da720785b9ddeeb Mon Sep 17 00:00:00 2001
From: Timothy Carambat <rambat1010@gmail.com>
Date: Mon, 21 Oct 2024 16:32:49 -0700
Subject: [PATCH] Add Grok/XAI support for LLM & agents (#2517)

* Add Grok/XAI support for LLM & agents

* forgot files
---
 .vscode/settings.json                         |   1 +
 docker/.env.example                           |   4 +
 .../LLMSelection/XAiLLMOptions/index.jsx      | 114 ++++++++++++
 frontend/src/hooks/useGetProvidersModels.js   |   1 +
 frontend/src/media/llmprovider/xai.png        | Bin 0 -> 14108 bytes
 .../GeneralSettings/LLMPreference/index.jsx   |  11 ++
 .../Steps/DataHandling/index.jsx              |   8 +
 .../Steps/LLMPreference/index.jsx             |   9 +
 .../AgentConfig/AgentLLMSelection/index.jsx   |   1 +
 server/.env.example                           |   4 +
 server/models/systemSettings.js               |   4 +
 server/utils/AiProviders/modelMap.js          |   3 +
 server/utils/AiProviders/xai/index.js         | 168 ++++++++++++++++++
 server/utils/agents/aibitat/index.js          |   2 +
 .../agents/aibitat/providers/ai-provider.js   |   8 +
 .../utils/agents/aibitat/providers/index.js   |   2 +
 server/utils/agents/aibitat/providers/xai.js  | 116 ++++++++++++
 server/utils/agents/index.js                  |   6 +
 server/utils/helpers/customModels.js          |  33 ++++
 server/utils/helpers/index.js                 |   6 +
 server/utils/helpers/updateENV.js             |  11 ++
 21 files changed, 512 insertions(+)
 create mode 100644 frontend/src/components/LLMSelection/XAiLLMOptions/index.jsx
 create mode 100644 frontend/src/media/llmprovider/xai.png
 create mode 100644 server/utils/AiProviders/xai/index.js
 create mode 100644 server/utils/agents/aibitat/providers/xai.js

diff --git a/.vscode/settings.json b/.vscode/settings.json
index 8405c5281..14efd3fae 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -53,6 +53,7 @@
     "uuidv",
     "vectordbs",
     "Weaviate",
+    "XAILLM",
     "Zilliz"
   ],
   "eslint.experimental.useFlatConfig": true,
diff --git a/docker/.env.example b/docker/.env.example
index a6cabe655..7bb07ebef 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -109,6 +109,10 @@ GID='1000'
 # APIPIE_LLM_API_KEY='sk-123abc'
 # APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
 
+# LLM_PROVIDER='xai'
+# XAI_LLM_API_KEY='xai-your-api-key-here'
+# XAI_LLM_MODEL_PREF='grok-beta'
+
 ###########################################
 ######## Embedding API SELECTION ##########
 ###########################################
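
Before wiring these into a deployment, the key can be sanity-checked against xAI's OpenAI-compatible endpoint. A minimal sketch (not part of the patch), using the same `openai` package and base URL the provider code below relies on:

// check-xai-key.js — illustrative only; assumes XAI_LLM_API_KEY is exported.
const { OpenAI } = require("openai");

(async () => {
  const client = new OpenAI({
    baseURL: "https://api.x.ai/v1",
    apiKey: process.env.XAI_LLM_API_KEY,
  });
  // Same call the new getXAIModels() helper makes further down in this patch.
  const { data } = await client.models.list();
  console.log(data.map((m) => m.id)); // expected to include "grok-beta"
})();
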
diff --git a/frontend/src/components/LLMSelection/XAiLLMOptions/index.jsx b/frontend/src/components/LLMSelection/XAiLLMOptions/index.jsx
new file mode 100644
index 000000000..d760a8ba4
--- /dev/null
+++ b/frontend/src/components/LLMSelection/XAiLLMOptions/index.jsx
@@ -0,0 +1,114 @@
+import { useState, useEffect } from "react";
+import System from "@/models/system";
+
+export default function XAILLMOptions({ settings }) {
+  const [inputValue, setInputValue] = useState(settings?.XAIApiKey);
+  const [apiKey, setApiKey] = useState(settings?.XAIApiKey);
+
+  return (
+    <div className="flex gap-[36px] mt-1.5">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          xAI API Key
+        </label>
+        <input
+          type="password"
+          name="XAIApiKey"
+          className="border-none bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+          placeholder="xAI API Key"
+          defaultValue={settings?.XAIApiKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+          onChange={(e) => setInputValue(e.target.value)}
+          onBlur={() => setApiKey(inputValue)}
+        />
+      </div>
+
+      {!settings?.credentialsOnly && (
+        <XAIModelSelection settings={settings} apiKey={apiKey} />
+      )}
+    </div>
+  );
+}
+
+function XAIModelSelection({ apiKey, settings }) {
+  const [customModels, setCustomModels] = useState([]);
+  const [loading, setLoading] = useState(true);
+
+  useEffect(() => {
+    async function findCustomModels() {
+      if (!apiKey) {
+        setCustomModels([]);
+        setLoading(true);
+        return;
+      }
+
+      try {
+        setLoading(true);
+        const { models } = await System.customModels("xai", apiKey);
+        setCustomModels(models || []);
+      } catch (error) {
+        console.error("Failed to fetch custom models:", error);
+        setCustomModels([]);
+      } finally {
+        setLoading(false);
+      }
+    }
+    findCustomModels();
+  }, [apiKey]);
+
+  if (loading) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select
+          name="XAIModelPref"
+          disabled={true}
+          className="border-none bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+        >
+          <option disabled={true} selected={true}>
+            --loading available models--
+          </option>
+        </select>
+        <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
+          Enter a valid API key to view all available models for your account.
+        </p>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select
+        name="XAIModelPref"
+        required={true}
+        className="border-none bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
+      >
+        {customModels.length > 0 && (
+          <optgroup label="Available models">
+            {customModels.map((model) => {
+              return (
+                <option
+                  key={model.id}
+                  value={model.id}
+                  selected={settings?.XAIModelPref === model.id}
+                >
+                  {model.id}
+                </option>
+              );
+            })}
+          </optgroup>
+        )}
+      </select>
+      <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
+        Select the xAI model you want to use for your conversations.
+      </p>
+    </div>
+  );
+}
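
For reference, `XAIModelSelection` expects `System.customModels("xai", apiKey)` to resolve to the shape returned by the server helper added later in this patch. A sketch with illustrative values:

// Illustrative response shape consumed by the component above.
const exampleCustomModelsResponse = {
  models: [
    { created: 1725148800, id: "grok-beta", object: "model", owned_by: "xai" },
  ],
  error: null,
};
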
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index ece31c2b5..a493438c7 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -49,6 +49,7 @@ const PROVIDER_DEFAULT_MODELS = {
   textgenwebui: [],
   "generic-openai": [],
   bedrock: [],
+  xai: ["grok-beta"],
 };
 
 // For providers with large model lists (e.g. togetherAi) - we subgroup the options
diff --git a/frontend/src/media/llmprovider/xai.png b/frontend/src/media/llmprovider/xai.png
new file mode 100644
index 0000000000000000000000000000000000000000..93106761e4a616f0ddf38b4cdd2292b4c029a406
GIT binary patch
literal 14108
[base85-encoded binary data for xai.png (14,108 bytes) omitted]

literal 0
HcmV?d00001

diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index d471dc358..e7b06e172 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -27,6 +27,7 @@ import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
 import APIPieLogo from "@/media/llmprovider/apipie.png";
+import XAILogo from "@/media/llmprovider/xai.png";
 
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -52,6 +53,7 @@ import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
 import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
+import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -258,6 +260,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
       "GenericOpenAiKey",
     ],
   },
+  {
+    name: "xAI",
+    value: "xai",
+    logo: XAILogo,
+    options: (settings) => <XAILLMOptions settings={settings} />,
+    description: "Run xAI's powerful LLMs like Grok-2 and more.",
+    requiredConfig: ["XAIApiKey", "XAIModelPref"],
+  },
+
   {
     name: "Native",
     value: "native",
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index e3b4e2ee8..33750cba2 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -22,6 +22,7 @@ import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
 import APIPieLogo from "@/media/llmprovider/apipie.png";
+import XAILogo from "@/media/llmprovider/xai.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import ZillizLogo from "@/media/vectordbs/zilliz.png";
@@ -210,6 +211,13 @@ export const LLM_SELECTION_PRIVACY = {
     ],
     logo: APIPieLogo,
   },
+  xai: {
+    name: "xAI",
+    description: [
+      "Your model and chat contents are visible to xAI in accordance with their terms of service.",
+    ],
+    logo: XAILogo,
+  },
 };
 
 export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 1b69369f5..cc17acfd3 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -22,6 +22,7 @@ import LiteLLMLogo from "@/media/llmprovider/litellm.png";
 import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
 import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
 import APIPieLogo from "@/media/llmprovider/apipie.png";
+import XAILogo from "@/media/llmprovider/xai.png";
 
 import CohereLogo from "@/media/llmprovider/cohere.png";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -47,6 +48,7 @@ import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
 import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
 import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
 import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
+import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
 
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
@@ -219,6 +221,13 @@ const LLMS = [
     options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
     description: "Run powerful foundation models privately with AWS Bedrock.",
   },
+  {
+    name: "xAI",
+    value: "xai",
+    logo: XAILogo,
+    options: (settings) => <XAILLMOptions settings={settings} />,
+    description: "Run xAI's powerful LLMs like Grok-2 and more.",
+  },
   {
     name: "Native",
     value: "native",
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index d0b0b4893..c59a77e71 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -26,6 +26,7 @@ const ENABLED_PROVIDERS = [
   "deepseek",
   "litellm",
   "apipie",
+  "xai",
   // TODO: More agent support.
   // "cohere",         // Has tool calling and will need to build explicit support
   // "huggingface"     // Can be done but already has issues with no-chat templated. Needs to be tested.
diff --git a/server/.env.example b/server/.env.example
index f2d16b310..9c513f62f 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -99,6 +99,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
 # APIPIE_LLM_API_KEY='sk-123abc'
 # APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
 
+# LLM_PROVIDER='xai'
+# XAI_LLM_API_KEY='xai-your-api-key-here'
+# XAI_LLM_MODEL_PREF='grok-beta'
+
 ###########################################
 ######## Embedding API SELECTION ##########
 ###########################################
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index e5de59376..55569be07 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -525,6 +525,10 @@ const SystemSettings = {
       // APIPie LLM API Keys
       ApipieLLMApiKey: !!process.env.APIPIE_LLM_API_KEY,
       ApipieLLMModelPref: process.env.APIPIE_LLM_MODEL_PREF,
+
+      // xAI LLM API Keys
+      XAIApiKey: !!process.env.XAI_LLM_API_KEY,
+      XAIModelPref: process.env.XAI_LLM_MODEL_PREF,
     };
   },
 
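Note the `!!` coercion: the settings endpoint reports only whether an xAI key exists, never the secret itself, which is why the frontend component above masks the field with asterisks. A sketch of the payload fragment the UI receives (illustrative values):

// Fragment of the settings payload sent to the frontend.
const xaiSettingsFragment = {
  XAIApiKey: true, // boolean presence flag, not the raw key
  XAIModelPref: "grok-beta", // saved model preference, if any
};
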
diff --git a/server/utils/AiProviders/modelMap.js b/server/utils/AiProviders/modelMap.js
index 84e480b31..390278f37 100644
--- a/server/utils/AiProviders/modelMap.js
+++ b/server/utils/AiProviders/modelMap.js
@@ -61,6 +61,9 @@ const MODEL_MAP = {
     "deepseek-chat": 128_000,
     "deepseek-coder": 128_000,
   },
+  xai: {
+    "grok-beta": 131_072,
+  },
 };
 
 module.exports = { MODEL_MAP };
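
The new map entry feeds the provider's context-window checks. A minimal sketch of the lookup pattern used below, including the 131,072-token fallback for unmapped models:

// Sketch of how MODEL_MAP is consumed (mirrors promptWindowLimit() below);
// the require path is relative to the repo root.
const { MODEL_MAP } = require("./server/utils/AiProviders/modelMap");

const limitFor = (model) => MODEL_MAP.xai[model] ?? 131_072;
console.log(limitFor("grok-beta")); // 131072
console.log(limitFor("unknown-model")); // 131072 (fallback)
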
diff --git a/server/utils/AiProviders/xai/index.js b/server/utils/AiProviders/xai/index.js
new file mode 100644
index 000000000..7a25760df
--- /dev/null
+++ b/server/utils/AiProviders/xai/index.js
@@ -0,0 +1,168 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+  handleDefaultStreamResponseV2,
+} = require("../../helpers/chat/responses");
+const { MODEL_MAP } = require("../modelMap");
+
+class XAiLLM {
+  constructor(embedder = null, modelPreference = null) {
+    if (!process.env.XAI_LLM_API_KEY)
+      throw new Error("No xAI API key was set.");
+    const { OpenAI: OpenAIApi } = require("openai");
+
+    this.openai = new OpenAIApi({
+      baseURL: "https://api.x.ai/v1",
+      apiKey: process.env.XAI_LLM_API_KEY,
+    });
+    this.model =
+      modelPreference || process.env.XAI_LLM_MODEL_PREF || "grok-beta";
+    this.limits = {
+      history: this.promptWindowLimit() * 0.15,
+      system: this.promptWindowLimit() * 0.15,
+      user: this.promptWindowLimit() * 0.7,
+    };
+
+    this.embedder = embedder ?? new NativeEmbedder();
+    this.defaultTemp = 0.7;
+  }
+
+  #appendContext(contextTexts = []) {
+    if (!contextTexts || !contextTexts.length) return "";
+    return (
+      "\nContext:\n" +
+      contextTexts
+        .map((text, i) => {
+          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+        })
+        .join("")
+    );
+  }
+
+  streamingEnabled() {
+    return "streamGetChatCompletion" in this;
+  }
+
+  static promptWindowLimit(modelName) {
+    return MODEL_MAP.xai[modelName] ?? 131_072;
+  }
+
+  promptWindowLimit() {
+    return MODEL_MAP.xai[this.model] ?? 131_072;
+  }
+
+  isValidChatCompletionModel(modelName = "") {
+    switch (modelName) {
+      case "grok-beta":
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  /**
+   * Generates appropriate content array for a message + attachments.
+   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+   * @returns {string|object[]}
+   */
+  #generateContent({ userPrompt, attachments = [] }) {
+    if (!attachments.length) {
+      return userPrompt;
+    }
+
+    const content = [{ type: "text", text: userPrompt }];
+    for (let attachment of attachments) {
+      content.push({
+        type: "image_url",
+        image_url: {
+          url: attachment.contentString,
+          detail: "high",
+        },
+      });
+    }
+    return content.flat();
+  }
+
+  /**
+   * Construct the user prompt for this model.
+   * @param {{attachments: import("../../helpers").Attachment[]}} param0
+   * @returns
+   */
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+    attachments = [], // This is the specific attachment for only this prompt
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+    };
+    return [
+      prompt,
+      ...chatHistory,
+      {
+        role: "user",
+        content: this.#generateContent({ userPrompt, attachments }),
+      },
+    ];
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!this.isValidChatCompletionModel(this.model))
+      throw new Error(
+        `xAI chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const result = await this.openai.chat.completions
+      .create({
+        model: this.model,
+        messages,
+        temperature,
+      })
+      .catch((e) => {
+        throw new Error(e.message);
+      });
+
+    if (!result.hasOwnProperty("choices") || result.choices.length === 0)
+      return null;
+    return result.choices[0].message.content;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    if (!this.isValidChatCompletionModel(this.model))
+      throw new Error(
+        `xAI chat: ${this.model} is not valid for chat completion!`
+      );
+
+    const streamRequest = await this.openai.chat.completions.create({
+      model: this.model,
+      stream: true,
+      messages,
+      temperature,
+    });
+    return streamRequest;
+  }
+
+  handleStream(response, stream, responseProps) {
+    return handleDefaultStreamResponseV2(response, stream, responseProps);
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  XAiLLM,
+};
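
A minimal usage sketch for the class above (not part of the patch; assumes XAI_LLM_API_KEY is set and paths are relative to the repo root):

const { XAiLLM } = require("./server/utils/AiProviders/xai");

(async () => {
  const llm = new XAiLLM(null, "grok-beta"); // null embedder -> NativeEmbedder
  const messages = llm.constructPrompt({
    systemPrompt: "You are a concise assistant.",
    contextTexts: ["AnythingLLM now supports xAI."],
    chatHistory: [],
    userPrompt: "Which provider was just added?",
  });
  const reply = await llm.getChatCompletion(messages, { temperature: 0.7 });
  console.log(reply);
})();
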
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 51dc57553..24f027cff 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -789,6 +789,8 @@ ${this.getHistory({ to: route.to })
         return new Providers.LiteLLMProvider({ model: config.model });
       case "apipie":
         return new Providers.ApiPieProvider({ model: config.model });
+      case "xai":
+        return new Providers.XAIProvider({ model: config.model });
 
       default:
         throw new Error(
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index afaefa1c9..c9925d1cd 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -146,6 +146,14 @@ class Provider {
           apiKey: process.env.DEEPSEEK_API_KEY ?? null,
           ...config,
         });
+      case "xai":
+        return new ChatOpenAI({
+          configuration: {
+            baseURL: "https://api.x.ai/v1",
+          },
+          apiKey: process.env.XAI_LLM_API_KEY ?? null,
+          ...config,
+        });
 
       // OSS Model Runners
       // case "anythingllm_ollama":
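
The same pattern works standalone: LangChain's ChatOpenAI accepts a `configuration` object that is forwarded to the underlying OpenAI client, so pointing it at xAI only requires the base URL. A sketch (the import path is an assumption, chosen to match common usage rather than confirmed by this hunk):

const { ChatOpenAI } = require("@langchain/openai"); // assumed import path

const chat = new ChatOpenAI({
  configuration: { baseURL: "https://api.x.ai/v1" },
  apiKey: process.env.XAI_LLM_API_KEY,
  model: "grok-beta", // "modelName" in older @langchain/openai versions
});
// chat.invoke("Hello") then behaves like any OpenAI-compatible chat model.
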
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index f5ae66420..47e2d8716 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -17,6 +17,7 @@ const FireworksAIProvider = require("./fireworksai.js");
 const DeepSeekProvider = require("./deepseek.js");
 const LiteLLMProvider = require("./litellm.js");
 const ApiPieProvider = require("./apipie.js");
+const XAIProvider = require("./xai.js");
 
 module.exports = {
   OpenAIProvider,
@@ -38,4 +39,5 @@ module.exports = {
   FireworksAIProvider,
   LiteLLMProvider,
   ApiPieProvider,
+  XAIProvider,
 };
diff --git a/server/utils/agents/aibitat/providers/xai.js b/server/utils/agents/aibitat/providers/xai.js
new file mode 100644
index 000000000..9461d865f
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/xai.js
@@ -0,0 +1,116 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for the xAI provider.
+ */
+class XAIProvider extends InheritMultiple([Provider, UnTooled]) {
+  model;
+
+  constructor(config = {}) {
+    const { model = "grok-beta" } = config;
+    super();
+    const client = new OpenAI({
+      baseURL: "https://api.x.ai/v1",
+      apiKey: process.env.XAI_LLM_API_KEY,
+      maxRetries: 3,
+    });
+
+    this._client = client;
+    this.model = model;
+    this.verbose = true;
+  }
+
+  get client() {
+    return this._client;
+  }
+
+  async #handleFunctionCallChat({ messages = [] }) {
+    return await this.client.chat.completions
+      .create({
+        model: this.model,
+        temperature: 0,
+        messages,
+      })
+      .then((result) => {
+        if (!result.hasOwnProperty("choices"))
+          throw new Error("xAI chat: No results!");
+        if (result.choices.length === 0)
+          throw new Error("xAI chat: No results length!");
+        return result.choices[0].message.content;
+      })
+      .catch((_) => {
+        return null;
+      });
+  }
+
+  /**
+   * Create a completion based on the received messages.
+   *
+   * @param messages A list of messages to send to the API.
+   * @param functions
+   * @returns The completion.
+   */
+  async complete(messages, functions = null) {
+    try {
+      let completion;
+      if (functions?.length > 0) {
+        const { toolCall, text } = await this.functionCall(
+          messages,
+          functions,
+          this.#handleFunctionCallChat.bind(this)
+        );
+
+        if (toolCall !== null) {
+          this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+          this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+          return {
+            result: null,
+            functionCall: {
+              name: toolCall.name,
+              arguments: toolCall.arguments,
+            },
+            cost: 0,
+          };
+        }
+        completion = { content: text };
+      }
+
+      if (!completion?.content) {
+        this.providerLog(
+          "Will assume chat completion without tool call inputs."
+        );
+        const response = await this.client.chat.completions.create({
+          model: this.model,
+          messages: this.cleanMsgs(messages),
+        });
+        completion = response.choices[0].message;
+      }
+
+      // The Deduplicator inherited via UnTooled mainly prevents the agent from
+      // calling the exact same function over and over in a loop within a single chat exchange,
+      // but we still want it to be able to call previously used tools in a new chat interaction.
+      this.deduplicator.reset("runs");
+      return {
+        result: completion.content,
+        cost: 0,
+      };
+    } catch (error) {
+      throw error;
+    }
+  }
+
+  /**
+   * Get the cost of the completion.
+   *
+   * @param _usage The completion to get the cost for.
+   * @returns The cost of the completion.
+   */
+  getCost(_usage) {
+    return 0;
+  }
+}
+
+module.exports = XAIProvider;
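
Because grok-beta is driven through UnTooled, tool use is emulated via prompting rather than native function-calling. A usage sketch for the provider above (illustrative; an empty tool list falls through to a plain chat completion):

const XAIProvider = require("./server/utils/agents/aibitat/providers/xai.js");

(async () => {
  const provider = new XAIProvider({ model: "grok-beta" });
  const { result, functionCall } = await provider.complete(
    [{ role: "user", content: "What is 2 + 2?" }],
    [] // no functions: skips the functionCall path entirely
  );
  console.log(functionCall ?? result);
})();
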
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 98caea5cd..fd7d06e8b 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -169,6 +169,10 @@ class AgentHandler {
         if (!process.env.APIPIE_LLM_API_KEY)
           throw new Error("ApiPie API Key must be provided to use agents.");
         break;
+      case "xai":
+        if (!process.env.XAI_LLM_API_KEY)
+          throw new Error("xAI API Key must be provided to use agents.");
+        break;
 
       default:
         throw new Error(
@@ -228,6 +232,8 @@ class AgentHandler {
         return process.env.LITE_LLM_MODEL_PREF ?? null;
       case "apipie":
         return process.env.APIPIE_LLM_MODEL_PREF ?? null;
+      case "xai":
+        return process.env.XAI_LLM_MODEL_PREF ?? "grok-beta";
       default:
         return null;
     }
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index 086144bfe..7ccbf13c7 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -21,6 +21,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "groq",
   "deepseek",
   "apipie",
+  "xai",
 ];
 
 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -60,6 +61,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await getDeepSeekModels(apiKey);
     case "apipie":
       return await getAPIPieModels(apiKey);
+    case "xai":
+      return await getXAIModels(apiKey);
     default:
       return { models: [], error: "Invalid provider for custom models" };
   }
@@ -466,6 +469,36 @@ async function getDeepSeekModels(apiKey = null) {
   return { models, error: null };
 }
 
+async function getXAIModels(_apiKey = null) {
+  const { OpenAI: OpenAIApi } = require("openai");
+  const apiKey =
+    _apiKey === true
+      ? process.env.XAI_LLM_API_KEY
+      : _apiKey || process.env.XAI_LLM_API_KEY || null;
+  const openai = new OpenAIApi({
+    baseURL: "https://api.x.ai/v1",
+    apiKey,
+  });
+  const models = await openai.models
+    .list()
+    .then((results) => results.data)
+    .catch((e) => {
+      console.error(`XAI:listModels`, e.message);
+      return [
+        {
+          created: 1725148800,
+          id: "grok-beta",
+          object: "model",
+          owned_by: "xai",
+        },
+      ];
+    });
+
+  // The API key was valid, so save it for future use.
+  if (models.length > 0 && !!apiKey) process.env.XAI_LLM_API_KEY = apiKey;
+  return { models, error: null };
+}
+
 module.exports = {
   getCustomModels,
 };
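
Callers go through the shared `getCustomModels` entry point rather than `getXAIModels` directly. Note the `_apiKey === true` branch: the frontend can send a literal `true` (the boolean presence flag from systemSettings) to mean "reuse the saved key", so the helper falls back to the env var in that case. A usage sketch:

const { getCustomModels } = require("./server/utils/helpers/customModels");

getCustomModels("xai", process.env.XAI_LLM_API_KEY).then(({ models, error }) => {
  // On an API failure getXAIModels() substitutes a hard-coded grok-beta entry,
  // so `models` is non-empty and `error` is null either way.
  console.log(models.map((m) => m.id), error);
});
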
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index f3f19fb9d..84f971cc6 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -165,6 +165,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
     case "apipie":
       const { ApiPieLLM } = require("../AiProviders/apipie");
       return new ApiPieLLM(embedder, model);
+    case "xai":
+      const { XAiLLM } = require("../AiProviders/xai");
+      return new XAiLLM(embedder, model);
     default:
       throw new Error(
         `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -294,6 +297,9 @@ function getLLMProviderClass({ provider = null } = {}) {
     case "apipie":
       const { ApiPieLLM } = require("../AiProviders/apipie");
       return ApiPieLLM;
+    case "xai":
+      const { XAiLLM } = require("../AiProviders/xai");
+      return XAiLLM;
     default:
       return null;
   }
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 202ffcd99..d705fb730 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -539,6 +539,16 @@ const KEY_MAPPING = {
     envKey: "APIPIE_LLM_MODEL_PREF",
     checks: [isNotEmpty],
   },
+
+  // xAI Options
+  XAIApiKey: {
+    envKey: "XAI_LLM_API_KEY",
+    checks: [isNotEmpty],
+  },
+  XAIModelPref: {
+    envKey: "XAI_LLM_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
 };
 
 function isNotEmpty(input = "") {
@@ -643,6 +653,7 @@ function supportedLLM(input = "") {
     "bedrock",
     "deepseek",
     "apipie",
+    "xai",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }
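
Together, the KEY_MAPPING entries and the supportedLLM allowlist are what let the generic settings-update flow accept the two new fields. A sketch of how a payload from the UI is validated against the mapping above (an illustrative driver, not the file's actual update function; assumes it runs where KEY_MAPPING and the check helpers are in scope):

const updates = { XAIApiKey: "xai-your-api-key-here", XAIModelPref: "grok-beta" };

for (const [field, value] of Object.entries(updates)) {
  const { envKey, checks } = KEY_MAPPING[field]; // e.g. "XAI_LLM_API_KEY"
  const error = checks.map((check) => check(value)).find(Boolean);
  if (!error) process.env[envKey] = value; // mirrors how settings land in env
}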