From e0a0a8976db2bb4cbf060f54ab2f2598292c2ace Mon Sep 17 00:00:00 2001 From: Timothy Carambat <rambat1010@gmail.com> Date: Wed, 27 Dec 2023 17:21:47 -0800 Subject: [PATCH] Add Ollama as LLM provider option (#494) * Add support for Ollama as LLM provider resolves #493 --- .vscode/settings.json | 1 + README.md | 1 + docker/.env.example | 5 + .../LLMSelection/OllamaLLMOptions/index.jsx | 120 ++++++++++ frontend/src/media/llmprovider/ollama.png | Bin 0 -> 23630 bytes .../GeneralSettings/LLMPreference/index.jsx | 14 ++ .../Steps/DataHandling/index.jsx | 8 + .../Steps/LLMSelection/index.jsx | 24 +- server/.env.example | 5 + server/models/systemSettings.js | 14 ++ server/utils/AiProviders/ollama/index.js | 208 ++++++++++++++++++ server/utils/chats/stream.js | 29 +++ server/utils/helpers/customModels.js | 35 ++- server/utils/helpers/index.js | 3 + server/utils/helpers/updateENV.js | 25 +++ 15 files changed, 486 insertions(+), 6 deletions(-) create mode 100644 frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx create mode 100644 frontend/src/media/llmprovider/ollama.png create mode 100644 server/utils/AiProviders/ollama/index.js diff --git a/.vscode/settings.json b/.vscode/settings.json index dde2d134b..459f57fc3 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,5 +1,6 @@ { "cSpell.words": [ + "Ollama", "openai", "Qdrant", "Weaviate" diff --git a/README.md b/README.md index 44e0557fa..36127cb35 100644 --- a/README.md +++ b/README.md @@ -59,6 +59,7 @@ Some cool features of AnythingLLM - [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) - [Anthropic ClaudeV2](https://www.anthropic.com/) - [Google Gemini Pro](https://ai.google.dev/) +- [Ollama (chat models)](https://ollama.ai/) - [LM Studio (all models)](https://lmstudio.ai) - [LocalAi (all models)](https://localai.io/) diff --git a/docker/.env.example b/docker/.env.example index cc9fa06fc..0db90aa23 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -35,6 +35,11 @@ GID='1000' # LOCAL_AI_MODEL_TOKEN_LIMIT=4096 # LOCAL_AI_API_KEY="sk-123abc" +# LLM_PROVIDER='ollama' +# OLLAMA_BASE_PATH='http://host.docker.internal:11434' +# OLLAMA_MODEL_PREF='llama2' +# OLLAMA_MODEL_TOKEN_LIMIT=4096 + ########################################### ######## Embedding API SElECTION ########## ########################################### diff --git a/frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx b/frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx new file mode 100644 index 000000000..a2034bf75 --- /dev/null +++ b/frontend/src/components/LLMSelection/OllamaLLMOptions/index.jsx @@ -0,0 +1,120 @@ +import { useEffect, useState } from "react"; +import System from "@/models/system"; + +export default function OllamaLLMOptions({ settings }) { + const [basePathValue, setBasePathValue] = useState( + settings?.OllamaLLMBasePath + ); + const [basePath, setBasePath] = useState(settings?.OllamaLLMBasePath); + + return ( + <div className="w-full flex flex-col gap-y-4"> + <div className="w-full flex items-center gap-4"> + <div className="flex flex-col w-60"> + <label className="text-white text-sm font-semibold block mb-4"> + Ollama Base URL + </label> + <input + type="url" + name="OllamaLLMBasePath" + className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5" + placeholder="http://127.0.0.1:11434" + defaultValue={settings?.OllamaLLMBasePath} + required={true} + autoComplete="off" + spellCheck={false} + 
onChange={(e) => setBasePathValue(e.target.value)} + onBlur={() => setBasePath(basePathValue)} + /> + </div> + <OllamaLLMModelSelection settings={settings} basePath={basePath} /> + <div className="flex flex-col w-60"> + <label className="text-white text-sm font-semibold block mb-4"> + Token context window + </label> + <input + type="number" + name="OllamaLLMTokenLimit" + className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5" + placeholder="4096" + min={1} + onScroll={(e) => e.target.blur()} + defaultValue={settings?.OllamaLLMTokenLimit} + required={true} + autoComplete="off" + /> + </div> + </div> + </div> + ); +} + +function OllamaLLMModelSelection({ settings, basePath = null }) { + const [customModels, setCustomModels] = useState([]); + const [loading, setLoading] = useState(true); + + useEffect(() => { + async function findCustomModels() { + if (!basePath) { + setCustomModels([]); + setLoading(false); + return; + } + setLoading(true); + const { models } = await System.customModels("ollama", null, basePath); + setCustomModels(models || []); + setLoading(false); + } + findCustomModels(); + }, [basePath]); + + if (loading || customModels.length == 0) { + return ( + <div className="flex flex-col w-60"> + <label className="text-white text-sm font-semibold block mb-4"> + Chat Model Selection + </label> + <select + name="OllamaLLMModelPref" + disabled={true} + className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5" + > + <option disabled={true} selected={true}> + {!!basePath + ? "-- loading available models --" + : "-- waiting for URL --"} + </option> + </select> + </div> + ); + } + + return ( + <div className="flex flex-col w-60"> + <label className="text-white text-sm font-semibold block mb-4"> + Chat Model Selection + </label> + <select + name="OllamaLLMModelPref" + required={true} + className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5" + > + {customModels.length > 0 && ( + <optgroup label="Your loaded models"> + {customModels.map((model) => { + return ( + <option + key={model.id} + value={model.id} + selected={settings.OllamaLLMModelPref === model.id} + > + {model.id} + </option> + ); + })} + </optgroup> + )} + </select> + </div> + ); +} diff --git a/frontend/src/media/llmprovider/ollama.png b/frontend/src/media/llmprovider/ollama.png new file mode 100644 index 0000000000000000000000000000000000000000..2a898a6ebbd85026715453aed13885742cbb7712 GIT binary patch literal 23630 zcmeFZcRbhq|38Xs%E&0$DUy&?Nj8-t$tIiZz4w+R4dY75C?r{_P)21XNs**%Ns?Kz z`Q2Y#*Z1@Lem>uG&h6aJIe(sRt}DIAYdpu}aev(J_s1(<TkGg%S`Jzg5|YiwkE!aA zkdSH+|4~!om7qOr6!?qA{g{y#3CWfQ;y==qy<2wRMLBOZLvIx~7Z+PsZxVk`SsPhZ zC3O#%0O|c6u6nZjB}E)%rKQ9VNl3~Z6qk{amXtUsAt56z;w>v7xnDx!fP`c~LU}w1 z3Hh{xuA#S~=1Ey=Hy2S$8#gOkQGXYAoB*frm&LCxw%(S!{w~h0Ub6m21pj<O7QYiO ziwVlp^8WKj&eO(DR!3F+pO4~SM+EJ?z1?NS#QgmHMExX1-8|2U?LTzrkeIlHn1sYW zd}5zhfUCEq|2|i*ojB6JA4=8M%i7by-P^&<m6tfQrInkH_YpzCe|_rjOR};4*J<2+ zJe~iX#Kv08*4dUA7%#E?qWk~tDs8M~y&b%rZU5uKm7Kl*;}vHI;!b3pEnUwY5%k|@ zV{2#W<LoUcujl&b`!>8bwq6eBTzU8Lx;Z;rx>(AI5m)K#@ZS#e=aqjyve^H@`Tu<E zzuoTt!;w((_V#qJ`qyZ<dWl;8dBukqLQ%}Fn57Ld9^(Ht9)EvJR>{Wezg;+SCueLu z{^J#l2=RuyrKgvzZh*V(5kVbWFE<}gYuo?64E{Nwil?omx2=u5gt&y{zWoyWBo4@l z{l{DXnn}5T-ca-RK5ip#WoK=D=+J)geFqL4ve_rK|KNdr2gRkW_lfVfJz!&PV<Rmo zW%sXR{r#2yaU^vs97tM18Y}+LeyRNj4oM#r|JQf^y!k)AqU+{kZBN{myu`mg`M+QL z-yi=!ndW~VuK&8#e;?NWk0#pvzilmImjCQoVt(-k=2!CH<`=KYD*1TZyLrm%I=Jwv 
[... base85-encoded GIT binary patch data for frontend/src/media/llmprovider/ollama.png (literal 23630 bytes, PNG image) omitted ...]
z@8YP0_rqq#%#Ho!6Y@NEwO@Pk4qud`S{7#sOxbGPJ0%(evXC|1)Bf&^)Td7#Ti<@K zlC!#@o9Hj@m#b!t#@%o(0c?7QQ5yUK;jJZ~BSt9@$1Sd%{&{MO<E2MG7(>#C+ts&K zqUFexR)t`3+}S}_%q7aVWCBnReS#H!YWwf`2*P?wnrWaFc2K)|vZ#ja=#?fY6pQ(S zJ(q`+1R*20*iH`|yJ_#U6)}78F7t(uwGCZHR#qvsyr5x{!J?Qai(cB61a7KIWw)Vg zGG}|b>)oQp24-fij(3VSVaVU@8x{rS0(k7i%;iK_{oT9V603uQv5b(qRf&K=x==LW zo-v;wq6^k}JN0EkJFQmBka(^;e<khk@Y7Ni)N<|qa#4CYNmf{H%X&CSLI+<{Z|?D| z(lAbKO?Slb?Nt_YdjNYVkR&C%JI}1#*&A!pJGUnl;*m;S_>AzSV+txK;zrE&zM;T{ zYp$<`eE)Y(w2dAlW$nu3q3a|^(L0nA+I)1$D}eDVI*P(KUnf2w(xfqrtChYzfJl_$ zj*m`5b%e*m@&GG9NTW5yKY9#D)QKB@6o_BRX9xDB8QR8ATw1FT{N?LYDnF_pbowcv z`vZXnt<k_i)AeYReD0w$QL8b$@$r~UWzu+5iKhoXx+Q&aYH+JjV*qS0B*aCREt6UC znVr?K;SVv~XioCdaZat|DtC{<$)~kYk5}!VUVgsBD(t%d{%|@JjOKT|Le#3<>l+%X zlk_ohuA$8%W%2DHz%E>y{pXL159$qu)V=!%)ZEGPg6vi<P1q<3P)So(3mf@YSy`U8 zi;4b9bDWsI%H7lR@-|5_1??d$FFu>6Uy)u(^cq%wfi)|{i1!=GBaUal`N*f5mj1y} z8+8fze3*Ak7);EgXz@V-qD@MGfjOJ(CKevU%~?VuH>s+O1Lvovl3+3T1M~$E^=EoV z?h>*ZqUI24?#*f<_t*XK2a1GfZi?ihzaJaR@o(K4*Ljm*e;Pt*czF%TJX`!UIWr?d zE-rX;6h&F*z-g8d%4Tcn>T2i{)3yQogYmtzYg<rJu14cD(tOY|H|JYED-T=<HX-Hv zVfP`(T+rcvfo3PxC<64*0}!{nc$}jVEGjDM<XR)yCg-ShI_6Nib+ypQM!SJKD8#af zVo1-^{ZF~~W-7y2zpodV`T5gcarnp(?vWLLP5UV5OrUNY9v1JsFcw6Y>vP?-k9w(P z5j^F)7vzEP%&YltEy+5e6+_3%WrZcwKGA25X@|Sg4Z}@?guQPEU-qS=&u@y^x&rUM ul$T4ab`HCDRNLpCs;J=d*_HpppGV#f2f~wB=3C#<O5I}7k%$hV^6bBf8TIV| literal 0 HcmV?d00001 diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx index a0169fe15..0cecaa4d1 100644 --- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx +++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx @@ -8,6 +8,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png"; import AzureOpenAiLogo from "@/media/llmprovider/azure.png"; import AnthropicLogo from "@/media/llmprovider/anthropic.png"; import GeminiLogo from "@/media/llmprovider/gemini.png"; +import OllamaLogo from "@/media/llmprovider/ollama.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; import PreLoader from "@/components/Preloader"; @@ -19,6 +20,7 @@ import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions"; import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions"; import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions"; import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions"; +import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions"; export default function GeneralLLMPreference() { const [saving, setSaving] = useState(false); @@ -163,6 +165,15 @@ export default function GeneralLLMPreference() { image={LocalAiLogo} onClick={updateLLMChoice} /> + <LLMProviderOption + name="Ollama" + value="ollama" + link="ollama.ai" + description="Run LLMs locally on your own machine." 
+ checked={llmChoice === "ollama"} + image={OllamaLogo} + onClick={updateLLMChoice} + /> {!window.location.hostname.includes("useanything.com") && ( <LLMProviderOption name="Custom Llama Model" @@ -193,6 +204,9 @@ export default function GeneralLLMPreference() { {llmChoice === "localai" && ( <LocalAiOptions settings={settings} showAlert={true} /> )} + {llmChoice === "ollama" && ( + <OllamaLLMOptions settings={settings} /> + )} {llmChoice === "native" && ( <NativeLLMOptions settings={settings} /> )} diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/DataHandling/index.jsx index cd63d74d8..81b93c5dc 100644 --- a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/DataHandling/index.jsx +++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/DataHandling/index.jsx @@ -5,6 +5,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png"; import AzureOpenAiLogo from "@/media/llmprovider/azure.png"; import AnthropicLogo from "@/media/llmprovider/anthropic.png"; import GeminiLogo from "@/media/llmprovider/gemini.png"; +import OllamaLogo from "@/media/llmprovider/ollama.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; import ChromaLogo from "@/media/vectordbs/chroma.png"; @@ -61,6 +62,13 @@ const LLM_SELECTION_PRIVACY = { ], logo: LocalAiLogo, }, + ollama: { + name: "Ollama", + description: [ + "Your model and chats are only accessible on the machine running Ollama models", + ], + logo: OllamaLogo, + }, native: { name: "Custom Llama Model", description: [ diff --git a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx index f877e31db..850dea3c2 100644 --- a/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx +++ b/frontend/src/pages/OnboardingFlow/OnboardingModal/Steps/LLMSelection/index.jsx @@ -4,6 +4,7 @@ import OpenAiLogo from "@/media/llmprovider/openai.png"; import AzureOpenAiLogo from "@/media/llmprovider/azure.png"; import AnthropicLogo from "@/media/llmprovider/anthropic.png"; import GeminiLogo from "@/media/llmprovider/gemini.png"; +import OllamaLogo from "@/media/llmprovider/ollama.png"; import LMStudioLogo from "@/media/llmprovider/lmstudio.png"; import LocalAiLogo from "@/media/llmprovider/localai.png"; import System from "@/models/system"; @@ -16,6 +17,7 @@ import LMStudioOptions from "@/components/LLMSelection/LMStudioOptions"; import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions"; import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions"; import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions"; +import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions"; function LLMSelection({ nextStep, prevStep, currentStep }) { const [llmChoice, setLLMChoice] = useState("openai"); @@ -124,13 +126,24 @@ function LLMSelection({ nextStep, prevStep, currentStep }) { onClick={updateLLMChoice} /> <LLMProviderOption - name="Custom Llama Model" - value="native" - description="Use a downloaded custom Llama model for chatting on this AnythingLLM instance." - checked={llmChoice === "native"} - image={AnythingLLMIcon} + name="Ollama" + value="ollama" + link="ollama.ai" + description="Run LLMs locally on your own machine." 
+ checked={llmChoice === "ollama"} + image={OllamaLogo} onClick={updateLLMChoice} /> + {!window.location.hostname.includes("useanything.com") && ( + <LLMProviderOption + name="Custom Llama Model" + value="native" + description="Use a downloaded custom Llama model for chatting on this AnythingLLM instance." + checked={llmChoice === "native"} + image={AnythingLLMIcon} + onClick={updateLLMChoice} + /> + )} </div> <div className="mt-4 flex flex-wrap gap-4 max-w-[752px]"> {llmChoice === "openai" && <OpenAiOptions settings={settings} />} @@ -143,6 +156,7 @@ function LLMSelection({ nextStep, prevStep, currentStep }) { <LMStudioOptions settings={settings} /> )} {llmChoice === "localai" && <LocalAiOptions settings={settings} />} + {llmChoice === "ollama" && <OllamaLLMOptions settings={settings} />} {llmChoice === "native" && <NativeLLMOptions settings={settings} />} </div> </div> diff --git a/server/.env.example b/server/.env.example index f73e0e083..07abed62f 100644 --- a/server/.env.example +++ b/server/.env.example @@ -32,6 +32,11 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea # LOCAL_AI_MODEL_TOKEN_LIMIT=4096 # LOCAL_AI_API_KEY="sk-123abc" +# LLM_PROVIDER='ollama' +# OLLAMA_BASE_PATH='http://host.docker.internal:11434' +# OLLAMA_MODEL_PREF='llama2' +# OLLAMA_MODEL_TOKEN_LIMIT=4096 + ########################################### ######## Embedding API SElECTION ########## ########################################### diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js index b5dfeb700..a66f93e19 100644 --- a/server/models/systemSettings.js +++ b/server/models/systemSettings.js @@ -126,6 +126,20 @@ const SystemSettings = { AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF, } : {}), + + ...(llmProvider === "ollama" + ? { + OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH, + OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF, + OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT, + + // For embedding credentials when ollama is selected. + OpenAiKey: !!process.env.OPEN_AI_KEY, + AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT, + AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY, + AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF, + } + : {}), ...(llmProvider === "native" ? { NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF, diff --git a/server/utils/AiProviders/ollama/index.js b/server/utils/AiProviders/ollama/index.js new file mode 100644 index 000000000..3aa58f760 --- /dev/null +++ b/server/utils/AiProviders/ollama/index.js @@ -0,0 +1,208 @@ +const { chatPrompt } = require("../../chats"); + +// Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md +class OllamaAILLM { + constructor(embedder = null) { + if (!process.env.OLLAMA_BASE_PATH) + throw new Error("No Ollama Base Path was set."); + + this.basePath = process.env.OLLAMA_BASE_PATH; + this.model = process.env.OLLAMA_MODEL_PREF; + this.limits = { + history: this.promptWindowLimit() * 0.15, + system: this.promptWindowLimit() * 0.15, + user: this.promptWindowLimit() * 0.7, + }; + + if (!embedder) + throw new Error( + "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM." + ); + this.embedder = embedder; + } + + streamingEnabled() { + return "streamChat" in this && "streamGetChatCompletion" in this; + } + + // Ensure the user set a value for the token limit + // and if undefined - assume 4096 window. 
+  promptWindowLimit() {
+    const limit = process.env.OLLAMA_MODEL_TOKEN_LIMIT || 4096;
+    if (!limit || isNaN(Number(limit)))
+      throw new Error("No Ollama token context limit was set.");
+    return Number(limit);
+  }
+
+  async isValidChatCompletionModel(_ = "") {
+    return true;
+  }
+
+  constructPrompt({
+    systemPrompt = "",
+    contextTexts = [],
+    chatHistory = [],
+    userPrompt = "",
+  }) {
+    const prompt = {
+      role: "system",
+      content: `${systemPrompt}
+Context:
+    ${contextTexts
+      .map((text, i) => {
+        return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+      })
+      .join("")}`,
+    };
+    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+  }
+
+  async isSafe(_input = "") {
+    // Not implemented so must be stubbed
+    return { safe: true, reasons: [] };
+  }
+
+  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    const textResponse = await fetch(`${this.basePath}/api/chat`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: JSON.stringify({
+        model: this.model,
+        stream: false,
+        options: {
+          temperature: Number(workspace?.openAiTemp ?? 0.7),
+        },
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      }),
+    })
+      .then((res) => {
+        if (!res.ok)
+          throw new Error(`Ollama:sendChat ${res.status} ${res.statusText}`);
+        return res.json();
+      })
+      .then((data) => data?.message?.content)
+      .catch((e) => {
+        console.error(e);
+        throw new Error(`Ollama::sendChat failed with: ${e.message}`);
+      });
+
+    if (!textResponse.length)
+      throw new Error(`Ollama::sendChat text response was empty.`);
+
+    return textResponse;
+  }
+
+  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
+    const response = await fetch(`${this.basePath}/api/chat`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: JSON.stringify({
+        model: this.model,
+        stream: true,
+        options: {
+          temperature: Number(workspace?.openAiTemp ?? 0.7),
+        },
+        messages: await this.compressMessages(
+          {
+            systemPrompt: chatPrompt(workspace),
+            userPrompt: prompt,
+            chatHistory,
+          },
+          rawHistory
+        ),
+      }),
+    }).catch((e) => {
+      console.error(e);
+      throw new Error(`Ollama:streamChat ${e.message}`);
+    });
+
+    return { type: "ollamaStream", response };
+  }
+
+  async getChatCompletion(messages = null, { temperature = 0.7 }) {
+    const textResponse = await fetch(`${this.basePath}/api/chat`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: JSON.stringify({
+        model: this.model,
+        messages,
+        stream: false,
+        options: {
+          temperature,
+        },
+      }),
+    })
+      .then((res) => {
+        if (!res.ok)
+          throw new Error(
+            `Ollama:getChatCompletion ${res.status} ${res.statusText}`
+          );
+        return res.json();
+      })
+      .then((data) => data?.message?.content)
+      .catch((e) => {
+        console.error(e);
+        throw new Error(`Ollama::getChatCompletion failed with: ${e.message}`);
+      });
+
+    if (!textResponse.length)
+      throw new Error(`Ollama::getChatCompletion text response was empty.`);
+
+    return textResponse;
+  }
+
+  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+    const response = await fetch(`${this.basePath}/api/chat`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: JSON.stringify({
+        model: this.model,
+        stream: true,
+        messages,
+        options: {
+          temperature,
+        },
+      }),
+    }).catch((e) => {
+      console.error(e);
+      throw new Error(`Ollama:streamGetChatCompletion ${e.message}`);
+    });
+
+    return { type: "ollamaStream", response };
+  }
+
+  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
+  async embedTextInput(textInput) {
+    return await this.embedder.embedTextInput(textInput);
+  }
+  async embedChunks(textChunks = []) {
+    return await this.embedder.embedChunks(textChunks);
+  }
+
+  async compressMessages(promptArgs = {}, rawHistory = []) {
+    const { messageArrayCompressor } = require("../../helpers/chat");
+    const messageArray = this.constructPrompt(promptArgs);
+    return await messageArrayCompressor(this, messageArray, rawHistory);
+  }
+}
+
+module.exports = {
+  OllamaAILLM,
+};
diff --git a/server/utils/chats/stream.js b/server/utils/chats/stream.js
index 5bdb7a1f0..b0dc9186b 100644
--- a/server/utils/chats/stream.js
+++ b/server/utils/chats/stream.js
@@ -199,6 +199,7 @@ async function streamEmptyEmbeddingChat({
   return;
 }
 
+// TODO: Refactor this implementation
 function handleStreamResponses(response, stream, responseProps) {
   const { uuid = uuidv4(), sources = [] } = responseProps;
 
@@ -231,6 +232,34 @@
     });
   }
 
+  if (stream?.type === "ollamaStream") {
+    return new Promise(async (resolve) => {
+      let fullText = "";
+      for await (const dataChunk of stream.response.body) {
+        const chunk = JSON.parse(Buffer.from(dataChunk).toString());
+        fullText += chunk.message.content;
+        writeResponseChunk(response, {
+          uuid,
+          sources: [],
+          type: "textResponseChunk",
+          textResponse: chunk.message.content,
+          close: false,
+          error: false,
+        });
+      }
+
+      writeResponseChunk(response, {
+        uuid,
+        sources,
+        type: "textResponseChunk",
+        textResponse: "",
+        close: true,
+        error: false,
+      });
+      resolve(fullText);
+    });
+  }
+
   // If stream is not a regular OpenAI Stream (like if using native model)
   // we can just iterate the stream content instead.
if (!stream.hasOwnProperty("data")) { diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js index 3b4397c31..5bd7b299e 100644 --- a/server/utils/helpers/customModels.js +++ b/server/utils/helpers/customModels.js @@ -1,4 +1,4 @@ -const SUPPORT_CUSTOM_MODELS = ["openai", "localai", "native-llm"]; +const SUPPORT_CUSTOM_MODELS = ["openai", "localai", "ollama", "native-llm"]; async function getCustomModels(provider = "", apiKey = null, basePath = null) { if (!SUPPORT_CUSTOM_MODELS.includes(provider)) @@ -9,6 +9,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) { return await openAiModels(apiKey); case "localai": return await localAIModels(basePath, apiKey); + case "ollama": + return await ollamaAIModels(basePath, apiKey); case "native-llm": return nativeLLMModels(); default: @@ -59,6 +61,37 @@ async function localAIModels(basePath = null, apiKey = null) { return { models, error: null }; } +async function ollamaAIModels(basePath = null, _apiKey = null) { + let url; + try { + new URL(basePath); + if (basePath.split("").slice(-1)?.[0] === "/") + throw new Error("BasePath Cannot end in /!"); + url = basePath; + } catch { + return { models: [], error: "Not a valid URL." }; + } + + const models = await fetch(`${url}/api/tags`) + .then((res) => { + if (!res.ok) + throw new Error(`Could not reach Ollama server! ${res.status}`); + return res.json(); + }) + .then((data) => data?.models || []) + .then((models) => + models.map((model) => { + return { id: model.name }; + }) + ) + .catch((e) => { + console.error(e); + return []; + }); + + return { models, error: null }; +} + function nativeLLMModels() { const fs = require("fs"); const path = require("path"); diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js index 115df4003..bde5e8a0a 100644 --- a/server/utils/helpers/index.js +++ b/server/utils/helpers/index.js @@ -43,6 +43,9 @@ function getLLMProvider() { case "localai": const { LocalAiLLM } = require("../AiProviders/localAi"); return new LocalAiLLM(embedder); + case "ollama": + const { OllamaAILLM } = require("../AiProviders/ollama"); + return new OllamaAILLM(embedder); case "native": const { NativeLLM } = require("../AiProviders/native"); return new NativeLLM(embedder); diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js index fe4f4f5c9..11278f97f 100644 --- a/server/utils/helpers/updateENV.js +++ b/server/utils/helpers/updateENV.js @@ -81,6 +81,19 @@ const KEY_MAPPING = { checks: [], }, + OllamaLLMBasePath: { + envKey: "OLLAMA_BASE_PATH", + checks: [isNotEmpty, validOllamaLLMBasePath], + }, + OllamaLLMModelPref: { + envKey: "OLLAMA_MODEL_PREF", + checks: [], + }, + OllamaLLMTokenLimit: { + envKey: "OLLAMA_MODEL_TOKEN_LIMIT", + checks: [nonZero], + }, + // Native LLM Settings NativeLLMModelPref: { envKey: "NATIVE_LLM_MODEL_PREF", @@ -208,6 +221,17 @@ function validLLMExternalBasePath(input = "") { } } +function validOllamaLLMBasePath(input = "") { + try { + new URL(input); + if (input.split("").slice(-1)?.[0] === "/") + return "URL cannot end with a slash"; + return null; + } catch { + return "Not a valid URL"; + } +} + function supportedLLM(input = "") { return [ "openai", @@ -216,6 +240,7 @@ function supportedLLM(input = "") { "gemini", "lmstudio", "localai", + "ollama", "native", ].includes(input); }
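
Reviewer note (not part of the patch): to sanity-check an OLLAMA_BASE_PATH value against a running Ollama server, the /api/tags call that ollamaAIModels() makes above can be exercised with a small standalone script. This is a minimal sketch assuming Node 18+ (global fetch) and an Ollama server on its default port; the file name and fallback URL are illustrative only.

// check-ollama.js -- standalone sanity check, not part of this patch.
// Assumes Node 18+ (global fetch) and an Ollama server on the default port.
const basePath = process.env.OLLAMA_BASE_PATH || "http://127.0.0.1:11434";

async function listOllamaModels() {
  // Same endpoint ollamaAIModels() queries; note the base path must not end in "/".
  const res = await fetch(`${basePath}/api/tags`);
  if (!res.ok) throw new Error(`Could not reach Ollama server! ${res.status}`);
  const data = await res.json();
  return (data?.models || []).map((model) => model.name);
}

listOllamaModels()
  .then((names) => console.log("Available models:", names))
  .catch((e) => console.error(e.message));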
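
Similarly, the request/response shape that OllamaAILLM.getChatCompletion() relies on can be exercised directly. Another minimal sketch under the same assumptions (Node 18+, a locally pulled model); the model name and messages are placeholders. With stream: false the reply arrives at data.message.content, while stream: true instead yields one JSON object per chunk, which is what the ollamaStream branch in handleStreamResponses parses.

// chat-once.js -- illustrates the /api/chat shape used by OllamaAILLM, not part of this patch.
const basePath = process.env.OLLAMA_BASE_PATH || "http://127.0.0.1:11434";

async function chatOnce() {
  const res = await fetch(`${basePath}/api/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: process.env.OLLAMA_MODEL_PREF || "llama2", // placeholder model name
      stream: false, // stream: true returns one JSON object per chunk instead
      options: { temperature: 0.7 },
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "Say hello in one short sentence." },
      ],
    }),
  });
  if (!res.ok) throw new Error(`Ollama /api/chat ${res.status} ${res.statusText}`);
  const data = await res.json();
  // Non-streaming responses carry the reply at data.message.content.
  return data?.message?.content;
}

chatOnce().then(console.log).catch((e) => console.error(e.message));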