From 897e168fd1dda3ea762e4dcee46ed01a25935524 Mon Sep 17 00:00:00 2001
From: Sean Hatfield <seanhatfield5@gmail.com>
Date: Mon, 22 Apr 2024 13:14:27 -0700
Subject: [PATCH] [FEAT] Add support for more groq models (Llama 3 and Gemma)
 (#1143)

add support for more groq models
---
 .../LLMSelection/GroqAiOptions/index.jsx    |  8 +++++++-
 frontend/src/hooks/useGetProvidersModels.js |  8 +++++++-
 server/utils/AiProviders/groq/index.js      | 15 +++++++++++++--
 3 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
index c85f0f1e0..ceee0d703 100644
--- a/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/GroqAiOptions/index.jsx
@@ -28,7 +28,13 @@ export default function GroqAiOptions({ settings }) {
           required={true}
           className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
         >
-          {["llama2-70b-4096", "mixtral-8x7b-32768"].map((model) => {
+          {[
+            "llama2-70b-4096",
+            "mixtral-8x7b-32768",
+            "llama3-8b-8192",
+            "llama3-70b-8192",
+            "gemma-7b-it",
+          ].map((model) => {
             return (
               <option key={model} value={model}>
                 {model}
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index b6cb403e1..aea66146a 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -19,7 +19,13 @@ const PROVIDER_DEFAULT_MODELS = {
   localai: [],
   ollama: [],
   togetherai: [],
-  groq: ["llama2-70b-4096", "mixtral-8x7b-32768"],
+  groq: [
+    "llama2-70b-4096",
+    "mixtral-8x7b-32768",
+    "llama3-8b-8192",
+    "llama3-70b-8192",
+    "gemma-7b-it",
+  ],
   native: [],
 };
 
diff --git a/server/utils/AiProviders/groq/index.js b/server/utils/AiProviders/groq/index.js
index 1b15fe1fe..c556d0357 100644
--- a/server/utils/AiProviders/groq/index.js
+++ b/server/utils/AiProviders/groq/index.js
@@ -40,20 +40,31 @@ class GroqLLM {
   streamingEnabled() {
     return "streamChat" in this && "streamGetChatCompletion" in this;
   }
-
   promptWindowLimit() {
     switch (this.model) {
       case "llama2-70b-4096":
         return 4096;
       case "mixtral-8x7b-32768":
         return 32_768;
+      case "llama3-8b-8192":
+        return 8192;
+      case "llama3-70b-8192":
+        return 8192;
+      case "gemma-7b-it":
+        return 8192;
       default:
         return 4096;
     }
   }
 
   async isValidChatCompletionModel(modelName = "") {
-    const validModels = ["llama2-70b-4096", "mixtral-8x7b-32768"];
+    const validModels = [
+      "llama2-70b-4096",
+      "mixtral-8x7b-32768",
+      "llama3-8b-8192",
+      "llama3-70b-8192",
+      "gemma-7b-it",
+    ];
     const isPreset = validModels.some((model) => modelName === model);
     if (isPreset) return true;
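
Note for reviewers: each of the three files repeats the same five model IDs, and the server-side switch pairs each ID with its context window. The sketch below condenses that mapping into a standalone lookup table so the pairings are easy to scan; the GROQ_CONTEXT_WINDOWS table and the promptWindowLimitFor() helper are illustrative names, not part of this patch or the AnythingLLM codebase, but the values and the 4096-token fallback mirror promptWindowLimit() exactly.

// Illustrative sketch only -- not part of the patch. Condenses the switch
// added to promptWindowLimit() in server/utils/AiProviders/groq/index.js
// into a lookup table keyed by Groq model ID.
const GROQ_CONTEXT_WINDOWS = {
  "llama2-70b-4096": 4096,
  "mixtral-8x7b-32768": 32_768,
  "llama3-8b-8192": 8192,
  "llama3-70b-8192": 8192,
  "gemma-7b-it": 8192,
};

// Unknown model IDs fall back to 4096 tokens, matching the switch default.
function promptWindowLimitFor(model) {
  return GROQ_CONTEXT_WINDOWS[model] ?? 4096;
}

console.log(promptWindowLimitFor("llama3-70b-8192"));    // 8192
console.log(promptWindowLimitFor("gemma-7b-it"));        // 8192
console.log(promptWindowLimitFor("some-future-model"));  // 4096 (fallback)

Both Llama 3 sizes and Gemma report 8,192-token windows, so Mixtral (32,768 tokens) remains the largest context offered through the Groq provider after this change.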