From 3f78ef413be9e3cf51770a05c3d8ea4eeb644ab6 Mon Sep 17 00:00:00 2001
From: Sean Hatfield <seanhatfield5@gmail.com>
Date: Tue, 28 May 2024 17:17:35 -0700
Subject: [PATCH] [FEAT] Support for gemini-1.0-pro model and fixes to prompt
 window limit (#1557)

support for gemini-1.0-pro model and fixes to prompt window limit
---
 .../src/components/LLMSelection/GeminiLLMOptions/index.jsx   | 1 +
 frontend/src/hooks/useGetProvidersModels.js                  | 2 +-
 server/utils/AiProviders/gemini/index.js                     | 5 +++++
 server/utils/helpers/updateENV.js                            | 1 +
 4 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx b/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx
index 87e058827..cc25ae958 100644
--- a/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/GeminiLLMOptions/index.jsx
@@ -32,6 +32,7 @@ export default function GeminiLLMOptions({ settings }) {
               >
                 {[
                   "gemini-pro",
+                  "gemini-1.0-pro",
                   "gemini-1.5-pro-latest",
                   "gemini-1.5-flash-latest",
                 ].map((model) => {
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index 6687f0a7b..d61d4c36d 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -10,7 +10,7 @@ export const DISABLED_PROVIDERS = [
 ];
 const PROVIDER_DEFAULT_MODELS = {
   openai: [],
-  gemini: ["gemini-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest"],
+  gemini: ["gemini-pro", "gemini-1.0-pro", "gemini-1.5-pro-latest", "gemini-1.5-flash-latest"],
   anthropic: [
     "claude-instant-1.2",
     "claude-2.0",
diff --git a/server/utils/AiProviders/gemini/index.js b/server/utils/AiProviders/gemini/index.js
index 30c9ffa35..ef1845801 100644
--- a/server/utils/AiProviders/gemini/index.js
+++ b/server/utils/AiProviders/gemini/index.js
@@ -91,6 +91,10 @@ class GeminiLLM {
     switch (this.model) {
       case "gemini-pro":
         return 30_720;
+      case "gemini-1.0-pro":
+        return 30_720;
+      case "gemini-1.5-flash-latest":
+        return 1_048_576;
       case "gemini-1.5-pro-latest":
         return 1_048_576;
       default:
@@ -101,6 +105,7 @@ class GeminiLLM {
   isValidChatCompletionModel(modelName = "") {
     const validModels = [
       "gemini-pro",
+      "gemini-1.0-pro",
       "gemini-1.5-pro-latest",
       "gemini-1.5-flash-latest",
     ];
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index d6900ae57..d5cdc68f2 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -532,6 +532,7 @@ function supportedTranscriptionProvider(input = "") {
 function validGeminiModel(input = "") {
   const validModels = [
     "gemini-pro",
+    "gemini-1.0-pro",
     "gemini-1.5-pro-latest",
     "gemini-1.5-flash-latest",
   ];