diff --git a/docker/.env.example b/docker/.env.example
index 058046596..2b3d10629 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -164,6 +164,7 @@ GID='1000'
 # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
 # EMBEDDING_BASE_PATH='http://127.0.0.1:4000'
 # GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc'
+# GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS=500
 
 ###########################################
 ######## Vector Database Selection ########
@@ -299,4 +300,4 @@ GID='1000'
 
 # Enable simple SSO passthrough to pre-authenticate users from a third party service.
 # See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
-# SIMPLE_SSO_ENABLED=1
\ No newline at end of file
+# SIMPLE_SSO_ENABLED=1
diff --git a/frontend/src/components/EmbeddingSelection/GenericOpenAiOptions/index.jsx b/frontend/src/components/EmbeddingSelection/GenericOpenAiOptions/index.jsx
index e524a263e..84ae4ab8b 100644
--- a/frontend/src/components/EmbeddingSelection/GenericOpenAiOptions/index.jsx
+++ b/frontend/src/components/EmbeddingSelection/GenericOpenAiOptions/index.jsx
@@ -1,4 +1,8 @@
+import React, { useState } from "react";
+import { CaretDown, CaretUp } from "@phosphor-icons/react";
+
 export default function GenericOpenAiEmbeddingOptions({ settings }) {
+  const [showAdvancedControls, setShowAdvancedControls] = useState(false);
   return (
     <div className="w-full flex flex-col gap-y-7">
       <div className="w-full flex items-center gap-[36px] mt-1.5 flex-wrap">
@@ -69,6 +73,46 @@ export default function GenericOpenAiEmbeddingOptions({ settings }) {
           />
         </div>
       </div>
+      <div className="flex justify-start mt-4">
+        <button
+          onClick={(e) => {
+            e.preventDefault();
+            setShowAdvancedControls(!showAdvancedControls);
+          }}
+          className="text-white hover:text-white/70 flex items-center text-sm"
+        >
+          {showAdvancedControls ? "Hide" : "Show"} advanced settings
+          {showAdvancedControls ? (
+            <CaretUp size={14} className="ml-1" />
+          ) : (
+            <CaretDown size={14} className="ml-1" />
+          )}
+        </button>
+      </div>
+      <div hidden={!showAdvancedControls}>
+        <div className="w-full flex items-start gap-4">
+          <div className="flex flex-col w-60">
+            <div className="flex flex-col gap-y-1 mb-4">
+              <label className="text-white text-sm font-semibold flex items-center gap-x-2">
+                Max concurrent chunks
+                <p className="!text-xs !italic !font-thin">optional</p>
+              </label>
+            </div>
+            <input
+              type="number"
+              name="GenericOpenAiEmbeddingMaxConcurrentChunks"
+              className="bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
+              placeholder="500"
+              min={1}
+              onScroll={(e) => e.target.blur()}
+              defaultValue={settings?.GenericOpenAiEmbeddingMaxConcurrentChunks}
+              required={false}
+              autoComplete="off"
+              spellCheck={false}
+            />
+          </div>
+        </div>
+      </div>
     </div>
   );
 }
diff --git a/server/.env.example b/server/.env.example
index 723b3a644..ba56517a8 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -154,6 +154,7 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
 # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
 # EMBEDDING_BASE_PATH='http://127.0.0.1:4000'
 # GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc'
+# GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS=500
 
 ###########################################
 ######## Vector Database Selection ########
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 9bd4a7bf1..dd54b8e36 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -193,6 +193,8 @@ const SystemSettings = {
         process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH,
       GenericOpenAiEmbeddingApiKey:
         !!process.env.GENERIC_OPEN_AI_EMBEDDING_API_KEY,
+      GenericOpenAiEmbeddingMaxConcurrentChunks:
+        process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS || 500,
 
       // --------------------------------------------------------
       // VectorDB Provider Selection Settings & Configs
diff --git a/server/utils/EmbeddingEngines/genericOpenAi/index.js b/server/utils/EmbeddingEngines/genericOpenAi/index.js
index d3ec30721..f26a070cc 100644
--- a/server/utils/EmbeddingEngines/genericOpenAi/index.js
+++ b/server/utils/EmbeddingEngines/genericOpenAi/index.js
@@ -14,13 +14,26 @@ class GenericOpenAiEmbedder {
     });
     this.model = process.env.EMBEDDING_MODEL_PREF ?? null;
 
-    // Limit of how many strings we can process in a single pass to stay with resource or network limits
-    this.maxConcurrentChunks = 500;
-
+    // maxConcurrentChunks is provided by the env-driven getter defined below.
     // Refer to your specific model and provider you use this class with to determine a valid maxChunkLength
     this.embeddingMaxChunkLength = 8_191;
   }
 
+  /**
+   * Returns the `GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS` env variable
+   * as a number, or 500 if the env variable is unset or is not a number.
+   * @returns {number}
+   */
+  get maxConcurrentChunks() {
+    if (!process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS)
+      return 500;
+    if (
+      isNaN(Number(process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS))
+    )
+      return 500;
+    return Number(process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS);
+  }
+
   async embedTextInput(textInput) {
     const result = await this.embedChunks(
       Array.isArray(textInput) ? textInput : [textInput]
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index d547930a5..2c4384621 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -267,6 +267,10 @@ const KEY_MAPPING = {
     envKey: "GENERIC_OPEN_AI_EMBEDDING_API_KEY",
     checks: [],
   },
+  GenericOpenAiEmbeddingMaxConcurrentChunks: {
+    envKey: "GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS",
+    checks: [nonZero],
+  },
 
   // Vector Database Selection Settings
   VectorDB: {