diff --git a/server/utils/AiProviders/nvidiaNim/index.js b/server/utils/AiProviders/nvidiaNim/index.js
index 6deb7b2e4..554b0eec5 100644
--- a/server/utils/AiProviders/nvidiaNim/index.js
+++ b/server/utils/AiProviders/nvidiaNim/index.js
@@ -45,7 +45,7 @@ class NvidiaNimLLM {
   }
 
   /**
-   * Set the model token limit `NVIDIA_NIM_LLM_TOKEN_LIMIT` for the given model ID
+   * Set the model token limit `NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT` for the given model ID
    * @param {string} modelId
    * @param {string} basePath
    * @returns {Promise<void>}
@@ -69,7 +69,7 @@ class NvidiaNimLLM {
     if (!model.length) return;
     const modelInfo = model.find((model) => model.id === modelId);
     if (!modelInfo) return;
-    process.env.NVIDIA_NIM_LLM_TOKEN_LIMIT = Number(
+    process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT = Number(
       modelInfo.max_model_len || 4096
     );
   }
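
Note on the hunk above: the limit stored in the environment here is presumably read back when the provider sizes its context window. A minimal sketch of that read path in plain Node, assuming the same 4096-token fallback the hunk uses; getNvidiaNimTokenLimit is a hypothetical name for illustration, not a function from this repository:

// Hypothetical helper (illustration only, not repo code) showing how the
// renamed variable would typically be consumed: parse it as a number and
// fall back to 4096 when it was never set or is not a positive number.
function getNvidiaNimTokenLimit() {
  const limit = Number(process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT);
  return Number.isFinite(limit) && limit > 0 ? limit : 4096;
}

console.log(getNvidiaNimTokenLimit()); // 4096 unless the variable was set
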
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 3165dc40a..2ae930a6c 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -965,6 +965,9 @@ function dumpENV() {
     "SIMPLE_SSO_ENABLED",
     // Community Hub
     "COMMUNITY_HUB_BUNDLE_DOWNLOADS_ENABLED",
+
+    // Nvidia NIM keys that are automatically managed
+    "NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT",
   ];
 
   // Simple sanitization of each value to prevent ENV injection via newline or quote escaping.
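
Note on the second hunk: adding the renamed key to this allow-list means dumpENV() includes the automatically managed limit when the environment is dumped, presumably so the value is not lost on a rewrite of the .env contents. A simplified sketch of that allow-list-and-sanitize pattern, based only on the surrounding comment about newline and quote escaping; KEYS_TO_PERSIST and collectEnvForDump are illustrative names, not code from the repository:

// Illustrative sketch: keep only allow-listed keys and strip characters
// (newlines, double quotes) that could break the dumped .env format.
const KEYS_TO_PERSIST = [
  "SIMPLE_SSO_ENABLED",
  "COMMUNITY_HUB_BUNDLE_DOWNLOADS_ENABLED",
  "NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT",
];

function collectEnvForDump(env = process.env) {
  const out = {};
  for (const key of KEYS_TO_PERSIST) {
    if (env[key] === undefined) continue;
    out[key] = String(env[key]).replace(/[\r\n"]/g, "");
  }
  return out;
}

console.log(collectEnvForDump({ NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT: "8192" }));
// { NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT: '8192' }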