diff --git a/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx b/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
index 456b50427..8f5a00b66 100644
--- a/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
@@ -31,40 +31,36 @@ export default function GenericOpenAiOptions({ settings }) {
           spellCheck={false}
         />
       </div>
-      {!settings?.credentialsOnly && (
-        <>
-          <div className="flex flex-col w-60">
-            <label className="text-white text-sm font-semibold block mb-4">
-              Chat Model Name
-            </label>
-            <input
-              type="text"
-              name="GenericOpenAiModelPref"
-              className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
-              placeholder="Model id used for chat requests"
-              defaultValue={settings?.GenericOpenAiModelPref}
-              required={true}
-              autoComplete="off"
-            />
-          </div>
-          <div className="flex flex-col w-60">
-            <label className="text-white text-sm font-semibold block mb-4">
-              Token context window
-            </label>
-            <input
-              type="number"
-              name="GenericOpenAiTokenLimit"
-              className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
-              placeholder="Content window limit (eg: 4096)"
-              min={1}
-              onScroll={(e) => e.target.blur()}
-              defaultValue={settings?.GenericOpenAiTokenLimit}
-              required={true}
-              autoComplete="off"
-            />
-          </div>
-        </>
-      )}
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Chat Model Name
+        </label>
+        <input
+          type="text"
+          name="GenericOpenAiModelPref"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Model id used for chat requests"
+          defaultValue={settings?.GenericOpenAiModelPref}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Token context window
+        </label>
+        <input
+          type="number"
+          name="GenericOpenAiTokenLimit"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Context window limit (e.g., 4096)"
+          min={1}
+          onScroll={(e) => e.target.blur()}
+          defaultValue={settings?.GenericOpenAiTokenLimit}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
     </div>
   );
 }
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index 0575f34c0..271d91974 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -159,6 +159,12 @@ export const AVAILABLE_LLM_PROVIDERS = [
     options: (settings) => <GenericOpenAiOptions settings={settings} />,
     description:
       "Connect to any OpenAi-compatible service via a custom configuration",
+    requiredConfig: [
+      "GenericOpenAiBasePath",
+      "GenericOpenAiModelPref",
+      "GenericOpenAiTokenLimit",
+      "GenericOpenAiKey",
+    ],
   },
   {
     name: "Native",
diff --git a/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx
index 19e6e5daf..511ed34bc 100644
--- a/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/ChatSettings/WorkspaceLLMSelection/index.jsx
@@ -5,6 +5,9 @@ import { AVAILABLE_LLM_PROVIDERS } from "@/pages/GeneralSettings/LLMPreference";
 import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
 import ChatModelSelection from "../ChatModelSelection";
 
+// Some providers offer no model choice (a single fixed model per connection),
+// so there is no selection to make and the model picker is skipped entirely.
+const NO_MODEL_SELECTION = ["default", "huggingface", "generic-openai"];
 const DISABLED_PROVIDERS = ["azure", "lmstudio", "native"];
 const LLM_DEFAULT = {
   name: "System default",
@@ -145,7 +148,7 @@ export default function WorkspaceLLMSelection({
           </button>
         )}
       </div>
-      {selectedLLM !== "default" && (
+      {!NO_MODEL_SELECTION.includes(selectedLLM) && (
         <div className="mt-4 flex flex-col gap-y-1">
           <ChatModelSelection
             provider={selectedLLM}