From 3fd8614a4ba74dea200104e1570a351b37ffdb60 Mon Sep 17 00:00:00 2001
From: Debanjum
Date: Sun, 8 Dec 2024 18:05:14 -0800
Subject: [PATCH] Only auto load available chat models from Ollama provider for now

Auto-loading models from any OpenAI-compatible proxy service makes the
chat model list too unwieldy, and many of these services do not even
support the model listing endpoint.
---
 src/khoj/utils/initialization.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/khoj/utils/initialization.py b/src/khoj/utils/initialization.py
index 2677da03..4dd46e5b 100644
--- a/src/khoj/utils/initialization.py
+++ b/src/khoj/utils/initialization.py
@@ -235,6 +235,10 @@ def initialization(interactive: bool = True):
             # Get OpenAI configs with custom base URLs
             custom_configs = AiModelApi.objects.exclude(api_base_url__isnull=True)
 
+            # Only enable for whitelisted provider names (i.e Ollama) for now
+            # TODO: This is hacky. Will be replaced with more robust solution based on provider type enum
+            custom_configs = custom_configs.filter(name__in=["Ollama"])
+
             for config in custom_configs:
                 try:
                     # Create OpenAI client with custom base URL
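
For context, a minimal sketch of the model listing call the commit message
refers to. It is not part of the patch: the base_url and api_key values are
assumptions (Ollama's default local port, and a placeholder key, since Ollama
does not validate it), while openai.OpenAI and client.models.list() are the
standard OpenAI Python SDK calls the initialization code builds on.

    # Minimal sketch: list chat models served by an OpenAI-compatible
    # provider, as the auto-load path does for the whitelisted "Ollama"
    # provider. Assumes Ollama is running locally on its default port.
    import openai

    client = openai.OpenAI(
        api_key="unused",  # assumption: Ollama accepts any placeholder key
        base_url="http://localhost:11434/v1",  # Ollama's OpenAI-compatible API
    )

    # Each entry's `id` is a model name (e.g. "llama3.1:8b") that would be
    # registered as an available chat model.
    for model in client.models.list():
        print(model.id)

Other OpenAI proxy services could be pointed at the same code path by changing
base_url, but, as the commit message notes, many of them do not implement this
models endpoint, which is why auto-loading is restricted to the Ollama
provider for now.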