From 4a234c8db3edc2969892aef8cb86548cddca8583 Mon Sep 17 00:00:00 2001
From: Debanjum Singh Solanky
Date: Wed, 3 Jan 2024 13:51:15 +0530
Subject: [PATCH] Use default offline/openai chat model to extract DB search
 queries

Make usage of the first offline/openai chat model as the default LLM for
background tasks more explicit.

The idea is to use the default (i.e. first) chat model for all background
activities, like extracting search queries to run from the user message.
This default is controlled by the server admin. The chat model set by the
user is only used for user-facing functions like generating chat responses.
---
 src/khoj/database/adapters/__init__.py | 4 ++--
 src/khoj/routers/api.py                | 7 ++++---
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/khoj/database/adapters/__init__.py b/src/khoj/database/adapters/__init__.py
index 159d9306..014db373 100644
--- a/src/khoj/database/adapters/__init__.py
+++ b/src/khoj/database/adapters/__init__.py
@@ -382,7 +382,7 @@ class ConversationAdapters:
         return await OfflineChatProcessorConversationConfig.objects.filter(enabled=True).aexists()
 
     @staticmethod
-    async def get_offline_chat():
+    async def get_default_offline_llm():
         return await ChatModelOptions.objects.filter(model_type="offline").afirst()
 
     @staticmethod
@@ -397,7 +397,7 @@ class ConversationAdapters:
         return await OpenAIProcessorConversationConfig.objects.filter().aexists()
 
     @staticmethod
-    async def get_openai_chat():
+    async def get_default_openai_llm():
         return await ChatModelOptions.objects.filter(model_type="openai").afirst()
 
     @staticmethod
diff --git a/src/khoj/routers/api.py b/src/khoj/routers/api.py
index 9ba2559c..575f094c 100644
--- a/src/khoj/routers/api.py
+++ b/src/khoj/routers/api.py
@@ -853,8 +853,8 @@ async def extract_references_and_questions(
         and conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE
     ):
         using_offline_chat = True
-        offline_chat = await ConversationAdapters.get_offline_chat()
-        chat_model = offline_chat.chat_model
+        default_offline_llm = await ConversationAdapters.get_default_offline_llm()
+        chat_model = default_offline_llm.chat_model
         if state.gpt4all_processor_config is None:
             state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model=chat_model)
 
@@ -865,8 +865,9 @@ async def extract_references_and_questions(
         )
     elif conversation_config and conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
         openai_chat_config = await ConversationAdapters.get_openai_chat_config()
+        default_openai_llm = await ConversationAdapters.get_default_openai_llm()
         api_key = openai_chat_config.api_key
-        chat_model = conversation_config.chat_model
+        chat_model = default_openai_llm.chat_model
         inferred_queries = extract_questions(
             defiltered_query, model=chat_model, api_key=api_key, conversation_log=meta_log
         )
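
Note for reviewers: a minimal sketch of the model-selection behaviour this
patch makes explicit, condensed from the hunks above. It relies only on the
adapter methods and the ChatModelOptions.ModelType enum shown in the diff;
the helper name `pick_background_chat_model` and the import paths are
illustrative assumptions, not part of the patch.

    from khoj.database.adapters import ConversationAdapters
    from khoj.database.models import ChatModelOptions  # assumed import path

    async def pick_background_chat_model(conversation_config) -> str:
        # Hypothetical helper: background tasks (e.g. extracting search
        # queries from the user message) use the server-admin controlled
        # default, i.e. the first chat model of the relevant type, rather
        # than the chat model the user picked for chat responses.
        if conversation_config and conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE:
            default_offline_llm = await ConversationAdapters.get_default_offline_llm()
            return default_offline_llm.chat_model
        default_openai_llm = await ConversationAdapters.get_default_openai_llm()
        return default_openai_llm.chat_model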