Use default offline/openai chat model to extract DB search queries

Make the use of the first offline/openai chat model as the default LLM
for background tasks more explicit.

The idea is to use the default (first) chat model for all background
activities, like extracting the search queries to run from a user's
message. This default is controlled by the server admin.

The chat model set by the user is still used for user-facing functions
like generating chat responses.
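
As a rough sketch of the intended split (illustrative only, not code
from this commit; get_user_chat_model is a hypothetical helper):

    # Background tasks (e.g. extracting search queries) use the
    # admin-controlled default: the first configured chat model.
    default_llm = await ConversationAdapters.get_default_offline_llm()

    # User-facing responses keep using the model the user selected.
    user_llm = await get_user_chat_model(user)  # hypothetical helper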
Author: Debanjum Singh Solanky
Date: 2024-01-03 13:51:15 +05:30
Parent: e28adf2884
Commit: 4a234c8db3
2 changed files with 6 additions and 5 deletions


@@ -382,7 +382,7 @@ class ConversationAdapters:
         return await OfflineChatProcessorConversationConfig.objects.filter(enabled=True).aexists()
 
     @staticmethod
-    async def get_offline_chat():
+    async def get_default_offline_llm():
         return await ChatModelOptions.objects.filter(model_type="offline").afirst()
 
     @staticmethod
@@ -397,7 +397,7 @@ class ConversationAdapters:
         return await OpenAIProcessorConversationConfig.objects.filter().aexists()
 
     @staticmethod
-    async def get_openai_chat():
+    async def get_default_openai_llm():
         return await ChatModelOptions.objects.filter(model_type="openai").afirst()
 
     @staticmethod
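
Since Django's async .afirst() returns None when no ChatModelOptions
row of the requested model_type exists, these helpers assume the server
admin has configured at least one chat model. A defensive caller could
guard for that (the guard is an assumption, not part of this commit):

    default_offline_llm = await ConversationAdapters.get_default_offline_llm()
    if default_offline_llm is None:
        raise ValueError("No offline chat model configured by the server admin")
    chat_model = default_offline_llm.chat_model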


@@ -853,8 +853,8 @@ async def extract_references_and_questions(
         and conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE
     ):
         using_offline_chat = True
-        offline_chat = await ConversationAdapters.get_offline_chat()
-        chat_model = offline_chat.chat_model
+        default_offline_llm = await ConversationAdapters.get_default_offline_llm()
+        chat_model = default_offline_llm.chat_model
 
         if state.gpt4all_processor_config is None:
             state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model=chat_model)
@@ -865,8 +865,9 @@ async def extract_references_and_questions(
         )
     elif conversation_config and conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
         openai_chat_config = await ConversationAdapters.get_openai_chat_config()
+        default_openai_llm = await ConversationAdapters.get_default_openai_llm()
         api_key = openai_chat_config.api_key
-        chat_model = conversation_config.chat_model
+        chat_model = default_openai_llm.chat_model
         inferred_queries = extract_questions(
             defiltered_query, model=chat_model, api_key=api_key, conversation_log=meta_log
         )
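
With this change both branches of extract_references_and_questions
follow the same pattern, condensed here as a paraphrase of the hunks
above (not verbatim code):

    # Background query extraction always reads the admin-controlled
    # default, never the conversation's user-selected chat model.
    if using_offline_chat:
        llm = await ConversationAdapters.get_default_offline_llm()
    else:
        llm = await ConversationAdapters.get_default_openai_llm()
    chat_model = llm.chat_model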