From ded606c7cb738de611f11f5e98a5822d1fa3bbb9 Mon Sep 17 00:00:00 2001
From: Debanjum Singh Solanky
Date: Mon, 31 Jul 2023 17:07:20 -0700
Subject: [PATCH] Fix format of user query during general conversation with
 Llama 2

---
 src/khoj/processor/conversation/gpt4all/chat_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/khoj/processor/conversation/gpt4all/chat_model.py b/src/khoj/processor/conversation/gpt4all/chat_model.py
index c9e33c6a..20efa7a1 100644
--- a/src/khoj/processor/conversation/gpt4all/chat_model.py
+++ b/src/khoj/processor/conversation/gpt4all/chat_model.py
@@ -125,7 +125,7 @@ def converse_offline(
     # Get Conversation Primer appropriate to Conversation Type
     # TODO If compiled_references_message is too long, we need to truncate it.
     if compiled_references_message == "":
-        conversation_primer = prompts.conversation_llamav2.format(query=user_query)
+        conversation_primer = user_query
     else:
         conversation_primer = prompts.notes_conversation_llamav2.format(
             query=user_query, references=compiled_references_message