From 44292afff266066efb58cf5bc97e682627d7516b Mon Sep 17 00:00:00 2001
From: Debanjum Singh Solanky
Date: Tue, 1 Aug 2023 21:52:24 -0700
Subject: [PATCH] Put offline model response generation behind the chat lock
 as well

Not just the chat response streaming
---
 src/khoj/processor/conversation/gpt4all/chat_model.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/khoj/processor/conversation/gpt4all/chat_model.py b/src/khoj/processor/conversation/gpt4all/chat_model.py
index d0c4ff31..ba3966f6 100644
--- a/src/khoj/processor/conversation/gpt4all/chat_model.py
+++ b/src/khoj/processor/conversation/gpt4all/chat_model.py
@@ -165,8 +165,9 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
         templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content)
         templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content)
         prompted_message = templated_system_message + chat_history + templated_user_message
-        response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+
         state.chat_lock.acquire()
+        response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
         try:
             for response in response_iterator:
                 if any(stop_word in response.strip() for stop_word in stop_words):
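
Note for reviewers: after this change the control flow in llm_thread reduces to the
locking pattern sketched below. This is a minimal, self-contained sketch rather than
the actual khoj module: it assumes state.chat_lock is a plain threading.Lock, assumes
the lock is released in a finally block once streaming finishes (the release sits
outside the truncated hunk above), and uses a hypothetical g.send() call to stand in
for however llm_thread forwards tokens to the streaming response.

import threading

chat_lock = threading.Lock()  # stand-in for khoj's shared state.chat_lock

def llm_thread_sketch(g, prompted_message, model, stop_words):
    # Acquire the lock *before* calling model.generate() so that prompt
    # evaluation by the offline model is serialized across requests,
    # not just the token streaming that follows it.
    chat_lock.acquire()
    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
    try:
        for response in response_iterator:
            if any(stop_word in response.strip() for stop_word in stop_words):
                break  # assumed behavior: stop emitting once a stop word appears
            g.send(response)  # hypothetical: hand the token to the response stream
    finally:
        chat_lock.release()  # always release, even if generation or streaming raises

An equivalent shape would be `with state.chat_lock:` wrapped around both the generate
call and the streaming loop, which gives the same guarantee that the lock is released
on errors or early exits.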