diff --git a/src/khoj/processor/conversation/gpt4all/chat_model.py b/src/khoj/processor/conversation/gpt4all/chat_model.py
index d0c4ff31..ba3966f6 100644
--- a/src/khoj/processor/conversation/gpt4all/chat_model.py
+++ b/src/khoj/processor/conversation/gpt4all/chat_model.py
@@ -165,8 +165,9 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content)
     templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content)
     prompted_message = templated_system_message + chat_history + templated_user_message
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+    state.chat_lock.acquire()
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
     try:
         for response in response_iterator:
             if any(stop_word in response.strip() for stop_word in stop_words):
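
The hunk above guards the shared GPT4All model's generate() call with state.chat_lock, so concurrent chat threads cannot drive the same in-process model at once. The matching release is not visible in this truncated hunk; presumably it happens in a finally clause after the streaming loop. Below is a minimal sketch of the pattern, assuming state.chat_lock is a standard threading.Lock; stream_tokens() and the model/generator objects are hypothetical stand-ins, not part of the original patch.

    # Minimal sketch of the locking pattern introduced by the hunk above.
    # Assumption: state.chat_lock is a plain threading.Lock shared by all
    # chat threads; stream_tokens() is a hypothetical stand-in for llm_thread().
    import threading

    chat_lock = threading.Lock()  # stands in for state.chat_lock

    def stream_tokens(model, prompted_message, g, stop_words):
        # Serialize access to the shared model: only one thread may run
        # model.generate() at a time.
        chat_lock.acquire()
        response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
        try:
            for response in response_iterator:
                if any(stop_word in response.strip() for stop_word in stop_words):
                    break  # stop streaming once a stop word appears
                g.send(response)
        finally:
            # Release even when generation raises or a stop word ends the
            # loop early, so later requests do not deadlock.
            chat_lock.release()
            g.close()

One caveat worth noting: as in the hunk, model.generate() is called between acquire() and the try block, so an exception raised while constructing the iterator would leave the lock held. Moving that call inside the try, or wrapping the whole section in a with chat_lock: block, would avoid that.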