Put offline model response generation behind the chat lock as well

Not just the chat response streaming
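
For illustration, a minimal sketch of the pattern this commit moves to, assuming `state.chat_lock` is a standard `threading.Lock`; the names `chat_lock` and `generate_locked` below are hypothetical stand-ins for khoj's actual module layout. The point is that the lock is acquired before `model.generate` is called, so prompt evaluation is serialized together with the token streaming loop, and the lock is released in a `finally` block.

import threading

# Sketch only: chat_lock and generate_locked are stand-ins for khoj's
# state.chat_lock and llm_thread; model is assumed to be a gpt4all.GPT4All instance.
chat_lock = threading.Lock()

def generate_locked(model, prompted_message, stop_words):
    # Acquire the lock before calling model.generate, so generation itself is
    # serialized, not just the streaming loop below.
    chat_lock.acquire()
    try:
        response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
        chunks = []
        for response in response_iterator:
            # Stop streaming once any stop word appears in the chunk.
            if any(stop_word in response.strip() for stop_word in stop_words):
                break
            chunks.append(response)
        return "".join(chunks)
    finally:
        # Release the lock even if generation raises, so other chat requests are not blocked.
        chat_lock.release()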
Debanjum Singh Solanky 2023-08-01 21:52:24 -07:00
parent 1812473d27
commit 44292afff2


@@ -165,8 +165,9 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content)
     templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content)
     prompted_message = templated_system_message + chat_history + templated_user_message
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
     state.chat_lock.acquire()
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
     try:
         for response in response_iterator:
             if any(stop_word in response.strip() for stop_word in stop_words):