Mirror of https://github.com/khoj-ai/khoj.git
Put offline model response generation behind the chat lock as well
Not just the chat response streaming
parent 1812473d27
commit 44292afff2
1 changed file with 2 additions and 1 deletion
@@ -165,8 +165,9 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     templated_system_message = prompts.system_prompt_llamav2.format(message=system_message.content)
     templated_user_message = prompts.general_conversation_llamav2.format(query=user_message.content)
     prompted_message = templated_system_message + chat_history + templated_user_message
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+
     state.chat_lock.acquire()
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
     try:
         for response in response_iterator:
             if any(stop_word in response.strip() for stop_word in stop_words):
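For context on why the generate() call moved: below is a minimal sketch of the locking pattern this commit adopts, with a stub generator standing in for GPT4All.generate(..., streaming=True). The stub_generate and stream_response names are hypothetical illustrations, not khoj's actual code (which lives in llm_thread above); the point is that the lock is acquired before the iterator is created, presumably because the generate() call itself can do work on the shared model, not just the streaming loop.

import threading

# Hypothetical stand-in for khoj's shared state.chat_lock.
chat_lock = threading.Lock()

def stub_generate(prompt: str):
    """Stub for GPT4All.generate(..., streaming=True): yields tokens lazily."""
    for token in prompt.split():
        yield token

def stream_response(prompt: str):
    # Acquire the lock before creating the response iterator, mirroring the
    # commit: any setup work done at generate() time is then serialized
    # across chat threads along with the streaming itself.
    chat_lock.acquire()
    response_iterator = stub_generate(prompt)
    try:
        for token in response_iterator:
            print(token, end=" ", flush=True)
    finally:
        # Release in finally so an error mid-stream cannot deadlock the
        # next chat request.
        chat_lock.release()

stream_response("Hello from the offline chat model")

A `with chat_lock:` block would be equivalent; the explicit acquire/try/finally shape here simply matches the surrounding code in the diff.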