mirror of
https://github.com/khoj-ai/khoj.git
synced 2025-02-17 08:04:21 +00:00
Do not log the gpt4all chat response stream in khoj backend
Stream floods stdout and does not provide useful info to user
This commit is contained in:
parent
5ccb01343e
commit
f76af869f1
1 changed file with 0 additions and 1 deletion
|
@ -163,6 +163,5 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
|
||||||
prompted_message = templated_system_message + chat_history + templated_user_message
|
prompted_message = templated_system_message + chat_history + templated_user_message
|
||||||
response_iterator = model.generate(prompted_message, streaming=True, max_tokens=2000)
|
response_iterator = model.generate(prompted_message, streaming=True, max_tokens=2000)
|
||||||
for response in response_iterator:
|
for response in response_iterator:
|
||||||
logger.info(response)
|
|
||||||
g.send(response)
|
g.send(response)
|
||||||
g.close()
|
g.close()
|
||||||
|
|
Loading…
Add table
Reference in a new issue