Do not log the gpt4all chat response stream in khoj backend

The stream floods stdout and does not provide useful information to the user
This commit is contained in:
Debanjum Singh Solanky 2023-07-28 19:14:04 -07:00
parent 5ccb01343e
commit f76af869f1

View file

@ -163,6 +163,5 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
prompted_message = templated_system_message + chat_history + templated_user_message
response_iterator = model.generate(prompted_message, streaming=True, max_tokens=2000)
for response in response_iterator:
logger.info(response)
g.send(response)
g.close()