From 7c1d70aa17d50595abcdd5e45a6ea6ed3aff4caf Mon Sep 17 00:00:00 2001
From: Debanjum Singh Solanky
Date: Tue, 1 Aug 2023 23:34:02 -0700
Subject: [PATCH] Bump GPT4All response generation batch size to 512 from 256

A batch size of 512 performs ~20% better on an XPS with no GPU and 16GB
RAM. Seems worth the tradeoff for now
---
 src/khoj/processor/conversation/gpt4all/chat_model.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/khoj/processor/conversation/gpt4all/chat_model.py b/src/khoj/processor/conversation/gpt4all/chat_model.py
index ba3966f6..1e286cd2 100644
--- a/src/khoj/processor/conversation/gpt4all/chat_model.py
+++ b/src/khoj/processor/conversation/gpt4all/chat_model.py
@@ -61,7 +61,7 @@ def extract_questions_offline(
     message = system_prompt + example_questions
     state.chat_lock.acquire()
     try:
-        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=256)
+        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=512)
     finally:
         state.chat_lock.release()
 
@@ -167,7 +167,7 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     prompted_message = templated_system_message + chat_history + templated_user_message
 
     state.chat_lock.acquire()
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=512)
     try:
         for response in response_iterator:
            if any(stop_word in response.strip() for stop_word in stop_words):
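
The ~20% figure above comes from timing response generation on a CPU-only machine. A minimal sketch of how such a comparison could be reproduced is below; it is not part of the patch. The model file name and prompt are placeholders, and the only API assumed from the patch itself is gpt4all's GPT4All.generate() with the max_tokens and n_batch keyword arguments.

    # Rough timing sketch: compare n_batch=256 vs n_batch=512 on the same prompt.
    # Model name and prompt are placeholders; adjust to whatever model Khoj is configured with.
    import time

    from gpt4all import GPT4All

    model = GPT4All("llama-2-7b-chat.ggmlv3.q4_0.bin")  # placeholder model file
    prompt = "Summarize the benefits of taking daily notes."  # placeholder prompt

    for n_batch in (256, 512):
        start = time.perf_counter()
        # Keep temp=0 so sampling is deterministic and only n_batch varies between runs
        model.generate(prompt, max_tokens=200, temp=0, n_batch=n_batch)
        elapsed = time.perf_counter() - start
        print(f"n_batch={n_batch}: {elapsed:.1f}s")

The general tradeoff is that a larger n_batch lets llama.cpp process more prompt tokens per evaluation, which tends to speed up prompt ingestion on CPU at the cost of somewhat higher memory use during generation.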