Bump GPT4All response generation batch size to 512 from 256

A batch size of 512 performs ~20% better than 256 on an XPS with no GPU and 16GB
RAM. That seems worth the tradeoff for now.
Debanjum Singh Solanky 2023-08-01 23:34:02 -07:00
parent e42fd8ae91
commit 7c1d70aa17

@@ -61,7 +61,7 @@ def extract_questions_offline(
     message = system_prompt + example_questions
     state.chat_lock.acquire()
     try:
-        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=256)
+        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=512)
     finally:
         state.chat_lock.release()
@@ -167,7 +167,7 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     prompted_message = templated_system_message + chat_history + templated_user_message
     state.chat_lock.acquire()
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=512)
     try:
         for response in response_iterator:
             if any(stop_word in response.strip() for stop_word in stop_words):
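
For context, the ~20% figure in the commit message was an informal measurement. A minimal
timing sketch like the one below (not part of this commit; the model filename and prompt are
illustrative placeholders) can reproduce the comparison on a CPU-only machine:

# Rough timing sketch: compare generation wall time at n_batch=256 vs 512.
import time

from gpt4all import GPT4All

model = GPT4All("llama-2-7b-chat.ggmlv3.q4_0.bin")  # hypothetical local model file
prompt = "Summarize the benefits of taking daily notes in two sentences."

for n_batch in (256, 512):
    start = time.perf_counter()
    model.generate(prompt, max_tokens=200, top_k=2, temp=0, n_batch=n_batch)
    elapsed = time.perf_counter() - start
    print(f"n_batch={n_batch}: {elapsed:.1f}s")

With temp=0 both runs generate the same tokens, so the difference in elapsed time reflects
only the prompt-batching change.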