Bump GPT4All response generation batch size to 512 from 256
A batch size of 512 performs ~20% better on an XPS with no GPU and 16GB RAM. Seems worth the tradeoff for now.
parent e42fd8ae91
commit 7c1d70aa17
1 changed file with 2 additions and 2 deletions
@@ -61,7 +61,7 @@ def extract_questions_offline(
     message = system_prompt + example_questions
     state.chat_lock.acquire()
     try:
-        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=256)
+        response = gpt4all_model.generate(message, max_tokens=200, top_k=2, temp=0, n_batch=512)
     finally:
         state.chat_lock.release()

@@ -167,7 +167,7 @@ def llm_thread(g, messages: List[ChatMessage], model: GPT4All):
     prompted_message = templated_system_message + chat_history + templated_user_message

     state.chat_lock.acquire()
-    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=256)
+    response_iterator = model.generate(prompted_message, streaming=True, max_tokens=500, n_batch=512)
     try:
         for response in response_iterator:
             if any(stop_word in response.strip() for stop_word in stop_words):
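For context, the ~20% improvement quoted in the commit message is the kind of figure that can be sanity-checked with a rough timing loop. The sketch below is not part of this commit: it assumes the gpt4all Python package and a locally downloaded model file (the filename is a placeholder), and it reuses the same generate() parameters that appear in the diff.

import time

from gpt4all import GPT4All

# Placeholder model file; substitute whichever local model Khoj is configured to use.
model = GPT4All("llama-2-7b-chat.ggmlv3.q4_0.bin")
prompt = "Summarize the benefits of batching token evaluation."

# Time one generation at each batch size and compare.
for n_batch in (256, 512):
    start = time.perf_counter()
    model.generate(prompt, max_tokens=200, top_k=2, temp=0, n_batch=n_batch)
    elapsed = time.perf_counter() - start
    print(f"n_batch={n_batch}: {elapsed:.1f}s")

n_batch controls how many prompt tokens are evaluated per batch, so a larger value mainly speeds up prompt processing at the cost of somewhat higher memory use, which is the tradeoff the commit message alludes to.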