Fix the mechanism to retrieve the message content
This commit is contained in:
parent f0efe0177e
commit 0e63a90377

1 changed file with 2 additions and 2 deletions
```diff
@@ -98,10 +98,10 @@ def generate_chatml_messages_with_context(
     # Truncate oldest messages from conversation history until under max supported prompt size by model
     encoder = tiktoken.encoding_for_model(model_name)
-    tokens = sum([len(encoder.encode(content)) for message in messages for content in message.content])
+    tokens = sum([len(encoder.encode(message.content)) for message in messages])
     while tokens > max_prompt_size[model_name] and len(messages) > 1:
         messages.pop()
-        tokens = sum([len(encoder.encode(content)) for message in messages for content in message.content])
+        tokens = sum([len(encoder.encode(message.content)) for message in messages])

     # Truncate last message if still over max supported prompt size by model
     if tokens > max_prompt_size[model_name]:
```
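For context on the fix: the old comprehension's inner loop, `for content in message.content`, iterated the content string character by character, so the encoder was called once per character and the token count was badly inflated. The sketch below reproduces both expressions side by side; the `Message` stub, the sample messages, and the `"gpt-3.5-turbo"` model name are illustrative assumptions, not code from the repository.

```python
from dataclasses import dataclass

import tiktoken


@dataclass
class Message:
    content: str  # stand-in for the project's chat message type (assumed shape)


messages = [
    Message("What is the meaning of life?"),
    Message("42, of course."),
]

# Model name assumed for illustration only
encoder = tiktoken.encoding_for_model("gpt-3.5-turbo")

# Old (buggy): the inner loop walks the string one character at a time,
# so every character is encoded as a separate input.
buggy_count = sum([len(encoder.encode(content)) for message in messages for content in message.content])

# New (fixed): encode each message's full content in a single call.
fixed_count = sum([len(encoder.encode(message.content)) for message in messages])

# The buggy count is at least one token per character, so it greatly
# overstates the real prompt size measured by the fixed count.
print(buggy_count, fixed_count)
```

Because the inflated count crossed `max_prompt_size[model_name]` far too early, the truncation loop would pop conversation history that actually still fit within the model's prompt window; counting each message's content as a whole restores the intended truncation behavior.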