Mirror of https://github.com/khoj-ai/khoj.git, synced 2024-11-27 09:25:06 +01:00
Do not yaml format raw code results in context for LLM. It's confusing
This commit is contained in: parent cd75151431, commit c1c779a7ef
5 changed files with 5 additions and 7 deletions
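The rationale, sketched: yaml_dump (a yaml.dump-based helper elsewhere in the codebase) re-serializes the code tool's output before it is interpolated into the prompt, and YAML's default treatment of multi-line strings quotes and folds the scalars, inserting blank lines, indentation, or escape sequences into the code and its stdout. A plain str() keeps the raw dict repr instead. A minimal, hypothetical illustration of the difference (the real shape of code_results in Khoj may differ):

    import yaml

    # Hypothetical code-execution result; the actual structure Khoj's
    # code tool produces may differ.
    code_results = {
        "code": "for i in range(3):\n    print(i)",
        "stdout": "0\n1\n2\n",
    }

    # Before this commit: YAML serialization. PyYAML quotes and folds the
    # multi-line scalars, scattering blank lines, indentation, or escape
    # sequences through the code snippet and its output.
    print(yaml.dump(code_results, sort_keys=False))

    # After this commit: the plain dict repr, with newlines kept as
    # literal \n escapes in one compact, unambiguous blob.
    print(str(code_results))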
@@ -189,7 +189,7 @@ def converse_anthropic(
     if ConversationCommand.Online in conversation_commands or ConversationCommand.Webpage in conversation_commands:
         context_message += f"{prompts.online_search_conversation.format(online_results=yaml_dump(online_results))}\n\n"
     if ConversationCommand.Code in conversation_commands and not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()

     # Setup Prompt with Primer or Conversation History
@@ -193,7 +193,7 @@ def converse_gemini(
     if ConversationCommand.Online in conversation_commands or ConversationCommand.Webpage in conversation_commands:
         context_message += f"{prompts.online_search_conversation.format(online_results=yaml_dump(online_results))}\n\n"
     if ConversationCommand.Code in conversation_commands and not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()

     # Setup Prompt with Primer or Conversation History
@@ -160,8 +160,6 @@ def converse_offline(
     assert loaded_model is None or isinstance(loaded_model, Llama), "loaded_model must be of type Llama, if configured"
     offline_chat_model = loaded_model or download_model(model, max_tokens=max_prompt_size)
     tracer["chat_model"] = model

-    compiled_references = "\n\n".join({f"# File: {item['file']}\n## {item['compiled']}\n" for item in references})
-
     current_date = datetime.now()

     if agent and agent.personality:
@@ -203,7 +201,7 @@ def converse_offline(

         context_message += f"{prompts.online_search_conversation_offline.format(online_results=yaml_dump(simplified_online_results))}\n\n"
     if ConversationCommand.Code in conversation_commands and not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()

     # Setup Prompt with Primer or Conversation History
@@ -191,7 +191,7 @@ def converse(
     if not is_none_or_empty(online_results):
         context_message += f"{prompts.online_search_conversation.format(online_results=yaml_dump(online_results))}\n\n"
     if not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()

     # Setup Prompt with Primer or Conversation History
@@ -20,7 +20,7 @@ from langchain.schema import ChatMessage
 from llama_cpp.llama import Llama
 from transformers import AutoTokenizer

-from khoj.database.adapters import ConversationAdapters, ais_user_subscribed
+from khoj.database.adapters import ConversationAdapters
 from khoj.database.models import ChatModelOptions, ClientApplication, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.offline.utils import download_model, infer_max_tokens