From c1c779a7ef2d02a93a49b1fa8ca86b4b846fe8b4 Mon Sep 17 00:00:00 2001
From: Debanjum
Date: Fri, 1 Nov 2024 12:04:38 -0700
Subject: [PATCH] Do not yaml format raw code results in context for LLM.
 It's confusing

---
 src/khoj/processor/conversation/anthropic/anthropic_chat.py | 2 +-
 src/khoj/processor/conversation/google/gemini_chat.py       | 2 +-
 src/khoj/processor/conversation/offline/chat_model.py       | 4 +---
 src/khoj/processor/conversation/openai/gpt.py               | 2 +-
 src/khoj/processor/conversation/utils.py                    | 2 +-
 5 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/src/khoj/processor/conversation/anthropic/anthropic_chat.py b/src/khoj/processor/conversation/anthropic/anthropic_chat.py
index 1ca2095d..e2fd0c74 100644
--- a/src/khoj/processor/conversation/anthropic/anthropic_chat.py
+++ b/src/khoj/processor/conversation/anthropic/anthropic_chat.py
@@ -189,7 +189,7 @@ def converse_anthropic(
     if ConversationCommand.Online in conversation_commands or ConversationCommand.Webpage in conversation_commands:
         context_message += f"{prompts.online_search_conversation.format(online_results=yaml_dump(online_results))}\n\n"
     if ConversationCommand.Code in conversation_commands and not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()
 
     # Setup Prompt with Primer or Conversation History
diff --git a/src/khoj/processor/conversation/google/gemini_chat.py b/src/khoj/processor/conversation/google/gemini_chat.py
index 7a3ffe4d..f543bc6b 100644
--- a/src/khoj/processor/conversation/google/gemini_chat.py
+++ b/src/khoj/processor/conversation/google/gemini_chat.py
@@ -193,7 +193,7 @@ def converse_gemini(
     if ConversationCommand.Online in conversation_commands or ConversationCommand.Webpage in conversation_commands:
         context_message += f"{prompts.online_search_conversation.format(online_results=yaml_dump(online_results))}\n\n"
     if ConversationCommand.Code in conversation_commands and not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()
 
     # Setup Prompt with Primer or Conversation History
diff --git a/src/khoj/processor/conversation/offline/chat_model.py b/src/khoj/processor/conversation/offline/chat_model.py
index a7afaca4..b3e1523c 100644
--- a/src/khoj/processor/conversation/offline/chat_model.py
+++ b/src/khoj/processor/conversation/offline/chat_model.py
@@ -160,8 +160,6 @@ def converse_offline(
     assert loaded_model is None or isinstance(loaded_model, Llama), "loaded_model must be of type Llama, if configured"
     offline_chat_model = loaded_model or download_model(model, max_tokens=max_prompt_size)
     tracer["chat_model"] = model
-
-    compiled_references = "\n\n".join({f"# File: {item['file']}\n## {item['compiled']}\n" for item in references})
     current_date = datetime.now()
 
     if agent and agent.personality:
@@ -203,7 +201,7 @@ def converse_offline(
 
             context_message += f"{prompts.online_search_conversation_offline.format(online_results=yaml_dump(simplified_online_results))}\n\n"
     if ConversationCommand.Code in conversation_commands and not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()
 
     # Setup Prompt with Primer or Conversation History
diff --git a/src/khoj/processor/conversation/openai/gpt.py b/src/khoj/processor/conversation/openai/gpt.py
index ee7bac83..c376a90e 100644
--- a/src/khoj/processor/conversation/openai/gpt.py
+++ b/src/khoj/processor/conversation/openai/gpt.py
@@ -191,7 +191,7 @@ def converse(
     if not is_none_or_empty(online_results):
         context_message += f"{prompts.online_search_conversation.format(online_results=yaml_dump(online_results))}\n\n"
     if not is_none_or_empty(code_results):
-        context_message += f"{prompts.code_executed_context.format(code_results=yaml_dump(code_results))}\n\n"
+        context_message += f"{prompts.code_executed_context.format(code_results=str(code_results))}\n\n"
     context_message = context_message.strip()
 
     # Setup Prompt with Primer or Conversation History
diff --git a/src/khoj/processor/conversation/utils.py b/src/khoj/processor/conversation/utils.py
index b4db71d9..9970aefd 100644
--- a/src/khoj/processor/conversation/utils.py
+++ b/src/khoj/processor/conversation/utils.py
@@ -20,7 +20,7 @@ from langchain.schema import ChatMessage
 from llama_cpp.llama import Llama
 from transformers import AutoTokenizer
 
-from khoj.database.adapters import ConversationAdapters, ais_user_subscribed
+from khoj.database.adapters import ConversationAdapters
 from khoj.database.models import ChatModelOptions, ClientApplication, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.offline.utils import download_model, infer_max_tokens
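
Note (illustration only, not part of the patch): a minimal sketch of why the
subject line calls the YAML formatting confusing. The shape of code_results
below is a hypothetical stand-in, and plain PyYAML yaml.dump is used here in
place of khoj's yaml_dump wrapper, whose exact options this patch does not
show:

    import yaml

    # Hypothetical code execution result: values are multi-line code/output strings.
    code_results = {
        "code": 'for i in range(3):\n    print(i)',
        "output": "0\n1\n2\n",
    }

    # YAML re-encodes the embedded newlines and indentation (as escaped or
    # folded scalars), so the snippet no longer reads like runnable code:
    print(yaml.dump(code_results, sort_keys=False))

    # str() keeps Python's plain dict repr, leaving the raw strings as written,
    # which is what the patch switches to for code results:
    print(str(code_results))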