From d1844980388b4e1cc92cbda91083a3bcc13a71a2 Mon Sep 17 00:00:00 2001
From: Debanjum
Date: Mon, 28 Oct 2024 05:59:32 -0700
Subject: [PATCH] Pass context in separate message from user query to research
 chat actor

---
 src/khoj/processor/conversation/prompts.py |  5 +----
 src/khoj/routers/helpers.py                | 17 +++++++++++------
 src/khoj/routers/research.py               |  4 ++--
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/src/khoj/processor/conversation/prompts.py b/src/khoj/processor/conversation/prompts.py
index 3f166e71..4f4d1aff 100644
--- a/src/khoj/processor/conversation/prompts.py
+++ b/src/khoj/processor/conversation/prompts.py
@@ -673,10 +673,7 @@ Which of the tool AIs listed below would you use to answer my question? You **on
 
 Return the next tool AI to use and the query to ask it. Your response should always be a valid JSON object. Do not say anything else.
 Response format:
-{{"scratchpad": "", "tool": "", "query": ""}}
-
-User: {query}
-Khoj:
+{{"scratchpad": "", "tool": "", "query": ""}}
 """.strip()
 )
 
diff --git a/src/khoj/routers/helpers.py b/src/khoj/routers/helpers.py
index e6437a7d..52958cc1 100644
--- a/src/khoj/routers/helpers.py
+++ b/src/khoj/routers/helpers.py
@@ -799,7 +799,7 @@ async def generate_excalidraw_diagram_from_description(
 
     with timer("Chat actor: Generate excalidraw diagram", logger):
         raw_response = await send_message_to_model_wrapper(
-            message=excalidraw_diagram_generation, user=user, tracer=tracer
+            query=excalidraw_diagram_generation, user=user, tracer=tracer
         )
         raw_response = raw_response.strip()
         raw_response = remove_json_codeblock(raw_response)
@@ -879,11 +879,12 @@ async def generate_better_image_prompt(
 
 
 async def send_message_to_model_wrapper(
-    message: str,
+    query: str,
     system_message: str = "",
     response_type: str = "text",
     user: KhojUser = None,
     query_images: List[str] = None,
+    context: str = "",
     tracer: dict = {},
 ):
     conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
@@ -914,7 +915,8 @@ async def send_message_to_model_wrapper(
 
         loaded_model = state.offline_chat_processor_config.loaded_model
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             loaded_model=loaded_model,
@@ -939,7 +941,8 @@ async def send_message_to_model_wrapper(
         api_key = openai_chat_config.api_key
         api_base_url = openai_chat_config.api_base_url
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
@@ -960,7 +963,8 @@ async def send_message_to_model_wrapper(
     elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
         api_key = conversation_config.openai_config.api_key
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
@@ -979,7 +983,8 @@ async def send_message_to_model_wrapper(
     elif model_type == ChatModelOptions.ModelType.GOOGLE:
         api_key = conversation_config.openai_config.api_key
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
diff --git a/src/khoj/routers/research.py b/src/khoj/routers/research.py
index 94e19868..dcc86c7a 100644
--- a/src/khoj/routers/research.py
+++ b/src/khoj/routers/research.py
@@ -77,7 +77,6 @@ async def apick_next_tool(
     username = prompts.user_name.format(name=user_name) if user_name else ""
 
     function_planning_prompt = prompts.plan_function_execution.format(
-        query=query,
         tools=tool_options_str,
         chat_history=chat_history,
         personality_context=personality_context,
@@ -91,7 +90,8 @@ async def apick_next_tool(
 
     with timer("Chat actor: Infer information sources to refer", logger):
         response = await send_message_to_model_wrapper(
-            function_planning_prompt,
+            query=query,
+            context=function_planning_prompt,
             response_type="json_object",
             user=user,
             query_images=query_images,
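
Illustrative note (not part of the patch): with this change, the research planner passes the user's raw question and the formatted planning instructions as separate arguments, and send_message_to_model_wrapper forwards the context to generate_chatml_messages_with_context as its own chat message rather than templating the query into the prompt text. A minimal sketch of the new call shape, assuming the variables from apick_next_tool visible in the diff; the tracer argument is taken from the wrapper's signature and may differ at the actual call site:

    # Sketch: the user query and the planning instructions now travel
    # to the model as separate messages instead of one combined prompt.
    response = await send_message_to_model_wrapper(
        query=query,                       # raw user question, unmodified
        context=function_planning_prompt,  # tool-planning instructions
        response_type="json_object",       # planner must reply with JSON
        user=user,
        query_images=query_images,
        tracer=tracer,                     # assumed; defaults to {} in the signature
    )

Separating the two is what lets the plan_function_execution template drop its trailing "User: {query}" / "Khoj:" lines in the prompts.py hunk above: the query is no longer interpolated into the instruction text.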