diff --git a/src/khoj/processor/conversation/anthropic/anthropic_chat.py b/src/khoj/processor/conversation/anthropic/anthropic_chat.py
index 0a242318..688514ca 100644
--- a/src/khoj/processor/conversation/anthropic/anthropic_chat.py
+++ b/src/khoj/processor/conversation/anthropic/anthropic_chat.py
@@ -160,7 +160,7 @@ def converse_anthropic(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: Optional[str] = None,
-    additional_context_for_llm_response: Optional[List[str]] = None,
+    program_execution_context: Optional[List[str]] = None,
     tracer: dict = {},
 ):
     """
@@ -224,7 +224,7 @@ def converse_anthropic(
         generated_excalidraw_diagram=generated_excalidraw_diagram,
         generated_files=generated_files,
         generated_images=generated_images,
-        additional_context_for_llm_response=additional_context_for_llm_response,
+        program_execution_context=program_execution_context,
     )
 
     messages, system_prompt = format_messages_for_anthropic(messages, system_prompt)
diff --git a/src/khoj/processor/conversation/google/gemini_chat.py b/src/khoj/processor/conversation/google/gemini_chat.py
index 304511ca..ad10acda 100644
--- a/src/khoj/processor/conversation/google/gemini_chat.py
+++ b/src/khoj/processor/conversation/google/gemini_chat.py
@@ -170,7 +170,7 @@ def converse_gemini(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: Optional[str] = None,
-    additional_context_for_llm_response: List[str] = None,
+    program_execution_context: List[str] = None,
     tracer={},
 ):
     """
@@ -235,7 +235,7 @@ def converse_gemini(
         generated_excalidraw_diagram=generated_excalidraw_diagram,
         generated_files=generated_files,
         generated_images=generated_images,
-        additional_context_for_llm_response=additional_context_for_llm_response,
+        program_execution_context=program_execution_context,
     )
 
     messages, system_prompt = format_messages_for_gemini(messages, system_prompt)
diff --git a/src/khoj/processor/conversation/offline/chat_model.py b/src/khoj/processor/conversation/offline/chat_model.py
index 853f95ec..d81c194c 100644
--- a/src/khoj/processor/conversation/offline/chat_model.py
+++ b/src/khoj/processor/conversation/offline/chat_model.py
@@ -234,7 +234,7 @@ def converse_offline(
         model_type=ChatModelOptions.ModelType.OFFLINE,
         query_files=query_files,
         generated_files=generated_files,
-        additional_context_for_llm_response=additional_context,
+        program_execution_context=additional_context,
     )
 
     logger.debug(f"Conversation Context for {model}: {messages_to_print(messages)}")
diff --git a/src/khoj/processor/conversation/openai/gpt.py b/src/khoj/processor/conversation/openai/gpt.py
index 518e655d..c8faf25e 100644
--- a/src/khoj/processor/conversation/openai/gpt.py
+++ b/src/khoj/processor/conversation/openai/gpt.py
@@ -160,7 +160,7 @@ def converse(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: Optional[str] = None,
-    additional_context_for_llm_response: List[str] = None,
+    program_execution_context: List[str] = None,
     tracer: dict = {},
 ):
     """
@@ -226,7 +226,7 @@ def converse(
         generated_excalidraw_diagram=generated_excalidraw_diagram,
         generated_files=generated_files,
         generated_images=generated_images,
-        additional_context_for_llm_response=additional_context_for_llm_response,
+        program_execution_context=program_execution_context,
     )
 
     logger.debug(f"Conversation Context for GPT: {messages_to_print(messages)}")
diff --git a/src/khoj/processor/conversation/prompts.py b/src/khoj/processor/conversation/prompts.py
index 46dac655..04ee8b13 100644
--- a/src/khoj/processor/conversation/prompts.py
+++ b/src/khoj/processor/conversation/prompts.py
@@ -1043,12 +1043,11 @@ A:
 
 additional_program_context = PromptTemplate.from_template(
     """
-Here's some additional context about what happened while I was executing this query:
+Here are some additional results from the query execution:
 {context}
-    """.strip()
+""".strip()
 )
 
-
 personality_prompt_safety_expert_lax = PromptTemplate.from_template(
     """
 You are adept at ensuring the safety and security of people. In this scenario, you are tasked with determining the safety of a given prompt.
diff --git a/src/khoj/processor/conversation/utils.py b/src/khoj/processor/conversation/utils.py
index b28abcae..d68e592e 100644
--- a/src/khoj/processor/conversation/utils.py
+++ b/src/khoj/processor/conversation/utils.py
@@ -383,7 +383,7 @@ def generate_chatml_messages_with_context(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: str = None,
-    additional_context_for_llm_response: List[str] = [],
+    program_execution_context: List[str] = [],
 ):
     """Generate chat messages with appropriate context from previous conversation to send to the chat model"""
     # Set max prompt size from user config or based on pre-configured for model and machine specs
@@ -485,12 +485,10 @@ def generate_chatml_messages_with_context(
     if generated_excalidraw_diagram:
         messages.append(ChatMessage(content=prompts.generated_diagram_attachment.format(), role="assistant"))
 
-    if additional_context_for_llm_response:
+    if program_execution_context:
         messages.append(
             ChatMessage(
-                content=prompts.additional_program_context.format(
-                    context="\n".join(additional_context_for_llm_response)
-                ),
+                content=prompts.additional_program_context.format(context="\n".join(program_execution_context)),
                 role="assistant",
             )
         )
diff --git a/src/khoj/routers/api_chat.py b/src/khoj/routers/api_chat.py
index b54b163d..68d77475 100644
--- a/src/khoj/routers/api_chat.py
+++ b/src/khoj/routers/api_chat.py
@@ -774,7 +774,7 @@ async def chat(
         generated_images: List[str] = []
         generated_files: List[FileAttachment] = []
         generated_excalidraw_diagram: str = None
-        additional_context_for_llm_response: List[str] = []
+        program_execution_context: List[str] = []
 
         if conversation_commands == [ConversationCommand.Default] or is_automated_task:
             chosen_io = await aget_data_sources_and_output_format(
@@ -1080,7 +1080,7 @@ async def chat(
                    async for result in send_event(ChatEvent.STATUS, f"**Ran code snippets**: {len(code_results)}"):
                        yield result
                except ValueError as e:
-                    additional_context_for_llm_response.append(f"Failed to run code")
+                    program_execution_context.append(f"Failed to run code")
                    logger.warning(
                        f"Failed to use code tool: {e}. Attempting to respond without code results",
                        exc_info=True,
                    )
@@ -1122,7 +1122,7 @@ async def chat(
                inferred_queries.append(improved_image_prompt)
 
                if generated_image is None or status_code != 200:
-                    additional_context_for_llm_response.append(f"Failed to generate image with {improved_image_prompt}")
+                    program_execution_context.append(f"Failed to generate image with {improved_image_prompt}")
                    async for result in send_event(ChatEvent.STATUS, f"Failed to generate image"):
                        yield result
                else:
@@ -1175,7 +1175,7 @@ async def chat(
                    yield result
            else:
                error_message = "Failed to generate diagram. Please try again later."
-                additional_context_for_llm_response.append(
+                program_execution_context.append(
                    f"AI attempted to programmatically generate a diagram but failed due to a program issue. Generally, it is able to do so, but encountered a system issue this time. AI can suggest text description or rendering of the diagram or user can try again with a simpler prompt."
                )
 
@@ -1208,7 +1208,7 @@ async def chat(
                generated_images,
                generated_files,
                generated_excalidraw_diagram,
-                additional_context_for_llm_response,
+                program_execution_context,
                tracer,
            )
diff --git a/src/khoj/routers/helpers.py b/src/khoj/routers/helpers.py
index 7d61752c..29c44d94 100644
--- a/src/khoj/routers/helpers.py
+++ b/src/khoj/routers/helpers.py
@@ -1188,7 +1188,7 @@ def generate_chat_response(
     generated_images: List[str] = None,
     raw_generated_files: List[FileAttachment] = [],
     generated_excalidraw_diagram: str = None,
-    additional_context_for_llm_response: List[str] = [],
+    program_execution_context: List[str] = [],
     tracer: dict = {},
 ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
     # Initialize Variables
@@ -1280,7 +1280,7 @@ def generate_chat_response(
                generated_files=raw_generated_files,
                generated_images=generated_images,
                generated_excalidraw_diagram=generated_excalidraw_diagram,
-                additional_context_for_llm_response=additional_context_for_llm_response,
+                program_execution_context=program_execution_context,
                tracer=tracer,
            )
 
@@ -1307,7 +1307,7 @@ def generate_chat_response(
                generated_files=raw_generated_files,
                generated_images=generated_images,
                generated_excalidraw_diagram=generated_excalidraw_diagram,
-                additional_context_for_llm_response=additional_context_for_llm_response,
+                program_execution_context=program_execution_context,
                tracer=tracer,
            )
        elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -1333,7 +1333,7 @@ def generate_chat_response(
                generated_files=raw_generated_files,
                generated_images=generated_images,
                generated_excalidraw_diagram=generated_excalidraw_diagram,
-                additional_context_for_llm_response=additional_context_for_llm_response,
+                program_execution_context=program_execution_context,
                tracer=tracer,
            )
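
For reviewers who want to see the renamed plumbing end to end, the sketch below is not part of the patch. It mimics how generate_chatml_messages_with_context() folds the program_execution_context list into the chat history, assuming only the behavior visible in this diff. The ChatMessage dataclass and the inlined template are simplified stand-ins for khoj's actual message class and prompts.additional_program_context.

# Minimal, self-contained sketch (not khoj code) of the renamed flow.
from dataclasses import dataclass
from typing import List


@dataclass
class ChatMessage:
    content: str
    role: str


# Mirrors the reworded prompts.additional_program_context template above.
ADDITIONAL_PROGRAM_CONTEXT = """
Here are some additional results from the query execution:
{context}
""".strip()


def append_program_execution_context(
    messages: List[ChatMessage], program_execution_context: List[str]
) -> List[ChatMessage]:
    """Attach collected execution notes as one trailing assistant message."""
    # Failure notes gathered during the turn (e.g. "Failed to run code",
    # "Failed to generate image with ...") are joined and appended so the
    # model can acknowledge what went wrong when composing its reply.
    if program_execution_context:
        messages.append(
            ChatMessage(
                content=ADDITIONAL_PROGRAM_CONTEXT.format(
                    context="\n".join(program_execution_context)
                ),
                role="assistant",
            )
        )
    return messages


if __name__ == "__main__":
    history = [ChatMessage(content="Plot my sleep data", role="user")]
    notes = ["Failed to run code"]
    for message in append_program_execution_context(history, notes):
        print(f"{message.role}: {message.content}")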