Rename additional context to additional_context_for_llm_response

This commit is contained in:
sabaimran 2024-12-03 21:23:15 -08:00
parent c87fce5930
commit 3552032827
10 changed files with 26 additions and 33 deletions
src
interface
obsidian/src
web/app/components/chatMessage
khoj
database/models
processor/conversation
routers

View file

@@ -541,12 +541,10 @@ export class KhojChatView extends KhojPaneView {
imageMarkdown += `${message}`; imageMarkdown += `${message}`;
} }
if (!images || images.length === 0) { if (images?.length === 0 && inferredQueries) {
if (inferredQueries) { imageMarkdown += "\n\n**Inferred Query**:";
imageMarkdown += "\n\n**Inferred Query**:"; for (let inferredQuery of inferredQueries) {
for (let inferredQuery of inferredQueries) { imageMarkdown += `\n\n${inferredQuery}`;
imageMarkdown += `\n\n${inferredQuery}`;
}
} }
} }
return imageMarkdown; return imageMarkdown;

View file

@@ -397,11 +397,6 @@ const ChatMessage = forwardRef<HTMLDivElement, ChatMessageProps>((props, ref) =>
// Prepare initial message for rendering // Prepare initial message for rendering
let message = props.chatMessage.message; let message = props.chatMessage.message;
if (props.chatMessage.intent && props.chatMessage.intent.type == "excalidraw") {
message = props.chatMessage.intent["inferred-queries"][0];
setExcalidrawData(props.chatMessage.message);
}
if (props.chatMessage.excalidrawDiagram) { if (props.chatMessage.excalidrawDiagram) {
setExcalidrawData(props.chatMessage.excalidrawDiagram); setExcalidrawData(props.chatMessage.excalidrawDiagram);
} }

View file

@@ -65,12 +65,12 @@ class PeopleAlsoAsk(PydanticBaseModel):
class KnowledgeGraph(PydanticBaseModel): class KnowledgeGraph(PydanticBaseModel):
attributes: Dict[str, str] attributes: Dict[str, str]
description: str description: Optional[str] = None
descriptionLink: str descriptionLink: Optional[str] = None
descriptionSource: str descriptionSource: Optional[str] = None
imageUrl: str imageUrl: Optional[str] = None
title: str title: str
type: str type: Optional[str] = None
class OrganicContext(PydanticBaseModel): class OrganicContext(PydanticBaseModel):

View file

@@ -160,7 +160,7 @@ def converse_anthropic(
generated_images: Optional[list[str]] = None, generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None, generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: Optional[str] = None, generated_excalidraw_diagram: Optional[str] = None,
additional_context: Optional[List[str]] = None, additional_context_for_llm_response: Optional[List[str]] = None,
tracer: dict = {}, tracer: dict = {},
): ):
""" """
@@ -224,7 +224,7 @@ def converse_anthropic(
generated_excalidraw_diagram=generated_excalidraw_diagram, generated_excalidraw_diagram=generated_excalidraw_diagram,
generated_files=generated_files, generated_files=generated_files,
generated_images=generated_images, generated_images=generated_images,
additional_program_context=additional_context, additional_context_for_llm_response=additional_context_for_llm_response,
) )
messages, system_prompt = format_messages_for_anthropic(messages, system_prompt) messages, system_prompt = format_messages_for_anthropic(messages, system_prompt)

View file

@@ -170,7 +170,7 @@ def converse_gemini(
generated_images: Optional[list[str]] = None, generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None, generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: Optional[str] = None, generated_excalidraw_diagram: Optional[str] = None,
additional_context: List[str] = None, additional_context_for_llm_response: List[str] = None,
tracer={}, tracer={},
): ):
""" """
@@ -235,7 +235,7 @@ def converse_gemini(
generated_excalidraw_diagram=generated_excalidraw_diagram, generated_excalidraw_diagram=generated_excalidraw_diagram,
generated_files=generated_files, generated_files=generated_files,
generated_images=generated_images, generated_images=generated_images,
additional_program_context=additional_context, additional_context_for_llm_response=additional_context_for_llm_response,
) )
messages, system_prompt = format_messages_for_gemini(messages, system_prompt) messages, system_prompt = format_messages_for_gemini(messages, system_prompt)

View file

@@ -234,7 +234,7 @@ def converse_offline(
model_type=ChatModelOptions.ModelType.OFFLINE, model_type=ChatModelOptions.ModelType.OFFLINE,
query_files=query_files, query_files=query_files,
generated_files=generated_files, generated_files=generated_files,
additional_program_context=additional_context, additional_context_for_llm_response=additional_context,
) )
logger.debug(f"Conversation Context for {model}: {messages_to_print(messages)}") logger.debug(f"Conversation Context for {model}: {messages_to_print(messages)}")

View file

@@ -160,7 +160,7 @@ def converse(
generated_images: Optional[list[str]] = None, generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None, generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: Optional[str] = None, generated_excalidraw_diagram: Optional[str] = None,
additional_context: List[str] = None, additional_context_for_llm_response: List[str] = None,
tracer: dict = {}, tracer: dict = {},
): ):
""" """
@@ -226,7 +226,7 @@ def converse(
generated_excalidraw_diagram=generated_excalidraw_diagram, generated_excalidraw_diagram=generated_excalidraw_diagram,
generated_files=generated_files, generated_files=generated_files,
generated_images=generated_images, generated_images=generated_images,
additional_program_context=additional_context, additional_context_for_llm_response=additional_context_for_llm_response,
) )
logger.debug(f"Conversation Context for GPT: {messages_to_print(messages)}") logger.debug(f"Conversation Context for GPT: {messages_to_print(messages)}")

View file

@@ -186,9 +186,7 @@ Here is the image you generated based on my query. You can follow-up with a gene
generated_diagram_attachment = PromptTemplate.from_template( generated_diagram_attachment = PromptTemplate.from_template(
f""" f"""
The AI has successfully created a diagram based on the user's query and handled the request. Good job! This will be shared with the user. I've successfully created a diagram based on the user's query. The diagram will automatically be shared with the user. I can follow-up with a general response or summary. Limit to 1-2 sentences.
AI can follow-up with a general response or summary. Limit to 1-2 sentences.
""".strip() """.strip()
) )

View file

@@ -383,7 +383,7 @@ def generate_chatml_messages_with_context(
generated_images: Optional[list[str]] = None, generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None, generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: str = None, generated_excalidraw_diagram: str = None,
additional_program_context: List[str] = [], additional_context_for_llm_response: List[str] = [],
): ):
"""Generate chat messages with appropriate context from previous conversation to send to the chat model""" """Generate chat messages with appropriate context from previous conversation to send to the chat model"""
# Set max prompt size from user config or based on pre-configured for model and machine specs # Set max prompt size from user config or based on pre-configured for model and machine specs
@@ -484,10 +484,12 @@ def generate_chatml_messages_with_context(
if generated_excalidraw_diagram: if generated_excalidraw_diagram:
messages.append(ChatMessage(content=prompts.generated_diagram_attachment.format(), role="assistant")) messages.append(ChatMessage(content=prompts.generated_diagram_attachment.format(), role="assistant"))
if additional_program_context: if additional_context_for_llm_response:
messages.append( messages.append(
ChatMessage( ChatMessage(
content=prompts.additional_program_context.format(context="\n".join(additional_program_context)), content=prompts.additional_program_context.format(
context="\n".join(additional_context_for_llm_response)
),
role="assistant", role="assistant",
) )
) )

View file

@@ -1188,7 +1188,7 @@ def generate_chat_response(
generated_images: List[str] = None, generated_images: List[str] = None,
raw_generated_files: List[FileAttachment] = [], raw_generated_files: List[FileAttachment] = [],
generated_excalidraw_diagram: str = None, generated_excalidraw_diagram: str = None,
additional_context: List[str] = [], additional_context_for_llm_response: List[str] = [],
tracer: dict = {}, tracer: dict = {},
) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]: ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
# Initialize Variables # Initialize Variables
@@ -1280,7 +1280,7 @@ def generate_chat_response(
generated_files=raw_generated_files, generated_files=raw_generated_files,
generated_images=generated_images, generated_images=generated_images,
generated_excalidraw_diagram=generated_excalidraw_diagram, generated_excalidraw_diagram=generated_excalidraw_diagram,
additional_context=additional_context, additional_context_for_llm_response=additional_context_for_llm_response,
tracer=tracer, tracer=tracer,
) )
@@ -1307,7 +1307,7 @@ def generate_chat_response(
generated_files=raw_generated_files, generated_files=raw_generated_files,
generated_images=generated_images, generated_images=generated_images,
generated_excalidraw_diagram=generated_excalidraw_diagram, generated_excalidraw_diagram=generated_excalidraw_diagram,
additional_context=additional_context, additional_context_for_llm_response=additional_context_for_llm_response,
tracer=tracer, tracer=tracer,
) )
elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE: elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -1333,7 +1333,7 @@ def generate_chat_response(
generated_files=raw_generated_files, generated_files=raw_generated_files,
generated_images=generated_images, generated_images=generated_images,
generated_excalidraw_diagram=generated_excalidraw_diagram, generated_excalidraw_diagram=generated_excalidraw_diagram,
additional_context=additional_context, additional_context_for_llm_response=additional_context_for_llm_response,
tracer=tracer, tracer=tracer,
) )