Rename additional context for llm response to program execution context

sabaimran 2024-12-04 18:43:41 -08:00
parent 886fe4a0c9
commit 8953ac03ec
8 changed files with 21 additions and 24 deletions

View file

@@ -160,7 +160,7 @@ def converse_anthropic(
generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: Optional[str] = None,
-additional_context_for_llm_response: Optional[List[str]] = None,
+program_execution_context: Optional[List[str]] = None,
tracer: dict = {},
):
"""
@@ -224,7 +224,7 @@ def converse_anthropic(
generated_excalidraw_diagram=generated_excalidraw_diagram,
generated_files=generated_files,
generated_images=generated_images,
-additional_context_for_llm_response=additional_context_for_llm_response,
+program_execution_context=program_execution_context,
)
messages, system_prompt = format_messages_for_anthropic(messages, system_prompt)

View file

@@ -170,7 +170,7 @@ def converse_gemini(
generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: Optional[str] = None,
-additional_context_for_llm_response: List[str] = None,
+program_execution_context: List[str] = None,
tracer={},
):
"""
@@ -235,7 +235,7 @@ def converse_gemini(
generated_excalidraw_diagram=generated_excalidraw_diagram,
generated_files=generated_files,
generated_images=generated_images,
-additional_context_for_llm_response=additional_context_for_llm_response,
+program_execution_context=program_execution_context,
)
messages, system_prompt = format_messages_for_gemini(messages, system_prompt)

View file

@@ -234,7 +234,7 @@ def converse_offline(
model_type=ChatModelOptions.ModelType.OFFLINE,
query_files=query_files,
generated_files=generated_files,
-additional_context_for_llm_response=additional_context,
+program_execution_context=additional_context,
)
logger.debug(f"Conversation Context for {model}: {messages_to_print(messages)}")

View file

@@ -160,7 +160,7 @@ def converse(
generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: Optional[str] = None,
-additional_context_for_llm_response: List[str] = None,
+program_execution_context: List[str] = None,
tracer: dict = {},
):
"""
@@ -226,7 +226,7 @@ def converse(
generated_excalidraw_diagram=generated_excalidraw_diagram,
generated_files=generated_files,
generated_images=generated_images,
-additional_context_for_llm_response=additional_context_for_llm_response,
+program_execution_context=program_execution_context,
)
logger.debug(f"Conversation Context for GPT: {messages_to_print(messages)}")

View file

@@ -1043,12 +1043,11 @@ A:
additional_program_context = PromptTemplate.from_template(
"""
-Here's some additional context about what happened while I was executing this query:
+Here are some additional results from the query execution:
{context}
""".strip()
""".strip()
)
personality_prompt_safety_expert_lax = PromptTemplate.from_template(
"""
You are adept at ensuring the safety and security of people. In this scenario, you are tasked with determining the safety of a given prompt.

View file

@@ -383,7 +383,7 @@ def generate_chatml_messages_with_context(
generated_images: Optional[list[str]] = None,
generated_files: List[FileAttachment] = None,
generated_excalidraw_diagram: str = None,
-additional_context_for_llm_response: List[str] = [],
+program_execution_context: List[str] = [],
):
"""Generate chat messages with appropriate context from previous conversation to send to the chat model"""
# Set max prompt size from user config or based on pre-configured for model and machine specs
@@ -485,12 +485,10 @@ def generate_chatml_messages_with_context(
if generated_excalidraw_diagram:
messages.append(ChatMessage(content=prompts.generated_diagram_attachment.format(), role="assistant"))
-if additional_context_for_llm_response:
+if program_execution_context:
messages.append(
ChatMessage(
-content=prompts.additional_program_context.format(
-context="\n".join(additional_context_for_llm_response)
-),
+content=prompts.additional_program_context.format(context="\n".join(program_execution_context)),
role="assistant",
)
)
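
Note: as a reading aid, here is a minimal, self-contained sketch of the pattern in the hunk above, i.e. joining the collected program_execution_context notes and formatting them into a single assistant message. The import paths below are assumptions (this diff does not show khoj's module layout); only PromptTemplate.from_template and the ChatMessage(content=..., role=...) shape come from the diff itself.

# Sketch only, not the khoj source. Assumes langchain is installed.
from langchain.prompts import PromptTemplate
from langchain.schema import ChatMessage

# Prompt template as defined in the prompts hunk above.
additional_program_context = PromptTemplate.from_template(
    """
Here are some additional results from the query execution:
{context}
""".strip()
)

def execution_context_message(program_execution_context: list[str]) -> ChatMessage:
    # Join the collected execution notes and wrap them in one assistant message.
    return ChatMessage(
        content=additional_program_context.format(context="\n".join(program_execution_context)),
        role="assistant",
    )

print(execution_context_message(["Failed to run code"]).content)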

View file

@@ -774,7 +774,7 @@ async def chat(
generated_images: List[str] = []
generated_files: List[FileAttachment] = []
generated_excalidraw_diagram: str = None
-additional_context_for_llm_response: List[str] = []
+program_execution_context: List[str] = []
if conversation_commands == [ConversationCommand.Default] or is_automated_task:
chosen_io = await aget_data_sources_and_output_format(
@@ -1080,7 +1080,7 @@ async def chat(
async for result in send_event(ChatEvent.STATUS, f"**Ran code snippets**: {len(code_results)}"):
yield result
except ValueError as e:
additional_context_for_llm_response.append(f"Failed to run code")
program_execution_context.append(f"Failed to run code")
logger.warning(
f"Failed to use code tool: {e}. Attempting to respond without code results",
exc_info=True,
@@ -1122,7 +1122,7 @@ async def chat(
inferred_queries.append(improved_image_prompt)
if generated_image is None or status_code != 200:
additional_context_for_llm_response.append(f"Failed to generate image with {improved_image_prompt}")
program_execution_context.append(f"Failed to generate image with {improved_image_prompt}")
async for result in send_event(ChatEvent.STATUS, f"Failed to generate image"):
yield result
else:
@@ -1175,7 +1175,7 @@ async def chat(
yield result
else:
error_message = "Failed to generate diagram. Please try again later."
-additional_context_for_llm_response.append(
+program_execution_context.append(
f"AI attempted to programmatically generate a diagram but failed due to a program issue. Generally, it is able to do so, but encountered a system issue this time. AI can suggest text description or rendering of the diagram or user can try again with a simpler prompt."
)
@@ -1208,7 +1208,7 @@ async def chat(
generated_images,
generated_files,
generated_excalidraw_diagram,
-additional_context_for_llm_response,
+program_execution_context,
tracer,
)
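
Note: the chat() hunks above all follow the same accumulate-on-failure pattern. A hypothetical, stripped-down sketch follows; the run_code_tool helper and its error text are illustrative stand-ins, not khoj APIs.

# Hypothetical sketch of the accumulate-on-failure pattern used in chat().
program_execution_context: list[str] = []

def run_code_tool(query: str) -> dict:
    # Stand-in for the real code tool; here it always fails to illustrate the flow.
    raise ValueError("sandbox unavailable")

try:
    code_results = run_code_tool("plot my sleep data")
except ValueError:
    # Record the failure instead of aborting, so the final LLM response can acknowledge it.
    program_execution_context.append("Failed to run code")

# The collected notes are then passed through to response generation,
# e.g. generate_chat_response(..., program_execution_context, tracer) as in the hunk above.
print(program_execution_context)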

View file

@@ -1188,7 +1188,7 @@ def generate_chat_response(
generated_images: List[str] = None,
raw_generated_files: List[FileAttachment] = [],
generated_excalidraw_diagram: str = None,
-additional_context_for_llm_response: List[str] = [],
+program_execution_context: List[str] = [],
tracer: dict = {},
) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
# Initialize Variables
@@ -1280,7 +1280,7 @@ def generate_chat_response(
generated_files=raw_generated_files,
generated_images=generated_images,
generated_excalidraw_diagram=generated_excalidraw_diagram,
-additional_context_for_llm_response=additional_context_for_llm_response,
+program_execution_context=program_execution_context,
tracer=tracer,
)
@@ -1307,7 +1307,7 @@ def generate_chat_response(
generated_files=raw_generated_files,
generated_images=generated_images,
generated_excalidraw_diagram=generated_excalidraw_diagram,
-additional_context_for_llm_response=additional_context_for_llm_response,
+program_execution_context=program_execution_context,
tracer=tracer,
)
elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -1333,7 +1333,7 @@ def generate_chat_response(
generated_files=raw_generated_files,
generated_images=generated_images,
generated_excalidraw_diagram=generated_excalidraw_diagram,
-additional_context_for_llm_response=additional_context_for_llm_response,
+program_execution_context=program_execution_context,
tracer=tracer,
)