Make tracer the last input parameter for all the relevant chat helper methods

sabaimran 2024-11-09 18:22:46 -08:00
parent 3badb27744
commit bc95a99fb4
12 changed files with 56 additions and 57 deletions
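Every change below follows the same mechanical pattern: remove tracer from its old position and re-add it after the final parameter, in both function signatures and call sites, presumably so the telemetry parameter always sits in a predictable final slot. A minimal sketch of the resulting convention (the helper below is illustrative, not a function from this diff; the mutable {} default simply mirrors the codebase's existing style):

from typing import Optional
import uuid


def converse_example(
    user_query: str,
    query_images: Optional[list[str]] = None,
    attached_files: str = None,
    tracer: dict = {},  # kept last, matching the convention this commit applies
):
    # Derive a turn id from the tracer, falling back to a fresh UUID,
    # mirroring the tracer.get("mid") usage in save_to_conversation_log below.
    turn_id = tracer.get("mid") or str(uuid.uuid4())
    return f"answered {user_query!r} (turn {turn_id})"


# Call sites change the same way: the tracer keyword argument moves to the end.
response = converse_example(
    "What is a tracer?",
    attached_files=None,
    tracer={"mid": "example-mid"},
)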

View file

@@ -36,8 +36,8 @@ def extract_questions_anthropic(
     query_images: Optional[list[str]] = None,
     vision_enabled: bool = False,
     personality_context: Optional[str] = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     """
     Infer search queries to retrieve relevant notes to answer user query
@@ -154,8 +154,8 @@ def converse_anthropic(
     agent: Agent = None,
     query_images: Optional[list[str]] = None,
     vision_available: bool = False,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     """
     Converse with user using Anthropic's Claude

View file

@@ -37,8 +37,8 @@ def extract_questions_gemini(
     query_images: Optional[list[str]] = None,
     vision_enabled: bool = False,
     personality_context: Optional[str] = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     """
     Infer search queries to retrieve relevant notes to answer user query
@@ -122,7 +122,6 @@ def gemini_send_message_to_model(
     temperature=0,
     model_kwargs=None,
     tracer={},
-    attached_files: str = None,
 ):
     """
     Send message to model
@@ -165,8 +164,8 @@ def converse_gemini(
     agent: Agent = None,
     query_images: Optional[list[str]] = None,
     vision_available: bool = False,
-    tracer={},
     attached_files: str = None,
+    tracer={},
 ):
     """
     Converse with user using Google's Gemini

View file

@@ -37,8 +37,8 @@ def extract_questions_offline(
     max_prompt_size: int = None,
     temperature: float = 0.7,
     personality_context: Optional[str] = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ) -> List[str]:
     """
     Infer search queries to retrieve relevant notes to answer user query
@@ -154,8 +154,8 @@ def converse_offline(
     location_data: LocationData = None,
     user_name: str = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ) -> Union[ThreadedGenerator, Iterator[str]]:
     """
     Converse with user using Llama

View file

@@ -34,8 +34,8 @@ def extract_questions(
     query_images: Optional[list[str]] = None,
     vision_enabled: bool = False,
     personality_context: Optional[str] = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     """
     Infer search queries to retrieve relevant notes to answer user query
@@ -154,8 +154,8 @@ def converse(
     agent: Agent = None,
     query_images: Optional[list[str]] = None,
     vision_available: bool = False,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     """
     Converse with user using OpenAI's ChatGPT

View file

@@ -224,8 +224,8 @@ def save_to_conversation_log(
     automation_id: str = None,
     query_images: List[str] = None,
     raw_attached_files: List[FileAttachment] = [],
-    tracer: Dict[str, Any] = {},
     train_of_thought: List[Any] = [],
+    tracer: Dict[str, Any] = {},
 ):
     user_message_time = user_message_time or datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     turn_id = tracer.get("mid") or str(uuid.uuid4())

View file

@@ -28,8 +28,8 @@ async def text_to_image(
     send_status_func: Optional[Callable] = None,
     query_images: Optional[List[str]] = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     status_code = 200
     image = None
@@ -70,8 +70,8 @@ async def text_to_image(
         query_images=query_images,
         user=user,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     )
     if send_status_func:

View file

@@ -67,8 +67,8 @@ async def search_online(
     max_webpages_to_read: int = DEFAULT_MAX_WEBPAGES_TO_READ,
     query_images: List[str] = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     query += " ".join(custom_filters)
     if not is_internet_connected():
@@ -165,9 +165,9 @@ async def read_webpages(
     send_status_func: Optional[Callable] = None,
     query_images: List[str] = None,
     agent: Agent = None,
-    tracer: dict = {},
     max_webpages_to_read: int = DEFAULT_MAX_WEBPAGES_TO_READ,
     attached_files: str = None,
+    tracer: dict = {},
 ):
     "Infer web pages to read from the query and extract relevant information from them"
     logger.info(f"Inferring web pages to read")
@@ -178,8 +178,8 @@ async def read_webpages(
         user,
         query_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     )
     # Get the top 10 web pages to read

View file

@@ -31,8 +31,8 @@ async def run_code(
     query_images: List[str] = None,
     agent: Agent = None,
     sandbox_url: str = SANDBOX_URL,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     # Generate Code
     if send_status_func:

View file

@@ -350,8 +350,8 @@ async def extract_references_and_questions(
     send_status_func: Optional[Callable] = None,
     query_images: Optional[List[str]] = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     user = request.user.object if request.user.is_authenticated else None
@@ -425,8 +425,8 @@ async def extract_references_and_questions(
             user=user,
             max_prompt_size=conversation_config.max_prompt_size,
             personality_context=personality_context,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
         openai_chat_config = conversation_config.openai_config
@@ -444,8 +444,8 @@ async def extract_references_and_questions(
             query_images=query_images,
             vision_enabled=vision_enabled,
             personality_context=personality_context,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
         api_key = conversation_config.openai_config.api_key
@@ -460,8 +460,8 @@ async def extract_references_and_questions(
             user=user,
             vision_enabled=vision_enabled,
             personality_context=personality_context,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
         api_key = conversation_config.openai_config.api_key
@@ -477,8 +477,8 @@ async def extract_references_and_questions(
             user=user,
             vision_enabled=vision_enabled,
             personality_context=personality_context,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     # Collate search results as context for GPT

View file

@@ -756,8 +756,8 @@ async def chat(
         user=user,
         query_images=uploaded_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     )
     # If we're doing research, we don't want to do anything else
@@ -797,8 +797,8 @@ async def chat(
         user_name=user_name,
         location=location,
         file_filters=conversation.file_filters if conversation else [],
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(research_result, InformationCollectionIteration):
             if research_result.summarizedResult:
@@ -849,8 +849,8 @@ async def chat(
         query_images=uploaded_images,
         agent=agent,
         send_status_func=partial(send_event, ChatEvent.STATUS),
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(response, dict) and ChatEvent.STATUS in response:
             yield response[ChatEvent.STATUS]
@@ -870,9 +870,9 @@ async def chat(
         client_application=request.user.client_app,
         conversation_id=conversation_id,
         query_images=uploaded_images,
-        tracer=tracer,
         train_of_thought=train_of_thought,
         raw_attached_files=raw_attached_files,
+        tracer=tracer,
     )
     return
@@ -916,9 +916,9 @@ async def chat(
         inferred_queries=[query_to_run],
         automation_id=automation.id,
         query_images=uploaded_images,
-        tracer=tracer,
         train_of_thought=train_of_thought,
         raw_attached_files=raw_attached_files,
+        tracer=tracer,
     )
     async for result in send_llm_response(llm_response):
         yield result
@@ -940,8 +940,8 @@ async def chat(
         partial(send_event, ChatEvent.STATUS),
         query_images=uploaded_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(result, dict) and ChatEvent.STATUS in result:
             yield result[ChatEvent.STATUS]
@@ -986,8 +986,8 @@ async def chat(
         custom_filters,
         query_images=uploaded_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(result, dict) and ChatEvent.STATUS in result:
             yield result[ChatEvent.STATUS]
@@ -1012,8 +1012,8 @@ async def chat(
         partial(send_event, ChatEvent.STATUS),
         query_images=uploaded_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(result, dict) and ChatEvent.STATUS in result:
             yield result[ChatEvent.STATUS]
@@ -1053,8 +1053,8 @@ async def chat(
         partial(send_event, ChatEvent.STATUS),
         query_images=uploaded_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(result, dict) and ChatEvent.STATUS in result:
             yield result[ChatEvent.STATUS]
@@ -1093,8 +1093,8 @@ async def chat(
         send_status_func=partial(send_event, ChatEvent.STATUS),
         query_images=uploaded_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(result, dict) and ChatEvent.STATUS in result:
             yield result[ChatEvent.STATUS]
@@ -1125,10 +1125,10 @@ async def chat(
         compiled_references=compiled_references,
         online_results=online_results,
         query_images=uploaded_images,
-        tracer=tracer,
         train_of_thought=train_of_thought,
         attached_file_context=attached_file_context,
         raw_attached_files=raw_attached_files,
+        tracer=tracer,
     )
     content_obj = {
         "intentType": intent_type,
@@ -1157,8 +1157,8 @@ async def chat(
         user=user,
         agent=agent,
         send_status_func=partial(send_event, ChatEvent.STATUS),
-        tracer=tracer,
         attached_files=attached_file_context,
+        tracer=tracer,
     ):
         if isinstance(result, dict) and ChatEvent.STATUS in result:
             yield result[ChatEvent.STATUS]
@@ -1186,10 +1186,10 @@ async def chat(
         compiled_references=compiled_references,
         online_results=online_results,
         query_images=uploaded_images,
-        tracer=tracer,
         train_of_thought=train_of_thought,
         attached_file_context=attached_file_context,
         raw_attached_files=raw_attached_files,
+        tracer=tracer,
     )
     async for result in send_llm_response(json.dumps(content_obj)):
@@ -1215,10 +1215,10 @@ async def chat(
         user_name,
         researched_results,
         uploaded_images,
-        tracer,
         train_of_thought,
         attached_file_context,
         raw_attached_files,
+        tracer,
     )
     # Send Response

View file

@@ -361,8 +361,8 @@ async def aget_relevant_information_sources(
     user: KhojUser,
     query_images: List[str] = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     """
     Given a query, determine which of the available tools the agent should use in order to answer appropriately.
@@ -399,8 +399,8 @@ async def aget_relevant_information_sources(
         relevant_tools_prompt,
         response_type="json_object",
         user=user,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     )
     try:
@@ -509,8 +509,8 @@ async def infer_webpage_urls(
     user: KhojUser,
     query_images: List[str] = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ) -> List[str]:
     """
     Infer webpage links from the given query
@@ -539,8 +539,8 @@ async def infer_webpage_urls(
         query_images=query_images,
         response_type="json_object",
         user=user,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     )
     # Validate that the response is a non-empty, JSON-serializable list of URLs
@@ -565,8 +565,8 @@ async def generate_online_subqueries(
     user: KhojUser,
     query_images: List[str] = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ) -> List[str]:
     """
     Generate subqueries from the given query
@@ -595,8 +595,8 @@ async def generate_online_subqueries(
         query_images=query_images,
         response_type="json_object",
         user=user,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     )
     # Validate that the response is a non-empty, JSON-serializable list
@@ -718,8 +718,8 @@ async def generate_summary_from_files(
     query_images: List[str] = None,
     agent: Agent = None,
     send_status_func: Optional[Callable] = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     try:
         file_objects = None
@@ -781,8 +781,8 @@ async def generate_excalidraw_diagram(
     user: KhojUser = None,
     agent: Agent = None,
     send_status_func: Optional[Callable] = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     if send_status_func:
         async for event in send_status_func("**Enhancing the Diagramming Prompt**"):
@@ -797,8 +797,8 @@ async def generate_excalidraw_diagram(
         query_images=query_images,
         user=user,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     )
     if send_status_func:
@@ -824,8 +824,8 @@ async def generate_better_diagram_description(
     query_images: List[str] = None,
     user: KhojUser = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ) -> str:
     """
     Generate a diagram description from the given query and context
@@ -866,8 +866,8 @@ async def generate_better_diagram_description(
         improve_diagram_description_prompt,
         query_images=query_images,
         user=user,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     )
     response = response.strip()
     if response.startswith(('"', "'")) and response.endswith(('"', "'")):
@@ -914,8 +914,8 @@ async def generate_better_image_prompt(
     query_images: Optional[List[str]] = None,
     user: KhojUser = None,
     agent: Agent = None,
-    tracer: dict = {},
     attached_files: str = "",
+    tracer: dict = {},
 ) -> str:
     """
     Generate a better image prompt from the given query
@@ -963,7 +963,7 @@ async def generate_better_image_prompt(
     with timer("Chat actor: Generate contextual image prompt", logger):
         response = await send_message_to_model_wrapper(
-            image_prompt, query_images=query_images, user=user, tracer=tracer, attached_files=attached_files
+            image_prompt, query_images=query_images, user=user, attached_files=attached_files, tracer=tracer
         )
     response = response.strip()
     if response.startswith(('"', "'")) and response.endswith(('"', "'")):
@@ -979,8 +979,8 @@ async def send_message_to_model_wrapper(
     user: KhojUser = None,
     query_images: List[str] = None,
     context: str = "",
-    tracer: dict = {},
     attached_files: str = None,
+    tracer: dict = {},
 ):
     conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
     vision_available = conversation_config.vision_enabled
@@ -1106,8 +1106,8 @@ def send_message_to_model_wrapper_sync(
     system_message: str = "",
     response_type: str = "text",
     user: KhojUser = None,
-    tracer: dict = {},
     attached_files: str = "",
+    tracer: dict = {},
 ):
     conversation_config: ChatModelOptions = ConversationAdapters.get_default_conversation_config(user)
@@ -1225,10 +1225,10 @@ def generate_chat_response(
     user_name: Optional[str] = None,
     meta_research: str = "",
     query_images: Optional[List[str]] = None,
-    tracer: dict = {},
     train_of_thought: List[Any] = [],
     attached_files: str = None,
     raw_attached_files: List[FileAttachment] = None,
+    tracer: dict = {},
 ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
     # Initialize Variables
     chat_response = None
@@ -1252,9 +1252,9 @@ def generate_chat_response(
         client_application=client_application,
         conversation_id=conversation_id,
         query_images=query_images,
-        tracer=tracer,
         train_of_thought=train_of_thought,
         raw_attached_files=raw_attached_files,
+        tracer=tracer,
     )
     conversation_config = ConversationAdapters.get_valid_conversation_config(user, conversation)
@@ -1281,8 +1281,8 @@ def generate_chat_response(
             location_data=location_data,
             user_name=user_name,
             agent=agent,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -1307,8 +1307,8 @@ def generate_chat_response(
             user_name=user_name,
             agent=agent,
             vision_available=vision_available,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
@@ -1330,8 +1330,8 @@ def generate_chat_response(
             user_name=user_name,
             agent=agent,
             vision_available=vision_available,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
         api_key = conversation_config.openai_config.api_key
@@ -1352,8 +1352,8 @@ def generate_chat_response(
             agent=agent,
             query_images=query_images,
             vision_available=vision_available,
-            tracer=tracer,
             attached_files=attached_files,
+            tracer=tracer,
         )
     metadata.update({"chat_model": conversation_config.chat_model})

View file

@@ -268,8 +268,8 @@ async def execute_information_collection(
         send_status_func,
         query_images=query_images,
         agent=agent,
-        tracer=tracer,
         attached_files=attached_files,
+        tracer=tracer,
     ):
         if isinstance(result, dict) and ChatEvent.STATUS in result:
             yield result[ChatEvent.STATUS]