Mirror of https://github.com/khoj-ai/khoj.git (synced 2024-11-23 23:48:56 +01:00)
Pass context in separate message from user query to research chat actor
parent d75ce4a9e3
commit d184498038
3 changed files with 14 additions and 12 deletions
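The shape of the change: send_message_to_model_wrapper previously took a single fused message string, so callers pasted supporting context (planning instructions, tool descriptions, chat history) into the same string as the user's question. After this commit the wrapper accepts query and context as separate parameters and forwards both to generate_chatml_messages_with_context, which can emit the context as its own chat message. Below is a minimal, self-contained sketch of that message layout; build_messages and the exact roles and ordering are illustrative assumptions, not khoj's actual implementation.

from typing import Dict, List

def build_messages(query: str, context: str = "", system_message: str = "") -> List[Dict[str, str]]:
    # Sketch of the layout implied by this diff: context rides in its own message.
    messages: List[Dict[str, str]] = []
    if system_message:
        messages.append({"role": "system", "content": system_message})
    if context:
        # Context is no longer pasted into the query string; as a separate
        # message the model can tell background material from the question.
        messages.append({"role": "user", "content": context})
    messages.append({"role": "user", "content": query})
    return messages

for m in build_messages("What's new in AI hardware?", context="Available tools: notes, online_search"):
    print(m["role"], "->", m["content"])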
@@ -673,10 +673,7 @@ Which of the tool AIs listed below would you use to answer my question? You **on
 
 Return the next tool AI to use and the query to ask it. Your response should always be a valid JSON object. Do not say anything else.
 Response format:
-{{"scratchpad": "<your_scratchpad_to_reason_about_which_tool_to_use>", "tool": "<name_of_tool_ai>", "query": "<your_query_for_the_tool_ai>"}}
-
-User: {query}
-Khoj:
+{{"scratchpad": "<your_scratchpad_to_reason_about_which_tool_to_use>", "tool": "<name_of_tool_ai>", "query": "<your_detailed_query_for_the_tool_ai>"}}
 """.strip()
 )
 
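Since the user's question now reaches the model as its own message, the plan_function_execution prompt above drops its trailing User: {query} / Khoj: scaffold, and the inline response example collapses to the single line that keeps the more specific <your_detailed_query_for_the_tool_ai> placeholder.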
@@ -799,7 +799,7 @@ async def generate_excalidraw_diagram_from_description(
 
     with timer("Chat actor: Generate excalidraw diagram", logger):
         raw_response = await send_message_to_model_wrapper(
-            message=excalidraw_diagram_generation, user=user, tracer=tracer
+            query=excalidraw_diagram_generation, user=user, tracer=tracer
         )
         raw_response = raw_response.strip()
         raw_response = remove_json_codeblock(raw_response)
@@ -879,11 +879,12 @@ async def generate_better_image_prompt(
 
 
 async def send_message_to_model_wrapper(
-    message: str,
+    query: str,
     system_message: str = "",
    response_type: str = "text",
     user: KhojUser = None,
     query_images: List[str] = None,
+    context: str = "",
     tracer: dict = {},
 ):
     conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
@@ -914,7 +915,8 @@ async def send_message_to_model_wrapper(
 
         loaded_model = state.offline_chat_processor_config.loaded_model
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             loaded_model=loaded_model,
@@ -939,7 +941,8 @@ async def send_message_to_model_wrapper(
         api_key = openai_chat_config.api_key
         api_base_url = openai_chat_config.api_base_url
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
@@ -960,7 +963,8 @@ async def send_message_to_model_wrapper(
     elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
         api_key = conversation_config.openai_config.api_key
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
@@ -979,7 +983,8 @@ async def send_message_to_model_wrapper(
     elif model_type == ChatModelOptions.ModelType.GOOGLE:
         api_key = conversation_config.openai_config.api_key
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
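All four model branches of send_message_to_model_wrapper (offline, OpenAI, Anthropic, Google) receive the same two-line edit: user_message now carries the bare query, and the new context_message keyword forwards the context parameter into generate_chatml_messages_with_context, matching the message layout sketched near the top of this page.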
@@ -77,7 +77,6 @@ async def apick_next_tool(
     username = prompts.user_name.format(name=user_name) if user_name else ""
 
     function_planning_prompt = prompts.plan_function_execution.format(
-        query=query,
         tools=tool_options_str,
         chat_history=chat_history,
         personality_context=personality_context,
@@ -91,7 +90,8 @@ async def apick_next_tool(
 
     with timer("Chat actor: Infer information sources to refer", logger):
         response = await send_message_to_model_wrapper(
-            function_planning_prompt,
+            query=query,
+            context=function_planning_prompt,
             response_type="json_object",
             user=user,
             query_images=query_images,
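With both hunks above applied, the research chat actor sends two cleanly separated inputs: the planning instructions travel as context while the user's original question stays the query (which is also why query= disappears from the .format() call; the prompt no longer interpolates it). Continuing the hypothetical build_messages sketch from the top of the page, with invented values:

planning_prompt = "Pick the next tool. Tools: notes, online_search. Reply with valid JSON only."
for m in build_messages("What did I write about llamas last week?", context=planning_prompt):
    print(m["role"], "->", m["content"])

A plausible motivation for the split: the static planning instructions stay out of the user turn, so the model can attribute the question to the user while treating the instructions as background, and the prompt template needs no per-query re-interpolation.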