Add research mode as a slash command, remove from default path

sabaimran 2024-10-27 15:47:44 -07:00
parent 0bd78791ca
commit 101ea6efb1
3 changed files with 26 additions and 19 deletions


@@ -702,7 +702,26 @@ async def chat(
inferred_queries: List[Any] = []
defiltered_query = defilter_query(q)
if conversation_commands == [ConversationCommand.Default]:
if conversation_commands == [ConversationCommand.Default] or is_automated_task:
conversation_commands = await aget_relevant_information_sources(
q,
meta_log,
is_automated_task,
user=user,
query_images=uploaded_images,
agent=agent,
tracer=tracer,
)
mode = await aget_relevant_output_modes(
q, meta_log, is_automated_task, user, uploaded_images, agent, tracer=tracer
)
async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
yield result
if mode not in conversation_commands:
conversation_commands.append(mode)
if conversation_commands == [ConversationCommand.Research]:
async for research_result in execute_information_collection(
request=request,
user=user,
@@ -738,24 +757,6 @@ async def chat(
pending_research = False
conversation_commands = await aget_relevant_information_sources(
q,
meta_log,
is_automated_task,
user=user,
query_images=uploaded_images,
agent=agent,
tracer=tracer,
)
mode = await aget_relevant_output_modes(
q, meta_log, is_automated_task, user, uploaded_images, agent, tracer=tracer
)
async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
yield result
if mode not in conversation_commands:
conversation_commands.append(mode)
for cmd in conversation_commands:
await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
q = q.replace(f"/{cmd.value}", "").strip()


@@ -234,6 +234,8 @@ def get_conversation_command(query: str, any_references: bool = False) -> ConversationCommand:
return ConversationCommand.Diagram
elif query.startswith("/code"):
return ConversationCommand.Code
elif query.startswith("/research"):
return ConversationCommand.Research
# If no relevant notes found for the given query
elif not any_references:
return ConversationCommand.General
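
The `/research` prefix slots into the same first-match dispatch that handles the other slash commands. A minimal standalone approximation of that function is below; it covers only a subset of the real prefixes, but the ordering mirrors the diff above, including the fallback to general chat when no relevant notes were found.

```python
from enum import Enum


class ConversationCommand(str, Enum):
    Default = "default"
    General = "general"
    Diagram = "diagram"
    Code = "code"
    Research = "research"


def get_conversation_command(query: str, any_references: bool = False) -> ConversationCommand:
    # First-match prefix dispatch; only a few of the real prefixes are shown here.
    if query.startswith("/diagram"):
        return ConversationCommand.Diagram
    elif query.startswith("/code"):
        return ConversationCommand.Code
    elif query.startswith("/research"):
        return ConversationCommand.Research
    # If no relevant notes were found for the given query, fall back to general chat.
    elif not any_references:
        return ConversationCommand.General
    return ConversationCommand.Default


assert get_conversation_command("/research compare RISC-V vector extensions") == ConversationCommand.Research
```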


@@ -320,6 +320,7 @@ class ConversationCommand(str, Enum):
AutomatedTask = "automated_task"
Summarize = "summarize"
Diagram = "diagram"
Research = "research"
command_descriptions = {
@@ -334,6 +335,7 @@ command_descriptions = {
ConversationCommand.Help: "Get help with how to use or setup Khoj from the documentation",
ConversationCommand.Summarize: "Get help with a question pertaining to an entire document.",
ConversationCommand.Diagram: "Draw a flowchart, diagram, or any other visual representation best expressed with primitives like lines, rectangles, and text.",
ConversationCommand.Research: "Do deep research on a topic. This will take longer than usual, but give a more detailed, comprehensive answer.",
}
command_descriptions_for_agent = {
@@ -342,6 +344,7 @@ command_descriptions_for_agent = {
ConversationCommand.Online: "Agent can search the internet for information.",
ConversationCommand.Webpage: "Agent can read suggested web pages for information.",
ConversationCommand.Summarize: "Agent can read an entire document. Agents knowledge base must be a single document.",
ConversationCommand.Research: "Agent can do deep research on a topic.",
}
tool_descriptions_for_llm = {
@@ -352,6 +355,7 @@ tool_descriptions_for_llm = {
ConversationCommand.Webpage: "To use if the user has directly provided the webpage urls or you are certain of the webpage urls to read.",
ConversationCommand.Code: "To run Python code in a Pyodide sandbox with no network access. Helpful when need to parse information, run complex calculations, create documents and charts for user. Matplotlib, bs4, pandas, numpy, etc. are available.",
ConversationCommand.Summarize: "To retrieve an answer that depends on the entire document or a large text.",
ConversationCommand.Research: "To use when you need to do DEEP research on a topic. This will take longer than usual, but give a more detailed, comprehensive answer.",
}
function_calling_description_for_llm = {
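
The last file wires the new command into the shared vocabulary: a `Research` member on the `ConversationCommand` enum plus matching entries in the description dictionaries shown to users, exposed in agent configuration, and injected into the LLM's tool-selection prompt. A trimmed sketch of how those pieces fit together (only the research entries are shown; the real dictionaries carry many more commands):

```python
from enum import Enum


class ConversationCommand(str, Enum):
    Default = "default"
    Research = "research"


# Shown to users when they browse the available slash commands.
command_descriptions = {
    ConversationCommand.Research: (
        "Do deep research on a topic. This will take longer than usual, "
        "but give a more detailed, comprehensive answer."
    ),
}

# Listed in agent configuration so an agent can opt in to the capability.
command_descriptions_for_agent = {
    ConversationCommand.Research: "Agent can do deep research on a topic.",
}

# Injected into the tool-selection prompt so the LLM knows when to pick this tool.
tool_descriptions_for_llm = {
    ConversationCommand.Research: (
        "To use when you need to do DEEP research on a topic. This will take longer "
        "than usual, but give a more detailed, comprehensive answer."
    ),
}

# Because ConversationCommand subclasses str, the member serializes to its plain value.
assert ConversationCommand.Research.value == "research"
```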