mirror of https://github.com/khoj-ai/khoj.git
Skip summarize while it's broken, and snip some other parts of the workflow while under construction
parent f7e6f99a32
commit f71e4969d3

4 changed files with 18 additions and 12 deletions
@@ -45,6 +45,8 @@ OLOSTEP_QUERY_PARAMS = {
    "expandMarkdown": "True",
    "expandHtml": "False",
}

# TODO: Should this be 0 to let advanced model decide which web pages to read?
MAX_WEBPAGES_TO_READ = 1
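As a rough illustration (not code from this commit), a cap like MAX_WEBPAGES_TO_READ is typically applied by truncating the list of candidate links before fetching them in full; the search_results structure and field name below are made up for the example.

MAX_WEBPAGES_TO_READ = 1

search_results = [
    {"link": "https://example.org/a"},
    {"link": "https://example.org/b"},
]
# Keep only the first MAX_WEBPAGES_TO_READ links to read in full
webpages_to_read = [result["link"] for result in search_results[:MAX_WEBPAGES_TO_READ]]
print(webpages_to_read)  # ['https://example.org/a']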
@@ -743,11 +743,6 @@ async def chat(
        uploaded_image_url=uploaded_image_url,
        agent=agent,
    )
    conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
    async for result in send_event(
        ChatEvent.STATUS, f"**Chose Data Sources to Search:** {conversation_commands_str}"
    ):
        yield result

    mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, uploaded_image_url, agent)
    async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
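The send_event/yield pattern above is what streams status updates to the client. Below is a minimal, self-contained sketch of the idea, using simplified stand-ins rather than khoj's actual helpers.

import asyncio
from enum import Enum


class ChatEvent(Enum):
    STATUS = "status"


# Stand-in for a send_event helper: an async generator that packages one status message.
async def send_event(event_type: ChatEvent, data: str):
    yield {"type": event_type.value, "data": data}


# The chat generator re-yields whatever send_event emits, so the status reaches the client stream.
async def chat():
    mode_value = "text"
    async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode_value}"):
        yield result


async def main():
    async for event in chat():
        print(event)


asyncio.run(main())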
@@ -876,11 +871,15 @@ async def chat(
        defiltered_query = result[2]

    if not is_none_or_empty(compiled_references):
        headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
        # Strip only leading # from headings
        headings = headings.replace("#", "")
        async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
            yield result
        try:
            headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
            # Strip only leading # from headings
            headings = headings.replace("#", "")
            async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
                yield result
        except Exception as e:
            # TODO Get correct type for compiled across research notes extraction
            logger.error(f"Error extracting references: {e}", exc_info=True)

    if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
        async for result in send_llm_response(f"{no_entries_found.format()}"):
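For context, here is a minimal sketch (not part of the commit) of what the heading extraction now guarded by try/except does, run on made-up compiled_references entries.

compiled_references = [
    {"compiled": "# Project Ideas\nBuild a personal search engine."},
    {"compiled": "## Reading List\nPapers on retrieval-augmented generation."},
]

try:
    # First line of each reference becomes a bullet heading; set() drops duplicates
    headings = "\n- " + "\n- ".join(
        set(c.get("compiled", c).split("\n")[0] for c in compiled_references)
    )
    # Strip the markdown heading markers
    headings = headings.replace("#", "")
    print(f"**Found Relevant Notes**: {headings}")
except Exception as e:
    # Entries without the expected shape (e.g. plain strings, which lack .get) would land here
    print(f"Error extracting references: {e}")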
@@ -4,7 +4,7 @@ from typing import Any, Callable, Dict, List, Optional

from fastapi import Request

from khoj.database.adapters import EntryAdapters
from khoj.database.adapters import ConversationAdapters, EntryAdapters
from khoj.database.models import Agent, KhojUser
from khoj.processor.conversation import prompts
from khoj.processor.conversation.utils import remove_json_codeblock
@@ -76,6 +76,7 @@ async def apick_next_tool(
        prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
    )

    # TODO Add current date/time to the query
    function_planning_prompt = prompts.plan_function_execution.format(
        query=query,
        tools=tool_options_str,
@@ -84,11 +85,14 @@ async def apick_next_tool(
        previous_iterations=previous_iterations_history,
    )

    chat_model_option = await ConversationAdapters.aget_advanced_conversation_config()

    with timer("Chat actor: Infer information sources to refer", logger):
        response = await send_message_to_model_wrapper(
            function_planning_prompt,
            response_type="json_object",
            subscribed=subscribed,
            chat_model_option=chat_model_option,
        )

    try:
@@ -98,6 +102,8 @@ async def apick_next_tool(
        suggested_data_source = response.get("data_source", None)
        suggested_query = response.get("query", None)

        logger.info(f"Response for determining relevant tools: {response}")

        return InformationCollectionIteration(
            data_source=suggested_data_source,
            query=suggested_query,
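A minimal sketch (not from the commit) of the JSON object apick_next_tool expects back from the model: the field names come from the response.get calls above, while the concrete values are invented.

import json

raw_response = '{"data_source": "online", "query": "latest khoj release notes"}'

response = json.loads(raw_response)
suggested_data_source = response.get("data_source", None)
suggested_query = response.get("query", None)
print(suggested_data_source, suggested_query)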
@@ -349,7 +349,6 @@ function_calling_description_for_llm = {
    ConversationCommand.Notes: "Use this if you think the user's personal knowledge base contains relevant context.",
    ConversationCommand.Online: "Use this if you think there's important information on the internet related to the query.",
    ConversationCommand.Webpage: "Use this if the user has provided a webpage URL or you are sure of a webpage URL that will help you directly answer this query",
    ConversationCommand.Summarize: "Use this if you want to retrieve an answer that depends on reading an entire corpus.",
}

mode_descriptions_for_llm = {
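A minimal sketch (not from the commit) of how a description dict like the one above can be flattened into the tool_options_str passed to plan_function_execution in apick_next_tool; the enum and formatting here are simplified stand-ins.

from enum import Enum


class ConversationCommand(Enum):
    Notes = "notes"
    Online = "online"
    Webpage = "webpage"


function_calling_description_for_llm = {
    ConversationCommand.Notes: "Use this if you think the user's personal knowledge base contains relevant context.",
    ConversationCommand.Online: "Use this if you think there's important information on the internet related to the query.",
    ConversationCommand.Webpage: "Use this if the user has provided a webpage URL that will help you directly answer this query.",
}

# One bullet per tool; dropping an entry (e.g. Summarize) removes it from the planner's menu
tool_options_str = "\n".join(
    f"- {command.value}: {description}"
    for command, description in function_calling_description_for_llm.items()
)
print(tool_options_str)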