Mirror of https://github.com/khoj-ai/khoj.git (synced 2024-11-23 15:38:55 +01:00)
Extract prompts as LangChain Prompt Templates into a separate module
Improves code modularity and cleanliness; reduces bloat in the gpt.py module.
Parent: b484953bb3
Commit: efcf7d1508
3 changed files with 189 additions and 126 deletions
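The change replaces inline f-string prompts with LangChain PromptTemplates that are rendered at call time. Below is a minimal sketch of that pattern, using an abridged variant of the answer template added in this commit (the wording here is illustrative; the full template appears in the diff further down):

# Sketch of the PromptTemplate pattern this commit adopts (abridged, illustrative template text).
from langchain.prompts import PromptTemplate

answer_example = PromptTemplate.from_template(
    """
Using the notes below, answer the question.

Notes:
{text}

Question: {user_query}
""".strip()
)

# format() renders the template into a plain string for the completion API.
prompt = answer_example.format(text="Hiked to the lake on Saturday.", user_query="What did I do this weekend?")
print(prompt)

Because format() returns an ordinary string, call sites such as answer() can keep handing the rendered prompt to completion_with_backoff unchanged.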
@@ -53,6 +53,7 @@ dependencies = [
     "torch == 1.13.1",
     "uvicorn == 0.17.6",
     "aiohttp == 3.8.4",
+    "langchain >= 0.0.187",
 ]
 dynamic = ["version"]
@@ -5,10 +5,10 @@ from datetime import datetime
 
 # Internal Packages
 from khoj.utils.constants import empty_escape_sequences
+from khoj.processor.conversation import prompts
 from khoj.processor.conversation.utils import (
     chat_completion_with_backoff,
     completion_with_backoff,
     message_to_prompt,
     generate_chatml_messages_with_context,
 )
@@ -20,17 +20,9 @@ def answer(text, user_query, model, api_key=None, temperature=0.5, max_tokens=50
     """
     Answer user query using provided text as reference with OpenAI's GPT
     """
-    # Setup Prompt based on Summary Type
-    prompt = f"""
-You are a friendly, helpful personal assistant.
-Using the users notes below, answer their following question. If the answer is not contained within the notes, say "I don't know."
-
-Notes:
-{text}
-
-Question: {user_query}
-
-Answer (in second person):"""
+    # Setup Prompt from arguments
+    prompt = prompts.answer.format(text=text, user_query=user_query)
 
     # Get Response from GPT
     logger.debug(f"Prompt for GPT: {prompt}")
     response = completion_with_backoff(
@@ -53,19 +45,9 @@ def summarize(text, summary_type, model, user_query=None, api_key=None, temperat
     """
     # Setup Prompt based on Summary Type
     if summary_type == "chat":
-        prompt = f"""
-You are an AI. Summarize the conversation below from your perspective:
-
-{text}
-
-Summarize the conversation from the AI's first-person perspective:"""
+        prompt = prompts.summarize_chat.format(text=text)
     elif summary_type == "notes":
-        prompt = f"""
-Summarize the below notes about {user_query}:
-
-{text}
-
-Summarize the notes in second person perspective:"""
+        prompt = prompts.summarize_notes.format(text=text, user_query=user_query)
 
     # Get Response from GPT
     logger.debug(f"Prompt for GPT: {prompt}")
@@ -102,63 +84,16 @@ def extract_questions(text, model="text-davinci-003", conversation_log={}, api_k
     current_new_year = today.replace(month=1, day=1)
     last_new_year = current_new_year.replace(year=today.year - 1)
 
-    prompt = f"""
-You are Khoj, an extremely smart and helpful search assistant with the ability to retrieve information from the users notes.
-- The user will provide their questions and answers to you for context.
-- Add as much context from the previous questions and answers as required into your search queries.
-- Break messages into multiple search queries when required to retrieve the relevant information.
-- Add date filters to your search queries from questions and answers when required to retrieve the relevant information.
-
-What searches, if any, will you need to perform to answer the users question?
-Provide search queries as a JSON list of strings
-Current Date: {today.strftime("%A, %Y-%m-%d")}
-
-Q: How was my trip to Cambodia?
-
-["How was my trip to Cambodia?"]
-
-A: The trip was amazing. I went to the Angkor Wat temple and it was beautiful.
-
-Q: Who did i visit that temple with?
-
-["Who did I visit the Angkor Wat Temple in Cambodia with?"]
-
-A: You visited the Angkor Wat Temple in Cambodia with Pablo, Namita and Xi.
-
-Q: What national parks did I go to last year?
-
-["National park I visited in {last_new_year.strftime("%Y")} dt>=\\"{last_new_year.strftime("%Y-%m-%d")}\\" dt<\\"{current_new_year.strftime("%Y-%m-%d")}\\""]
-
-A: You visited the Grand Canyon and Yellowstone National Park in {last_new_year.strftime("%Y")}.
-
-Q: How are you feeling today?
-
-[]
-
-A: I'm feeling a little bored. Helping you will hopefully make me feel better!
-
-Q: How many tennis balls fit in the back of a 2002 Honda Civic?
-
-["What is the size of a tennis ball?", "What is the trunk size of a 2002 Honda Civic?"]
-
-A: 1085 tennis balls will fit in the trunk of a Honda Civic
-
-Q: Is Bob older than Tom?
-
-["When was Bob born?", "What is Tom's age?"]
-
-A: Yes, Bob is older than Tom. As Bob was born on 1984-01-01 and Tom is 30 years old.
-
-Q: What is their age difference?
-
-["What is Bob's age?", "What is Tom's age?"]
-
-A: Bob is {current_new_year.year - 1984 - 30} years older than Tom. As Bob is {current_new_year.year - 1984} years old and Tom is 30 years old.
-
-{chat_history}
-Q: {text}
-
-"""
+    prompt = prompts.extract_questions.format(
+        current_date=today.strftime("%A, %Y-%m-%d"),
+        last_new_year=last_new_year.strftime("%Y"),
+        last_new_year_date=last_new_year.strftime("%Y-%m-%d"),
+        current_new_year_date=current_new_year.strftime("%Y-%m-%d"),
+        bob_tom_age_difference={current_new_year.year - 1984 - 30},
+        bob_age={current_new_year.year - 1984},
+        chat_history=chat_history,
+        text=text,
+    )
 
     # Get Response from GPT
     response = completion_with_backoff(
@@ -191,31 +126,8 @@ def extract_search_type(text, model, api_key=None, temperature=0.5, max_tokens=1
     """
     Extract search type from user query using OpenAI's GPT
     """
-    # Initialize Variables
-    understand_primer = """
-Objective: Extract search type from user query and return information as JSON
-
-Allowed search types are listed below:
-- search-type=["notes","ledger","image","music"]
-
-Some examples are given below for reference:
-Q:What fiction book was I reading last week about AI starship?
-A:{ "search-type": "notes" }
-Q:Play some calm classical music?
-A:{ "search-type": "music" }
-Q:How much did I spend at Subway for dinner last time?
-A:{ "search-type": "ledger" }
-Q:What was that popular Sri lankan song that Alex had mentioned?
-A:{ "search-type": "music" }
-Q:Can you recommend a movie to watch from my notes?
-A:{ "search-type": "notes" }
-Q: When did I buy Groceries last?
-A:{ "search-type": "ledger" }
-Q:When did I go surfing last?
-A:{ "search-type": "notes" }"""
-
-    # Setup Prompt with Understand Primer
-    prompt = message_to_prompt(text, understand_primer, start_sequence="\nA:", restart_sequence="\nQ:")
+    # Setup Prompt to extract search type
+    prompt = prompts.search_type + f"{text}\nA:"
 
     if verbose > 1:
         print(f"Message -> Prompt: {text} -> {prompt}")
@@ -241,36 +153,23 @@ def converse(references, user_query, conversation_log={}, model="gpt-3.5-turbo",
     Converse with user using OpenAI's ChatGPT
     """
     # Initialize Variables
+    current_date = datetime.now().strftime("%Y-%m-%d")
     compiled_references = "\n\n".join({f"# {item}" for item in references})
 
-    personality_primer = "You are Khoj, a friendly, smart and helpful personal assistant."
-    conversation_primers = {
-        "general": f"""
-Using your general knowledge and our past conversations as context, answer the following question.
-Current Date: {datetime.now().strftime("%Y-%m-%d")}
-
-Question: {user_query}
-""".strip(),
-        "notes": f"""
-Using the notes and our past conversations as context, answer the following question.
-Current Date: {datetime.now().strftime("%Y-%m-%d")}
-
-Notes:
-{compiled_references}
-
-Question: {user_query}
-""".strip(),
-    }
-
     # Get Conversation Primer appropriate to Conversation Type
     conversation_type = "general" if user_query.startswith("@general") or compiled_references.strip() == "" else "notes"
     logger.debug(f"Conversation Type: {conversation_type}")
-    conversation_primer = conversation_primers.get(conversation_type)
+    if conversation_type == "general":
+        conversation_primer = prompts.general_conversation.format(current_date=current_date, query=user_query)
+    else:
+        conversation_primer = prompts.notes_conversation.format(
+            current_date=current_date, query=user_query, references=compiled_references
+        )
 
     # Setup Prompt with Primer or Conversation History
     messages = generate_chatml_messages_with_context(
         conversation_primer,
-        personality_primer,
+        prompts.personality.format(),
         conversation_log,
         model,
     )

src/khoj/processor/conversation/prompts.py (new file, 163 additions)
@@ -0,0 +1,163 @@
# External Packages
from langchain.prompts import PromptTemplate


## Personality
## --
personality = PromptTemplate.from_template("You are Khoj, a friendly, smart and helpful personal assistant.")


## General Conversation
## --
general_conversation = PromptTemplate.from_template(
    """
Using your general knowledge and our past conversations as context, answer the following question.
Current Date: {current_date}

Question: {query}
""".strip()
)

## Notes Conversation
## --
notes_conversation = PromptTemplate.from_template(
    """
Using the notes and our past conversations as context, answer the following question.
Current Date: {current_date}

Notes:
{references}

Question: {query}
""".strip()
)


## Summarize Chat
## --
summarize_chat = PromptTemplate.from_template(
    """
You are an AI. Summarize the conversation below from your perspective:

{text}

Summarize the conversation from the AI's first-person perspective:"""
)

## Summarize Notes
## --
summarize_notes = PromptTemplate.from_template(
    """
Summarize the below notes about {user_query}:

{text}

Summarize the notes in second person perspective:"""
)


## Answer
## --
answer = PromptTemplate.from_template(
    """
You are a friendly, helpful personal assistant.
Using the users notes below, answer their following question. If the answer is not contained within the notes, say "I don't know."

Notes:
{text}

Question: {user_query}

Answer (in second person):"""
)

## Extract Questions
## --
extract_questions = PromptTemplate.from_template(
    """
You are Khoj, an extremely smart and helpful search assistant with the ability to retrieve information from the users notes.
- The user will provide their questions and answers to you for context.
- Add as much context from the previous questions and answers as required into your search queries.
- Break messages into multiple search queries when required to retrieve the relevant information.
- Add date filters to your search queries from questions and answers when required to retrieve the relevant information.

What searches, if any, will you need to perform to answer the users question?
Provide search queries as a JSON list of strings
Current Date: {current_date}

Q: How was my trip to Cambodia?

["How was my trip to Cambodia?"]

A: The trip was amazing. I went to the Angkor Wat temple and it was beautiful.

Q: Who did i visit that temple with?

["Who did I visit the Angkor Wat Temple in Cambodia with?"]

A: You visited the Angkor Wat Temple in Cambodia with Pablo, Namita and Xi.

Q: What national parks did I go to last year?

["National park I visited in {last_new_year} dt>=\\"{last_new_year_date}\\" dt<\\"{current_new_year_date}\\""]

A: You visited the Grand Canyon and Yellowstone National Park in {last_new_year}.

Q: How are you feeling today?

[]

A: I'm feeling a little bored. Helping you will hopefully make me feel better!

Q: How many tennis balls fit in the back of a 2002 Honda Civic?

["What is the size of a tennis ball?", "What is the trunk size of a 2002 Honda Civic?"]

A: 1085 tennis balls will fit in the trunk of a Honda Civic

Q: Is Bob older than Tom?

["When was Bob born?", "What is Tom's age?"]

A: Yes, Bob is older than Tom. As Bob was born on 1984-01-01 and Tom is 30 years old.

Q: What is their age difference?

["What is Bob's age?", "What is Tom's age?"]

A: Bob is {bob_tom_age_difference} years older than Tom. As Bob is {bob_age} years old and Tom is 30 years old.

{chat_history}
Q: {text}

"""
)

## Extract Search Type
## --
search_type = """
Objective: Extract search type from user query and return information as JSON

Allowed search types are listed below:
- search-type=["notes","ledger","image","music"]

Some examples are given below for reference:
Q:What fiction book was I reading last week about AI starship?
A:{ "search-type": "notes" }
Q:Play some calm classical music?
A:{ "search-type": "music" }
Q:How much did I spend at Subway for dinner last time?
A:{ "search-type": "ledger" }
Q:What was that popular Sri lankan song that Alex had mentioned?
A:{ "search-type": "music" }
Q:Can you recommend a movie to watch from my notes?
A:{ "search-type": "notes" }
Q:When did I buy Groceries last?
A:{ "search-type": "ledger" }
Q:When did I go surfing last?
A:{ "search-type": "notes" }
Q:"""
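
For reference, these prompts are consumed in gpt.py roughly as the hunks above show: PromptTemplate entries are rendered with .format(...), while search_type, which stays a plain string primer, has the user query appended to it. A minimal usage sketch with illustrative query strings:

from khoj.processor.conversation import prompts

# PromptTemplate entries are rendered with keyword arguments matching their placeholders.
prompt = prompts.answer.format(text="Hiked to the lake on Saturday.", user_query="What did I do this weekend?")

# search_type is a plain string primer, so the query is concatenated rather than formatted.
search_type_prompt = prompts.search_type + "When did I go surfing last?\nA:"

Both values are ordinary strings, ready to pass to the completion helpers in khoj.processor.conversation.utils.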