Introduce improved answer API and prompt. Use by default in chat web interface

- Improve GPT prompt
  - Make GPT answer the user's query based on the provided notes instead
    of summarizing the provided notes
  - Make GPT be truthful using prompt and reduced temperature
  - Use Official OpenAI Q&A prompt from cookbook as starting reference
- Replace summarize API with the improved answer API endpoint
- Default to answer type in chat web interface. The chat type is not
  fit for default consumption yet
This commit is contained in:
Debanjum Singh Solanky 2023-03-03 12:52:03 -06:00
parent 7184508784
commit c3b624e351
3 changed files with 35 additions and 8 deletions

View file

@ -50,9 +50,7 @@
document.getElementById("chat-input").value = "";
// Generate backend API URL to execute query
url = type_ === "chat"
? `/api/beta/chat?q=${encodeURIComponent(query)}`
: `/api/beta/summarize?q=${encodeURIComponent(query)}`;
url = `/api/beta/${type_}?q=${encodeURIComponent(query)}`;
// Call specified Khoj API
fetch(url)
@ -112,8 +110,8 @@
<!--Select Chat Type from: Chat, Summarize -->
<select id="chat-type" class="option" onchange="setTypeFieldInUrl(this)">
<option value="answer">Answer</option>
<option value="chat">Chat</option>
<option value="summarize">Summarize</option>
</select>
</div>
</body>

View file

@ -10,6 +10,34 @@ import openai
from khoj.utils.constants import empty_escape_sequences
def answer(text, user_query, model, api_key=None, temperature=0.3, max_tokens=200):
    """
    Answer the user's query using the provided text as reference with OpenAI's GPT.

    Args:
        text: Notes to ground the answer in; interpolated into the prompt
        user_query: The question to answer
        model: OpenAI completion model name to use
        api_key: OpenAI API key; falls back to the OPENAI_API_KEY env var
        temperature: Sampling temperature; kept low by default to reduce hallucination
        max_tokens: Maximum number of tokens to generate in the answer

    Returns:
        The generated answer as a string, with double newlines removed
    """
    # Initialize Variables
    openai.api_key = api_key or os.getenv("OPENAI_API_KEY")

    # Setup prompt to answer the user's query from their notes.
    # Instructs GPT to say "I don't know" rather than fabricate an answer
    prompt = f"""
You are a friendly, helpful personal assistant.
Using the users notes below, answer their following question. If the answer is not contained within the notes, say "I don't know."
Notes:
{text}
Question: {user_query}
Answer (in second person):"""

    # Get Response from GPT
    response = openai.Completion.create(
        prompt=prompt, model=model, temperature=temperature, max_tokens=max_tokens, stop='"""'
    )

    # Extract, Clean Answer from GPT's Response
    gpt_answer = response["choices"][0]["text"]
    return str(gpt_answer).replace("\n\n", "")
def summarize(text, summary_type, model, user_query=None, api_key=None, temperature=0.5, max_tokens=200):
"""
Summarize user input using OpenAI's GPT

View file

@ -10,6 +10,7 @@ from fastapi import APIRouter
# Internal Packages
from khoj.routers.api import search
from khoj.processor.conversation.gpt import (
answer,
converse,
extract_search_type,
message_to_log,
@ -48,8 +49,8 @@ def search_beta(q: str, n: Optional[int] = 1):
return {"status": "ok", "result": search_results, "type": search_type}
@api_beta.get("/summarize")
def summarize_beta(q: str):
@api_beta.get("/answer")
def answer_beta(q: str):
# Initialize Variables
model = state.processor_config.conversation.model
api_key = state.processor_config.conversation.openai_api_key
@ -61,9 +62,9 @@ def summarize_beta(q: str):
# Converse with OpenAI GPT
result_list = search(q, n=1, r=True)
collated_result = "\n".join([item.entry for item in result_list])
logger.debug(f"Semantically Similar Notes:\n{collated_result}")
logger.debug(f"Reference Notes:\n{collated_result}")
try:
gpt_response = summarize(collated_result, summary_type="notes", user_query=q, model=model, api_key=api_key)
gpt_response = answer(collated_result, user_query=q, model=model, api_key=api_key)
status = "ok"
except Exception as e:
gpt_response = str(e)