Mirror of https://github.com/khoj-ai/khoj.git, synced 2024-11-23 15:38:55 +01:00
Resolve merge conflicts with updated processor conversation data model
commit 7ca4fc3453
3 changed files with 55 additions and 21 deletions
src/main.py (42 changed lines)
@@ -15,7 +15,7 @@ from src.utils.helpers import get_absolute_path
 from src.utils.cli import cli
 from src.utils.config import SearchType, SearchModels, TextSearchConfig, ImageSearchConfig, SearchConfig, ProcessorConfig, ConversationProcessorConfig
 from src.utils.rawconfig import FullConfig
-from src.processor.conversation.gpt import converse, message_to_prompt
+from src.processor.conversation.gpt import converse, message_to_log, message_to_prompt, understand
 
 # Application Global State
 model = SearchModels()
@@ -114,13 +114,16 @@ def regenerate(t: Optional[SearchType] = None):
 @app.get('/chat')
 def chat(q: str):
     # Load Conversation History
-    conversation_history = processor_config.conversation.conversation_history
+    chat_log = processor_config.conversation.chat_log
+    meta_log = processor_config.conversation.meta_log
 
     # Converse with OpenAI GPT
-    gpt_response = converse(q, conversation_history, api_key=processor_config.conversation.openai_api_key)
+    user_message_metadata = understand(q, api_key=processor_config.conversation.openai_api_key)
+    gpt_response = converse(q, chat_log, api_key=processor_config.conversation.openai_api_key)
 
     # Update Conversation History
-    processor_config.conversation.conversation_history = message_to_prompt(q, conversation_history, gpt_response)
+    processor_config.conversation.chat_log = message_to_prompt(q, chat_log, gpt_message=gpt_response)
+    processor_config.conversation.meta_log = message_to_log(q, user_message_metadata, gpt_response, meta_log)
 
     return {'status': 'ok', 'response': gpt_response}
@@ -158,31 +161,44 @@ def initialize_processor(verbose)
     # Initialize Conversation Processor
     processor_config.conversation = ConversationProcessorConfig(config.processor.conversation, verbose)
 
     # Load or Initialize Conversation History from Disk
     conversation_logfile = processor_config.conversation.conversation_logfile
     if processor_config.conversation.verbose:
-        print('Saving conversation logs to disk...')
+        print('INFO:\tLoading conversation logs from disk...')
 
     if conversation_logfile.expanduser().absolute().is_file():
         # Load Metadata Logs from Conversation Logfile
         with open(get_absolute_path(conversation_logfile), 'r') as f:
-            processor_config.conversation.conversation_history = json.load(f).get('chat', '')
+            processor_config.conversation.meta_log = json.load(f)
 
+        # Extract Chat Logs from Metadata
+        processor_config.conversation.chat_log = ''.join(
+            [f'\n{item["by"]}: {item["message"]}'
+             for item
+             in processor_config.conversation.meta_log])
+
+        print('INFO:\tConversation logs loaded from disk.')
     else:
-        processor_config.conversation.conversation_history = ''
+        # Initialize Conversation Logs
+        processor_config.conversation.meta_log = []
+        processor_config.conversation.chat_log = ""
 
     return processor_config
 
 
 @app.on_event('shutdown')
 def shutdown_event():
-    if processor_config.conversation.verbose:
-        print('Saving conversation logs to disk...')
+    # No need to create empty log file
+    if not processor_config.conversation.meta_log:
+        return
+    elif processor_config.conversation.verbose:
+        print('INFO:\tSaving conversation logs to disk...')
 
-    # Save Conversation History to Disk
+    # Save Conversation Metadata Logs to Disk
     conversation_logfile = get_absolute_path(processor_config.conversation.conversation_logfile)
     with open(conversation_logfile, "w+", encoding='utf-8') as logfile:
-        json.dump({"chat": processor_config.conversation.conversation_history}, logfile)
+        json.dump(processor_config.conversation.meta_log, logfile)
 
-    print('Conversation logs saved to disk.')
+    print('INFO:\tConversation logs saved to disk.')
 
 
 if __name__ == '__main__':
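For context, a minimal sketch (not part of the diff) of exercising the updated /chat endpoint. It assumes the app in src/main.py is being served locally on port 8000; the query parameter q and the response shape come from the chat() handler above.

# Illustrative only: call the /chat endpoint and read its response.
# Host and port are assumptions; the {'status': ..., 'response': ...} shape is from chat() above.
import requests

reply = requests.get("http://localhost:8000/chat", params={"q": "what did we talk about last time?"})
payload = reply.json()
print(payload["status"])    # 'ok'
print(payload["response"])  # GPT's reply; server-side it is also appended to chat_log and meta_log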
src/processor/conversation/gpt.py

@@ -1,5 +1,7 @@
 # Standard Packages
 import os
+import json
+from datetime import datetime
 
 # External Packages
 import openai
@@ -11,12 +13,12 @@ def understand(text, api_key=None, temperature=0.5, max_tokens=100):
     """
     # Initialize Variables
     openai.api_key = api_key or os.getenv("OPENAI_API_KEY")
-    understand_primer="Extract information from each chat message\n\nremember(memory-type, data);\nmemory-type=[\"companion\", \"notes\", \"ledger\", \"image\", \"music\"]\nsearch(search-type, data);\nsearch-type=[\"google\", \"youtube\"]\ngenerate(activity);\nactivity=[\"paint\",\"write\", \"chat\"]\ntrigger-emotion(emotion);\nemotion=[\"happy\",\"confidence\",\"fear\",\"surprise\",\"sadness\",\"disgust\",\"anger\", \"curiosity\", \"calm\"]\n\nQ: How are you doing?\nA: activity(\"chat\"); trigger-emotion(\"surprise\")\nQ: Do you remember what I told you about my brother Antoine when we were at the beach?\nA: remember(\"notes\", \"Brother Antoine when we were at the beach\"); trigger-emotion(\"curiosity\");\nQ: what did we talk about last time?\nA: remember(\"notes\", \"talk last time\"); trigger-emotion(\"curiosity\");\nQ: Let's make some drawings!\nA: generate(\"paint\"); trigger-emotion(\"happy\");\nQ: Do you know anything about Lebanon?\nA: search(\"google\", \"lebanon\"); trigger-emotion(\"confidence\");\nQ: Find a video about a panda rolling in the grass\nA: search(\"youtube\",\"panda rolling in the grass\"); trigger-emotion(\"happy\"); \nQ: Tell me a scary story\nA: generate(\"write\" \"A story about some adventure\"); trigger-emotion(\"fear\");\nQ: What fiction book was I reading last week about AI starship?\nA: remember(\"notes\", \"read fiction book about AI starship last week\"); trigger-emotion(\"curiosity\");\nQ: How much did I spend at Subway for dinner last time?\nA: remember(\"ledger\", \"last Subway dinner\"); trigger-emotion(\"curiosity\");\nQ: I'm feeling sleepy\nA: activity(\"chat\"); trigger-emotion(\"calm\")\nQ: What was that popular Sri lankan song that Alex showed me recently?\nA: remember(\"music\", \"popular Sri lankan song that Alex showed recently\"); trigger-emotion(\"curiosity\"); \nQ: You're pretty funny!\nA: activity(\"chat\"); trigger-emotion(\"pride\")"
+    understand_primer = "Extract intent, trigger emotion information as JSON from each chat message\n\n- intent\n - remember(memory-type, data);\n - memory-type=[\"companion\", \"notes\", \"ledger\", \"image\", \"music\"]\n - search(search-type, data);\n - search-type=[\"google\"]\n - generate(activity);\n - activity=[\"paint\",\"write\", \"chat\"]\n- trigger-emotion(emotion)\n - emotion=[\"happy\",\"confidence\",\"fear\",\"surprise\",\"sadness\",\"disgust\",\"anger\", \"curiosity\", \"calm\"]\n\nQ: How are you doing?\nA: { \"intent\": [\"activity\", \"chat\"], \"trigger-emotion\": \"surprise\" }\nQ: Do you remember what I told you about my brother Antoine when we were at the beach?\nA: { \"intent\": [\"remember\", \"notes\", \"Brother Antoine when we were at the beach\"], \"trigger-emotion\": \"curiosity\" }\nQ: what did we talk about last time?\nA: { \"intent\": [\"remember\", \"notes\", \"talk last time\"], \"trigger-emotion\": \"curiosity\" }\nQ: Let's make some drawings!\nA: { \"intent\": [\"generate\", \"paint\"], \"trigger-emotion: \"happy\" }\nQ: Do you know anything about Lebanon cuisine in the 18th century?\nA: { \"intent\": [\"search\", \"google\", \"lebanon cusine in the 18th century\"], \"trigger-emotion; \"confidence\" }\nQ: Tell me a scary story\nA: { \"intent\": [\"generate\", \"write\", \"A story about some adventure\"], \"trigger-emotion\": \"fear\" }\nQ: What fiction book was I reading last week about AI starship?\nA: { \"intent\": [\"remember\", \"notes\", \"fiction book about AI starship last week\"], \"trigger-emotion\": \"curiosity\" }\nQ: How much did I spend at Subway for dinner last time?\nA: { \"intent\": [\"remember\", \"ledger\", \"last Subway dinner\"], \"trigger-emotion\": \"curiosity\" }\nQ: I'm feeling sleepy\nA: { \"intent\": [\"activity\", \"chat\"], \"trigger-emotion\": \"calm\" }\nQ: What was that popular Sri lankan song that Alex had mentioned?\nA: { \"intent\": [\"remember\", \"music\", \"popular Sri lankan song mentioned by Alex\"], \"trigger-emotion\": \"curiosity\" } \nQ: You're pretty funny!\nA: { \"intent\": [\"activity\", \"chat\"], \"trigger-emotion\": \"pride\" }\nQ: Can you recommend a movie to watch from my notes?\nA: { \"intent\": [\"remember\", \"notes\", \"movie to watch\"], \"trigger-emotion\": \"curiosity\" }",
 
     # Setup Prompt with Understand Primer
     prompt = message_to_prompt(text, understand_primer, start_sequence="\nA:", restart_sequence="\nQ:")
 
-    # Get Reponse from GPT
+    # Get Response from GPT
     response = openai.Completion.create(
         engine="davinci",
         prompt=prompt,
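To make the primer change concrete: the old primer asked GPT for pseudo-function calls like remember("notes", ...), while the new one asks for a JSON object with intent and trigger-emotion keys. A small illustration (not part of the diff; the example values are taken from the primer itself):

# Illustrative only: the kind of string the new understand() primer elicits,
# and how downstream code can parse it as JSON.
import json

user_message_metadata = '{ "intent": ["remember", "notes", "talk last time"], "trigger-emotion": "curiosity" }'
metadata = json.loads(user_message_metadata)
print(metadata["intent"])           # ['remember', 'notes', 'talk last time']
print(metadata["trigger-emotion"])  # curiosity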
@@ -55,7 +57,7 @@ def converse(text, conversation_history=None, api_key=None, temperature=0.9, max
         top_p=1,
         frequency_penalty=0,
         presence_penalty=0.6,
-        stop=["\n", " Human:", " AI:"])
+        stop=["\n", "Human:", "AI:"])
 
     # Extract, Clean Message from GPT's Response
     story = response['choices'][0]['text']
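As background for the stop-sequence tweak: the prompt that converse() completes delimits turns with "\nHuman:" and "\nAI:" markers (see message_to_prompt below), so the stop strings are there to cut the completion off at the next turn marker. A rough illustration (not part of the diff; the chat text is made up):

# Illustrative only: what a prompt built with message_to_prompt's default
# start/restart sequences looks like when passed to the completion API.
chat_log = "\nHuman: Hi there!\nAI: Hello! What's on your mind?"
prompt = f"{chat_log}\nHuman: Tell me a scary story\nAI:"
# The completion is truncated at the first stop sequence it emits,
# e.g. a newline, "Human:", or "AI:".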
@@ -63,8 +65,22 @@ def converse(text, conversation_history=None, api_key=None, temperature=0.9, max
 
 
 def message_to_prompt(user_message, conversation_history="", gpt_message=None, start_sequence="\nAI:", restart_sequence="\nHuman:"):
-    """Create prompt for GPT from message"""
-    if gpt_message:
-        return f"{conversation_history}{restart_sequence} {user_message}{start_sequence} {gpt_message}"
-    else:
-        return f"{conversation_history}{restart_sequence} {user_message}{start_sequence}"
+    """Create prompt for GPT from messages and conversation history"""
+    gpt_message = f" {gpt_message}" if gpt_message else ""
+
+    return f"{conversation_history}{restart_sequence} {user_message}{start_sequence}{gpt_message}"
+
+
+def message_to_log(user_message, user_message_metadata, gpt_message, conversation_log=[]):
+    """Create json logs from messages, metadata for conversation log"""
+    # Create json log from Human's message
+    human_log = json.loads(user_message_metadata)
+    human_log["message"] = user_message
+    human_log["by"] = "Human"
+    human_log["created"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    # Create json log from GPT's response
+    ai_log = {"message": gpt_message, "by": "AI", "created": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
+
+    conversation_log.extend([human_log, ai_log])
+    return conversation_log
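Putting the pieces together, a sketch (not part of the diff; values are made up) of what one exchange looks like in meta_log after message_to_log runs. This is the structure dumped to the conversation logfile on shutdown and re-read by initialize_processor in main.py:

# Illustrative only: one Human/AI entry pair in meta_log.
# "intent" and "trigger-emotion" come from understand()'s JSON output;
# "message", "by" and "created" are added by message_to_log.
example_meta_log = [
    {
        "intent": ["remember", "notes", "talk last time"],
        "trigger-emotion": "curiosity",
        "message": "what did we talk about last time?",
        "by": "Human",
        "created": "2021-08-15 10:30:00",
    },
    {
        "message": "We last talked about your notes from the beach trip.",
        "by": "AI",
        "created": "2021-08-15 10:30:02",
    },
]

# main.py rebuilds chat_log from these entries on startup:
chat_log = ''.join([f'\n{item["by"]}: {item["message"]}' for item in example_meta_log])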
src/utils/config.py

@@ -74,6 +74,8 @@ class ConversationProcessorConfig():
     def __init__(self, processor_config: ProcessorConversationConfig, verbose: bool):
         self.openai_api_key = processor_config.open_api_key
         self.conversation_logfile = Path(processor_config.conversation_logfile)
+        self.chat_log = ''
+        self.meta_log = []
         self.conversation_history = Path(processor_config.conversation_history)
         self.verbose = verbose