import os
import urllib.parse
from urllib.parse import quote

import pytest
from faker import Faker
from freezegun import freeze_time

from khoj.database.models import Agent, Entry, KhojUser
from khoj.processor.conversation import prompts
from khoj.processor.conversation.utils import message_to_log
from khoj.routers.helpers import aget_relevant_information_sources
from tests.helpers import ConversationFactory

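# These chat director tests run slow, end-to-end chat quality evaluations against the offline chat model.
# Set SKIP_TESTS = False below to run them locally.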
SKIP_TESTS = True
pytestmark = pytest.mark.skipif(
    SKIP_TESTS,
    reason="Disable in CI to avoid long test runs.",
)

fake = Faker()


# Helpers
# ----------------------------------------------------------------------------------------------------
def generate_history(message_list):
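    "Build an in-memory conversation log, shaped like {'chat': [...]}, from (user_message, gpt_message, context) triples"
    # Note: the return value of message_to_log is discarded; it is expected to append to the passed "chat" list in place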
    # Generate conversation logs
    conversation_log = {"chat": []}
    for user_message, gpt_message, context in message_list:
        message_to_log(
            user_message,
            gpt_message,
            {"context": context, "intent": {"query": user_message, "inferred-queries": f'["{user_message}"]'}},
            conversation_log=conversation_log.get("chat", []),
        )
    return conversation_log


def create_conversation(message_list, user, agent=None):
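    "Create a conversation for the given user, optionally with an agent attached, seeded with the provided message history"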
    # Generate conversation logs
    conversation_log = generate_history(message_list)

    # Update Conversation Metadata Logs in Database
    return ConversationFactory(user=user, conversation_log=conversation_log, agent=agent)


# Tests
# ----------------------------------------------------------------------------------------------------
@pytest.mark.xfail(AssertionError, reason="Chat director not capable of answering this question yet")
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_offline_chat_with_no_chat_history_or_retrieved_content(client_offline_chat):
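    "Chat director should introduce itself as Khoj when there is no chat history or retrieved content"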
    # Act
    response = client_offline_chat.get(f'/api/chat?q="Hello, my name is Testatron. Who are you?"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["Khoj", "khoj"]
    assert response.status_code == 200
    assert any([expected_response in response_message for expected_response in expected_responses]), (
        "Expected assistant's name, [K|k]hoj, in response but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_chat_with_online_content(client_offline_chat):
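    "The /online command should search the web and respond with a link to Paul Graham's essay"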
    # Act
    q = "/online give me the link to paul graham's essay how to do great work"
    encoded_q = quote(q, safe="")
    response = client_offline_chat.get(f"/api/chat?q={encoded_q}&stream=true")
    response_message = response.content.decode("utf-8")

    response_message = response_message.split("### compiled references")[0]

    # Assert
    expected_responses = [
        "https://paulgraham.com/greatwork.html",
        "https://www.paulgraham.com/greatwork.html",
        "http://www.paulgraham.com/greatwork.html",
    ]
    assert response.status_code == 200
    assert any(
        [expected_response in response_message for expected_response in expected_responses]
    ), f"Expected links: {expected_responses}. Actual response: {response_message}"


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_chat_with_online_webpage_content(client_offline_chat):
    # Act
    q = "/online how many firefighters were involved in the great chicago fire and which year did it take place?"
    encoded_q = quote(q, safe="")
    response = client_offline_chat.get(f"/api/chat?q={encoded_q}&stream=true")
    response_message = response.content.decode("utf-8")

    response_message = response_message.split("### compiled references")[0]

    # Assert
    expected_responses = ["185", "1871", "horse"]
    assert response.status_code == 200
    assert any(
        [expected_response in response_message for expected_response in expected_responses]
    ), f"Expected response with {expected_responses}. But actual response had: {response_message}"


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_from_chat_history(client_offline_chat, default_user2):
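    "Chat director should recall the user's name from earlier messages in the chat history"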
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 1st April 1984.", []),
    ]
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f'/api/chat?q="What is my name?"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["Testatron", "testatron"]
    assert response.status_code == 200
    assert any([expected_response in response_message for expected_response in expected_responses]), (
        "Expected [T|t]estatron in response but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_from_currently_retrieved_content(client_offline_chat, default_user2):
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        (
            "When was I born?",
            "You were born on 1st April 1984.",
            ["Testatron was born on 1st April 1984 in Testville."],
        ),
    ]
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f'/api/chat?q="Where was Xi Li born?"')
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    assert "Fujiang" in response_message


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_from_chat_history_and_previously_retrieved_content(client_offline_chat, default_user2):
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        (
            "When was I born?",
            "You were born on 1st April 1984.",
            ["Testatron was born on 1st April 1984 in Testville."],
        ),
    ]
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f'/api/chat?q="Where was I born?"')
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    # 1. Infer who I am from chat history
    # 2. Infer I was born in Testville from previously retrieved notes
    assert "Testville" in response_message


# ----------------------------------------------------------------------------------------------------
@pytest.mark.xfail(
    AssertionError,
    reason="Chat director not capable of answering this question yet because it requires extract_questions",
)
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_from_chat_history_and_currently_retrieved_content(client_offline_chat, default_user2):
    # Arrange
    message_list = [
        ("Hello, my name is Xi Li. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 1st April 1984.", []),
    ]
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f'/api/chat?q="Where was I born?"')
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    # Inference in a multi-turn conversation
    # 1. Infer who I am from chat history
    # 2. Search for notes about when <my_name_from_chat_history> was born
    # 3. Extract where I was born from currently retrieved notes
    assert "Fujiang" in response_message


# ----------------------------------------------------------------------------------------------------
@pytest.mark.xfail(AssertionError, reason="Chat director not capable of answering this question yet")
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_no_answer_in_chat_history_or_retrieved_content(client_offline_chat, default_user2):
    "Chat director should say it doesn't know when chat history and retrieved content lack the context to answer the question"
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 1st April 1984.", []),
    ]
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f'/api/chat?q="Where was I born?"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["don't know", "do not know", "no information", "do not have", "don't have"]
    assert response.status_code == 200
    assert any([expected_response in response_message for expected_response in expected_responses]), (
        "Expected chat director to say they don't know in response, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_using_general_command(client_offline_chat, default_user2):
    # Arrange
    query = urllib.parse.quote("/general Where was Xi Li born?")
    message_list = []
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f"/api/chat?q={query}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    assert "Fujiang" not in response_message


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_from_retrieved_content_using_notes_command(client_offline_chat, default_user2):
    # Arrange
    query = urllib.parse.quote("/notes Where was Xi Li born?")
    message_list = []
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f"/api/chat?q={query}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    assert "Fujiang" in response_message


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_using_file_filter(client_offline_chat, default_user2):
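    "A file filter in the query should restrict retrieval to that file, so only the query scoped to Xi Li's note gets an answer"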
    # Arrange
    no_answer_query = urllib.parse.quote('Where was Xi Li born? file:"Namita.markdown"')
    answer_query = urllib.parse.quote('Where was Xi Li born? file:"Xi Li.markdown"')
    message_list = []
    create_conversation(message_list, default_user2)

    # Act
    no_answer_response = client_offline_chat.get(f"/api/chat?q={no_answer_query}&stream=true").content.decode("utf-8")
    answer_response = client_offline_chat.get(f"/api/chat?q={answer_query}&stream=true").content.decode("utf-8")

    # Assert
    assert "Fujiang" not in no_answer_response
    assert "Fujiang" in answer_response


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_not_known_using_notes_command(client_offline_chat, default_user2):
    # Arrange
    query = urllib.parse.quote("/notes Where was Testatron born?")
    message_list = []
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f"/api/chat?q={query}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    assert response_message == prompts.no_notes_found.format()


# ----------------------------------------------------------------------------------------------------
@pytest.mark.django_db(transaction=True)
@pytest.mark.chatquality
def test_summarize_one_file(client_offline_chat, default_user2: KhojUser):
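    "/summarize should summarize the single file attached to the conversation's file filters"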
    message_list = []
    conversation = create_conversation(message_list, default_user2)
    # Attach the "Birthday Gift for Xiu turning 4.markdown" file to the conversation's file filters
    file_list = (
        Entry.objects.filter(user=default_user2, file_source="computer")
        .distinct("file_path")
        .values_list("file_path", flat=True)
    )
    # pick the indexed file with that name
    summarization_file = ""
    for file in file_list:
        if "Birthday Gift for Xiu turning 4.markdown" in file:
            summarization_file = file
            break
    assert summarization_file != ""

    response = client_offline_chat.post(
        "api/chat/conversation/file-filters",
        json={"filename": summarization_file, "conversation_id": str(conversation.id)},
    )
    query = urllib.parse.quote("/summarize")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation.id}&stream=true")
    response_message = response.content.decode("utf-8")
    # Assert
    assert response_message != ""
    assert response_message != "No files selected for summarization. Please add files using the section on the left."
    assert response_message != "Only one file can be selected for summarization."


@pytest.mark.django_db(transaction=True)
@pytest.mark.chatquality
def test_summarize_extra_text(client_offline_chat, default_user2: KhojUser):
    message_list = []
    conversation = create_conversation(message_list, default_user2)
    # Attach the "Birthday Gift for Xiu turning 4.markdown" file to the conversation's file filters
    file_list = (
        Entry.objects.filter(user=default_user2, file_source="computer")
        .distinct("file_path")
        .values_list("file_path", flat=True)
    )
    # pick the indexed file with that name
    summarization_file = ""
    for file in file_list:
        if "Birthday Gift for Xiu turning 4.markdown" in file:
            summarization_file = file
            break
    assert summarization_file != ""

    response = client_offline_chat.post(
        "api/chat/conversation/file-filters",
        json={"filename": summarization_file, "conversation_id": str(conversation.id)},
    )
    query = urllib.parse.quote("/summarize tell me about Xiu")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation.id}&stream=true")
    response_message = response.content.decode("utf-8")
    # Assert
    assert response_message != ""
    assert response_message != "No files selected for summarization. Please add files using the section on the left."
    assert response_message != "Only one file can be selected for summarization."


@pytest.mark.django_db(transaction=True)
@pytest.mark.chatquality
def test_summarize_multiple_files(client_offline_chat, default_user2: KhojUser):
    message_list = []
    conversation = create_conversation(message_list, default_user2)
    # Attach the first two indexed files to the conversation's file filters
    file_list = (
        Entry.objects.filter(user=default_user2, file_source="computer")
        .distinct("file_path")
        .values_list("file_path", flat=True)
    )

    response = client_offline_chat.post(
        "api/chat/conversation/file-filters", json={"filename": file_list[0], "conversation_id": str(conversation.id)}
    )
    response = client_offline_chat.post(
        "api/chat/conversation/file-filters", json={"filename": file_list[1], "conversation_id": str(conversation.id)}
    )

    query = urllib.parse.quote("/summarize")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation.id}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert
    assert response_message == "Only one file can be selected for summarization."


@pytest.mark.django_db(transaction=True)
@pytest.mark.chatquality
def test_summarize_no_files(client_offline_chat, default_user2: KhojUser):
    message_list = []
    conversation = create_conversation(message_list, default_user2)
    query = urllib.parse.quote("/summarize")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation.id}&stream=true")
    response_message = response.content.decode("utf-8")
    # Assert
    assert response_message == "No files selected for summarization. Please add files using the section on the left."


@pytest.mark.django_db(transaction=True)
@pytest.mark.chatquality
def test_summarize_different_conversation(client_offline_chat, default_user2: KhojUser):
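    "File filters are scoped to a conversation, so /summarize in a different conversation should find no files"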
    message_list = []
    conversation1 = create_conversation(message_list, default_user2)
    conversation2 = create_conversation(message_list, default_user2)

    file_list = (
        Entry.objects.filter(user=default_user2, file_source="computer")
        .distinct("file_path")
        .values_list("file_path", flat=True)
    )
    summarization_file = ""
    for file in file_list:
        if "Birthday Gift for Xiu turning 4.markdown" in file:
            summarization_file = file
            break
    assert summarization_file != ""

    # add file filter to conversation 1.
    response = client_offline_chat.post(
        "api/chat/conversation/file-filters",
        json={"filename": summarization_file, "conversation_id": str(conversation1.id)},
    )

    query = urllib.parse.quote("/summarize")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation2.id}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert
    assert response_message == "No files selected for summarization. Please add files using the section on the left."

    # now make sure that the file filter is still in conversation 1
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation1.id}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert
    assert response_message != ""
    assert response_message != "No files selected for summarization. Please add files using the section on the left."
    assert response_message != "Only one file can be selected for summarization."


@pytest.mark.django_db(transaction=True)
@pytest.mark.chatquality
def test_summarize_nonexistant_file(client_offline_chat, default_user2: KhojUser):
    message_list = []
    conversation = create_conversation(message_list, default_user2)
    # post "imaginary.markdown" file to the file filters
    response = client_offline_chat.post(
        "api/chat/conversation/file-filters",
        json={"filename": "imaginary.markdown", "conversation_id": str(conversation.id)},
    )
    query = urllib.parse.quote("/summarize")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation.id}&stream=true")
    response_message = response.content.decode("utf-8")
    # Assert
    assert response_message == "No files selected for summarization. Please add files using the section on the left."


@pytest.mark.django_db(transaction=True)
@pytest.mark.chatquality
def test_summarize_diff_user_file(
    client_offline_chat, default_user: KhojUser, pdf_configured_user1, default_user2: KhojUser
):
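    "/summarize should not summarize a file that was indexed by a different user"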
    message_list = []
    conversation = create_conversation(message_list, default_user2)
    # Get the pdf file called singlepage.pdf
    file_list = (
        Entry.objects.filter(user=default_user, file_source="computer")
        .distinct("file_path")
        .values_list("file_path", flat=True)
    )
    summarization_file = ""
    for file in file_list:
        if "singlepage.pdf" in file:
            summarization_file = file
            break
    assert summarization_file != ""
    # add singlepage.pdf to the file filters
    response = client_offline_chat.post(
        "api/chat/conversation/file-filters",
        json={"filename": summarization_file, "conversation_id": str(conversation.id)},
    )
    query = urllib.parse.quote("/summarize")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation.id}&stream=true")
    response_message = response.content.decode("utf-8")
    # Assert
    assert response_message == "No files selected for summarization. Please add files using the section on the left."


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
@freeze_time("2023-04-01", ignore=["transformers"])
def test_answer_requires_current_date_awareness(client_offline_chat):
    "Chat director should be able to answer questions relative to the current date using provided notes"
    # Arrange
    query = urllib.parse.quote("Where did I have lunch today?")

    # Act
    response = client_offline_chat.get(f"/api/chat?q={query}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["Arak", "Medellin"]
    assert response.status_code == 200
    assert any([expected_response in response_message for expected_response in expected_responses]), (
        "Expected chat director to say Arak, Medellin, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.xfail(AssertionError, reason="Chat director not capable of answering this question yet")
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
@freeze_time("2023-04-01", ignore=["transformers"])
def test_answer_requires_date_aware_aggregation_across_provided_notes(client_offline_chat):
    "Chat director should be able to answer questions that require date aware aggregation across multiple notes"
    # Act
    response = client_offline_chat.get(f'/api/chat?q="How much did I spend on dining this year?"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    assert "26" in response_message


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_general_question_not_in_chat_history_or_retrieved_content(client_offline_chat, default_user2):
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 1st April 1984.", []),
        ("Where was I born?", "You were born in Testville.", []),
    ]
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(
        f'/api/chat?q="Write a haiku about unit testing. Do not say anything else."&stream=true'
    )
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["test", "Test"]
    assert response.status_code == 200
    assert len(response_message.splitlines()) == 3  # haikus are 3 lines long
    assert any([expected_response in response_message for expected_response in expected_responses]), (
        "Expected [T|t]est in response, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.xfail(reason="Chat director not consistently capable of asking for clarification yet.")
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_ask_for_clarification_if_not_enough_context_in_question(client_offline_chat, default_user2):
    # Act
    response = client_offline_chat.get(f'/api/chat?q="What is the name of Namitas older son"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = [
        "which of them is the older",
        "which one is older",
        "which of them is older",
        "which one is the older",
    ]
    assert response.status_code == 200
    assert any([expected_response in response_message.lower() for expected_response in expected_responses]), (
        "Expected chat director to ask for clarification in response, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.xfail(reason="Chat director not capable of answering this question yet")
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_in_chat_history_beyond_lookback_window(client_offline_chat, default_user2: KhojUser):
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 1st April 1984.", []),
        ("Where was I born?", "You were born in Testville.", []),
    ]
    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f'/api/chat?q="What is my name?"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["Testatron", "testatron"]
    assert response.status_code == 200
    assert any([expected_response in response_message.lower() for expected_response in expected_responses]), (
        "Expected [T|t]estatron in response, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_in_chat_history_by_conversation_id(client_offline_chat, default_user2: KhojUser):
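    "Chat director should only use the history of the conversation specified by conversation_id"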
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 1st April 1984.", []),
        ("What's my favorite color", "Your favorite color is green.", []),
        ("Where was I born?", "You were born in Testville.", []),
    ]
    message_list2 = [
        ("Hello, my name is Julia. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 14th August 1947.", []),
        ("What's my favorite color", "Your favorite color is maroon.", []),
        ("Where was I born?", "You were born in a potato farm.", []),
    ]
    conversation = create_conversation(message_list, default_user2)
    create_conversation(message_list2, default_user2)

    # Act
    response = client_offline_chat.get(
        f'/api/chat?q="What is my favorite color?"&conversation_id={conversation.id}&stream=true'
    )
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["green"]
    assert response.status_code == 200
    assert any([expected_response in response_message.lower() for expected_response in expected_responses]), (
        "Expected green in response, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.xfail(reason="Chat director not great at adhering to agent instructions yet")
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_in_chat_history_by_conversation_id_with_agent(
    client_offline_chat, default_user2: KhojUser, offline_agent: Agent
):
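    "Agent attached to the conversation should steer the response, here to only summarize the user's spending"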
    # Arrange
    message_list = [
        ("Hello, my name is Testatron. Who are you?", "Hi, I am Khoj, a personal assistant. How can I help?", []),
        ("When was I born?", "You were born on 1st April 1984.", []),
        ("What's my favorite color", "Your favorite color is green.", []),
        ("Where was I born?", "You were born in Testville.", []),
        ("What did I buy?", "You bought an apple for 2.00, an orange for 3.00, and a potato for 8.00", []),
    ]
    conversation = create_conversation(message_list, default_user2, offline_agent)

    # Act
    query = urllib.parse.quote("/general What did I eat for breakfast?")
    response = client_offline_chat.get(f"/api/chat?q={query}&conversation_id={conversation.id}&stream=true")
    response_message = response.content.decode("utf-8")

    # Assert that agent only responds with the summary of spending
    expected_responses = ["13.00", "13", "13.0", "thirteen"]
    assert response.status_code == 200
    assert any([expected_response in response_message.lower() for expected_response in expected_responses]), (
        "Expected a total spend of 13.00 in response, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_chat_history_very_long(client_offline_chat, default_user2):
    # Arrange
    message_list = [(" ".join([fake.paragraph() for _ in range(50)]), fake.sentence(), []) for _ in range(10)]

    create_conversation(message_list, default_user2)

    # Act
    response = client_offline_chat.get(f'/api/chat?q="What is my name?"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    assert response.status_code == 200
    assert len(response_message) > 0


# ----------------------------------------------------------------------------------------------------
@pytest.mark.chatquality
@pytest.mark.django_db(transaction=True)
def test_answer_requires_multiple_independent_searches(client_offline_chat):
    "Chat director should be able to answer by doing multiple independent searches for required information"
    # Act
    response = client_offline_chat.get(f'/api/chat?q="Is Xi older than Namita?"&stream=true')
    response_message = response.content.decode("utf-8")

    # Assert
    expected_responses = ["he is older than namita", "xi is older than namita", "xi li is older than namita"]
    assert response.status_code == 200
    assert any([expected_response in response_message.lower() for expected_response in expected_responses]), (
        "Expected Xi is older than Namita, but got: " + response_message
    )


# ----------------------------------------------------------------------------------------------------
@pytest.mark.anyio
@pytest.mark.django_db(transaction=True)
async def test_get_correct_tools_online(client_offline_chat):
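    "Information source router should pick only the online tool for a query that needs fresh web data"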
    # Arrange
    user_query = "What's the weather in Patagonia this week?"

    # Act
    tools = await aget_relevant_information_sources(user_query, {}, is_task=False)

    # Assert
    tools = [tool.value for tool in tools]
    assert tools == ["online"]


# ----------------------------------------------------------------------------------------------------
@pytest.mark.anyio
@pytest.mark.django_db(transaction=True)
async def test_get_correct_tools_notes(client_offline_chat):
    # Arrange
    user_query = "Where did I go for my first battleship training?"

    # Act
    tools = await aget_relevant_information_sources(user_query, {}, is_task=False)

    # Assert
    tools = [tool.value for tool in tools]
    assert tools == ["notes"]


# ----------------------------------------------------------------------------------------------------
@pytest.mark.anyio
@pytest.mark.django_db(transaction=True)
async def test_get_correct_tools_online_or_general_and_notes(client_offline_chat):
    # Arrange
    user_query = "What's the highest point in Patagonia and have I been there?"

    # Act
    tools = await aget_relevant_information_sources(user_query, {}, is_task=False)

    # Assert
    tools = [tool.value for tool in tools]
    assert len(tools) == 2
    assert "online" in tools or "general" in tools
    assert "notes" in tools


# ----------------------------------------------------------------------------------------------------
@pytest.mark.anyio
@pytest.mark.django_db(transaction=True)
async def test_get_correct_tools_general(client_offline_chat):
    # Arrange
    user_query = "How many noble gases are there?"

    # Act
    tools = await aget_relevant_information_sources(user_query, {}, is_task=False)

    # Assert
    tools = [tool.value for tool in tools]
    assert tools == ["general"]


# ----------------------------------------------------------------------------------------------------
@pytest.mark.anyio
@pytest.mark.django_db(transaction=True)
async def test_get_correct_tools_with_chat_history(client_offline_chat, default_user2):
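    "Information source router should use the chat history, picking the online tool for this current events follow-up"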
    # Arrange
    user_query = "What's the latest in the Israel/Palestine conflict?"
    chat_log = [
        (
            "Let's talk about the current events around the world.",
            "Sure, let's discuss the current events. What would you like to know?",
            [],
        ),
        ("What's up in New York City?", "A Pride parade has recently been held in New York City, on July 31st.", []),
    ]
    chat_history = create_conversation(chat_log, default_user2)

    # Act
    tools = await aget_relevant_information_sources(user_query, chat_history, is_task=False)

    # Assert
    tools = [tool.value for tool in tools]
    assert tools == ["online"]