Mirror of https://github.com/khoj-ai/khoj.git, synced 2024-12-12 08:43:03 +01:00
fdf78525b4
* Initial pass at backend changes to support agents
  - Add a db model for Agents, attaching them to conversations
  - When an agent is added to a conversation, override the system prompt to tweak the instructions
  - Agents can be configured with prompt modification, model specification, a profile picture, and other things
  - Admin-configured models will not be editable by individual users
  - Add unit tests to verify agent behavior. Unit tests demonstrate imperfect adherence to prompt specifications
* Customize default behaviors for conversations without agents or with default agents
* Add a new web client route for viewing all agents
* Use agent_id for getting the correct agent
* Add web UI views for agents
  - Add a page to view all agents
  - Add slugs to manage agents
  - Add a view to view a single agent
  - Display the active agent when in the chat window
  - Fix post-login redirect issue
* Fix agent view
* Spruce up the 404 page and improve the overall layout for agents pages
* Create chat actor for directly reading webpages based on user message
  - Add prompt for the read webpages chat actor to extract, infer webpage links
  - Make chat actor infer or extract webpage to read directly from user message
  - Rename previous read_webpage function to the more narrow read_webpage_at_url function
* Rename agents_page -> agent_page
* Fix unit test for adding the filename to the compiled markdown entry
* Fix layout of agent, agents pages
* Merge migrations
* Let the name, slug of the default agent be Khoj, khoj
* Fix chat-related unit tests
* Add webpage chat command to read web pages requested by the user
  Update auto chat command inference prompt to show an example of when to use the webpage chat command (i.e. when a url is directly provided in the link)
* Support webpage command in chat API
  - Fall back to the webpage command when SERPER is not set up and the online command was attempted
  - Do not stop responding if online results can't be retrieved. Try to respond without the online context
* Test select webpage as data source and extract web urls chat actors
* Tweak prompts to extract information from webpages, online results
  - Show more of the truncated messages for debugging context
  - Update Khoj personality prompt to encourage it to remember its capabilities
* Rename extract_content online results field to webpages
* Parallelize simple webpage read and extractor, similar to what is done for search_online with Olostep (see the sketch after this message)
* Pass multiple webpages with their urls in online results context
  Previously, even if MAX_WEBPAGES_TO_READ was > 1, only 1 extracted content would ever be passed. The URL of the extracted webpage content wasn't passed to clients in the online results context, which prevented it from being rendered
* Render webpage read in chat response references on Web, Desktop apps
* Time chat actor responses & chat api request start for perf analysis
* Increase the keep alive timeout in the main application for testing
* Do not pipe access/error logs to separate files. Flow to stdout/stderr
* [Temp] Reduce to 1 gunicorn worker
* Change prod docker image to use jammy, rather than the nvidia base image
* Use Khoj icon when Khoj web is installed on iOS as a PWA
* Make slug required for agents
* Simplify calling logic and prevent agent access for unauthenticated users
* Standardize to use personality over tuning in agent nomenclature
* Make filtering logic more stringent for accessible agents and remove unused method
* Format chat message query

---------

Co-authored-by: Debanjum Singh Solanky <debanjum@gmail.com>
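A minimal sketch of the parallel webpage read described in the commit above, assuming only that read_webpage_at_url is an async reader (it is imported by the test file below). The read_webpages_concurrently helper, the hard-coded URL cap, and keying results by URL are illustrative, not the actual khoj implementation:

import asyncio

from khoj.processor.tools.online_search import read_webpage_at_url

MAX_WEBPAGES_TO_READ = 3  # illustrative cap; khoj defines its own limit


async def read_webpages_concurrently(urls: list[str]) -> dict[str, str]:
    # Read every candidate webpage in parallel instead of one at a time,
    # then key the extracted content by url so clients can render references.
    urls = urls[:MAX_WEBPAGES_TO_READ]
    contents = await asyncio.gather(*(read_webpage_at_url(url) for url in urls))
    return dict(zip(urls, contents))

For example, contents = asyncio.run(read_webpages_concurrently(urls)) would yield one extracted-content entry per url rather than the single entry the commit message says was previously passed.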
118 lines · 4 KiB · Python
import os
import secrets

import numpy as np
import psutil
import pytest
from scipy.stats import linregress

from khoj.processor.embeddings import EmbeddingsModel
from khoj.processor.tools.online_search import (
    read_webpage_at_url,
    read_webpage_with_olostep,
)
from khoj.utils import helpers

def test_get_from_null_dict():
    # null handling
    assert helpers.get_from_dict(dict()) == dict()
    assert helpers.get_from_dict(dict(), None) == None

    # key present in nested dictionary
    # 1-level dictionary
    assert helpers.get_from_dict({"a": 1, "b": 2}, "a") == 1
    assert helpers.get_from_dict({"a": 1, "b": 2}, "c") == None

    # 2-level dictionary
    assert helpers.get_from_dict({"a": {"a_a": 1}, "b": 2}, "a") == {"a_a": 1}
    assert helpers.get_from_dict({"a": {"a_a": 1}, "b": 2}, "a", "a_a") == 1

    # key not present in nested dictionary
    # 2-level dictionary
    assert helpers.get_from_dict({"a": {"a_a": 1}, "b": 2}, "b", "b_a") == None
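
# A hedged reference for the contract the assertions above pin down
# (hypothetical; the real implementation is khoj.utils.helpers.get_from_dict):
def _get_from_dict_reference(dictionary, *keys):
    # Walk nested keys, returning None as soon as any key is missing.
    for key in keys:
        if not isinstance(dictionary, dict) or key not in dictionary:
            return None
        dictionary = dictionary[key]
    return dictionary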

def test_merge_dicts():
    # basic merge of dicts with non-overlapping keys
    assert helpers.merge_dicts(priority_dict={"a": 1}, default_dict={"b": 2}) == {"a": 1, "b": 2}

    # use default dict items when not present in priority dict
    assert helpers.merge_dicts(priority_dict={}, default_dict={"b": 2}) == {"b": 2}

    # do not override existing key in priority_dict with default dict
    assert helpers.merge_dicts(priority_dict={"a": 1}, default_dict={"a": 2}) == {"a": 1}
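
# A hedged reference for the merge semantics asserted above (hypothetical;
# the real implementation is khoj.utils.helpers.merge_dicts): a shallow
# merge in which priority_dict keys win over default_dict keys.
def _merge_dicts_reference(priority_dict, default_dict):
    return {**default_dict, **priority_dict}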

def test_lru_cache():
    # Test initializing cache
    cache = helpers.LRU({"a": 1, "b": 2}, capacity=2)
    assert cache == {"a": 1, "b": 2}

    # Test capacity overflow
    cache["c"] = 3
    assert cache == {"b": 2, "c": 3}

    # Test delete least recently used item from LRU cache on capacity overflow
    cache["b"]  # accessing 'b' makes it the most recently used item
    cache["d"] = 4  # so 'c' is deleted from the cache instead of 'b'
    assert cache == {"b": 2, "d": 4}
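
# A hedged sketch of the LRU behavior exercised above (hypothetical; the real
# class is khoj.utils.helpers.LRU): an OrderedDict that refreshes recency on
# read and evicts the oldest entry once capacity is exceeded.
from collections import OrderedDict


class _LRUSketch(OrderedDict):
    def __init__(self, *args, capacity=128, **kwargs):
        self.capacity = capacity
        super().__init__(*args, **kwargs)

    def __getitem__(self, key):
        value = super().__getitem__(key)
        self.move_to_end(key)  # reading a key marks it most recently used
        return value

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self.move_to_end(key)
        if len(self) > self.capacity:
            self.popitem(last=False)  # evict the least recently used entry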

@pytest.mark.skip(reason="Memory leak exists on GPU, MPS devices")
def test_encode_docs_memory_leak():
    # Arrange
    iterations = 50
    batch_size = 20
    embeddings_model = EmbeddingsModel()
    memory_usage_trend = []
    device = f"{helpers.get_device()}".upper()

    # Act
    # Encode random strings repeatedly and record memory usage trend
    for iteration in range(iterations):
        random_docs = [" ".join(secrets.token_hex(5) for _ in range(10)) for _ in range(batch_size)]
        a = [embeddings_model.embed_documents(random_docs)]  # result intentionally unused; we only track memory
        memory_usage_trend += [psutil.Process().memory_info().rss / (1024 * 1024)]
        print(f"{iteration:02d}, {memory_usage_trend[-1]:.2f}", flush=True)

    # Calculate slope of line fitting memory usage history
    memory_usage_trend = np.array(memory_usage_trend)
    slope, _, _, _, _ = linregress(np.arange(len(memory_usage_trend)), memory_usage_trend)
    print(f"Memory usage increased at ~{slope:.2f} MB per iteration on {device}")

    # Assert
    # If slope is positive, memory utilization is increasing
    # Positive threshold of 2, from observing memory usage trend on MPS vs CPU device
    assert slope < 2, f"Memory leak suspected on {device}. Memory usage increased at ~{slope:.2f} MB per iteration"
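
# The leak check above reduces to: sample process RSS once per iteration, fit
# a line, and flag a positive slope. A hedged distillation (hypothetical helper):
def _memory_slope_mb_per_iteration(rss_samples_mb):
    # Slope of the best-fit line through per-iteration RSS samples, in MB/iteration.
    slope, _, _, _, _ = linregress(np.arange(len(rss_samples_mb)), np.array(rss_samples_mb))
    return slope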

@pytest.mark.asyncio
async def test_reading_webpage():
    # Arrange
    website = "https://en.wikipedia.org/wiki/Great_Chicago_Fire"

    # Act
    response = await read_webpage_at_url(website)

    # Assert
    assert (
        "An alarm sent from the area near the fire also failed to register at the courthouse where the fire watchmen were"
        in response
    )

@pytest.mark.skipif(os.getenv("OLOSTEP_API_KEY") is None, reason="OLOSTEP_API_KEY is not set")
@pytest.mark.asyncio
async def test_reading_webpage_with_olostep():
    # Arrange
    website = "https://en.wikipedia.org/wiki/Great_Chicago_Fire"

    # Act
    response = await read_webpage_with_olostep(website)

    # Assert
    assert (
        "An alarm sent from the area near the fire also failed to register at the courthouse where the fire watchmen were"
        in response
    )