2023-08-31 21:55:17 +02:00
|
|
|
import os
|
2023-12-28 13:34:02 +01:00
|
|
|
from pathlib import Path
|
2022-09-10 22:57:17 +02:00
|
|
|
|
2023-11-22 07:11:32 +01:00
|
|
|
from khoj.processor.content.markdown.markdown_to_entries import MarkdownToEntries
|
2023-08-31 21:55:17 +02:00
|
|
|
from khoj.utils.fs_syncer import get_markdown_files
|
|
|
|
from khoj.utils.rawconfig import TextContentConfig
|
2022-09-10 22:57:17 +02:00
|
|
|
|
|
|
|
|
2024-01-29 11:01:48 +01:00
|
|
|
def test_extract_markdown_with_no_headings(tmp_path):
    """Convert markdown file with no heading to entry format."""
    # Arrange
    # Markdown content with no headings, only bullet points
    entry = """
- Bullet point 1
- Bullet point 2
"""
    # Map file path -> raw markdown content, as the extractor expects
    data = {
        str(tmp_path): entry,
    }
    # Extractor should prepend the file name as a top-level heading when compiling
    expected_heading = f"# {tmp_path}"

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)

    # Assert
    assert len(entries) == 1
    # Ensure raw entry with no headings do not get heading prefix prepended
    assert not entries[0].raw.startswith("#")
    # Ensure compiled entry has filename prepended as top level heading
    assert entries[0].compiled.startswith(expected_heading)
    # Ensure compiled entry also includes the file name
    assert str(tmp_path) in entries[0].compiled
|
2022-09-10 22:57:17 +02:00
|
|
|
|
|
|
|
|
2024-01-29 11:01:48 +01:00
|
|
|
def test_extract_single_markdown_entry(tmp_path):
    """Convert markdown from single file to entry format."""
    # Arrange
    # Single heading whose body starts with a whitespace-only line (tab + carriage return)
    entry = """### Heading
\t\r
Body Line 1
"""
    data = {
        str(tmp_path): entry,
    }

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)

    # Assert
    assert len(entries) == 1
|
2022-09-10 22:57:17 +02:00
|
|
|
|
|
|
|
|
2024-01-29 11:01:48 +01:00
|
|
|
def test_extract_multiple_markdown_entries(tmp_path):
    """Convert multiple markdown from single file to entry format."""
    # Arrange
    # Two sibling headings, each with a whitespace-only line before its body
    entry = """
### Heading 1
\t\r
Heading 1 Body Line 1
### Heading 2
\t\r
Heading 2 Body Line 2
"""
    data = {
        str(tmp_path): entry,
    }

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)

    # Assert
    assert len(entries) == 2
    # Ensure entry compiled strings include the markdown files they originate from
    assert all(tmp_path.stem in parsed_entry.compiled for parsed_entry in entries)
|
2022-09-10 22:57:17 +02:00
|
|
|
|
|
|
|
|
2024-02-10 18:33:30 +01:00
|
|
|
def test_extract_entries_with_different_level_headings(tmp_path):
    """Extract markdown entries with different level headings."""
    # Arrange
    entry = """
# Heading 1
## Sub-Heading 1.1
# Heading 2
"""
    data = {
        str(tmp_path): entry,
    }

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)

    # Assert
    assert len(entries) == 2
    assert entries[0].raw == "# Heading 1\n## Sub-Heading 1.1", "Ensure entry includes heading ancestry"
    assert entries[1].raw == "# Heading 2\n"
|
2022-09-12 09:39:39 +02:00
|
|
|
|
|
|
|
|
2024-02-10 18:33:30 +01:00
|
|
|
def test_extract_entries_with_non_incremental_heading_levels(tmp_path):
    """Extract markdown entries when deeper child level before shallower child level."""
    # Arrange
    # H4 appears under H1 before the H2 sibling, exercising non-incremental nesting
    entry = """
# Heading 1
#### Sub-Heading 1.1
## Sub-Heading 1.2
# Heading 2
"""
    data = {
        str(tmp_path): entry,
    }

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)

    # Assert
    assert len(entries) == 3
    assert entries[0].raw == "# Heading 1\n#### Sub-Heading 1.1", "Ensure entry includes heading ancestry"
    assert entries[1].raw == "# Heading 1\n## Sub-Heading 1.2", "Ensure entry includes heading ancestry"
    assert entries[2].raw == "# Heading 2\n"
|
2024-01-29 11:01:48 +01:00
|
|
|
|
|
|
|
|
|
|
|
def test_extract_entries_with_text_before_headings(tmp_path):
    """Extract markdown entries with some text before any headings."""
    # Arrange
    entry = """
Text before headings
# Heading 1
body line 1
## Heading 2
body line 2
"""
    data = {
        str(tmp_path): entry,
    }

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=3)

    # Assert
    assert len(entries) == 3
    assert entries[0].raw == "\nText before headings"
    assert entries[1].raw == "# Heading 1\nbody line 1"
    assert entries[2].raw == "# Heading 1\n## Heading 2\nbody line 2\n", "Ensure raw entry includes heading ancestry"
|
2023-01-17 16:42:36 +01:00
|
|
|
|
|
|
|
|
2024-02-10 10:04:09 +01:00
|
|
|
def test_parse_markdown_file_into_single_entry_if_small(tmp_path):
    """Parse markdown file into single entry if it fits within the token limits."""
    # Arrange
    entry = """
# Heading 1
body line 1
## Subheading 1.1
body line 1.1
"""
    data = {
        str(tmp_path): entry,
    }

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=12)

    # Assert
    # Whole file fits within max_tokens, so it becomes a single untrimmed entry
    assert len(entries) == 1
    assert entries[0].raw == entry
|
|
|
|
|
|
|
|
|
2024-02-10 18:33:30 +01:00
|
|
|
def test_parse_markdown_entry_with_children_as_single_entry_if_small(tmp_path):
    """Parse markdown entry with child headings as single entry if it fits within the tokens limits."""
    # Arrange
    # Heading 1 subtree fits in max_tokens; Heading 2 subtree does not and gets split
    entry = """
# Heading 1
body line 1
## Subheading 1.1
body line 1.1
# Heading 2
body line 2
## Subheading 2.1
longer body line 2.1
"""
    data = {
        str(tmp_path): entry,
    }

    # Act
    # Extract Entries from specified Markdown files
    entries = MarkdownToEntries.extract_markdown_entries(markdown_files=data, max_tokens=12)

    # Assert
    assert len(entries) == 3
    assert (
        entries[0].raw == "# Heading 1\nbody line 1\n## Subheading 1.1\nbody line 1.1"
    ), "First entry includes children headings"
    assert entries[1].raw == "# Heading 2\nbody line 2", "Second entry does not include children headings"
    assert (
        entries[2].raw == "# Heading 2\n## Subheading 2.1\nlonger body line 2.1\n"
    ), "Third entry is second entry's child heading"
|
|
|
|
|
|
|
|
|
|
|
|
def test_get_markdown_files(tmp_path):
    """Ensure Markdown files specified via input-filter, input-files extracted."""
    # Arrange
    # Include via input-filter globs
    group1_file1 = create_file(tmp_path, filename="group1-file1.md")
    group1_file2 = create_file(tmp_path, filename="group1-file2.md")
    group2_file1 = create_file(tmp_path, filename="group2-file1.markdown")
    group2_file2 = create_file(tmp_path, filename="group2-file2.markdown")
    # Include via input-file field
    file1 = create_file(tmp_path, filename="notes.md")
    # Not included by any filter
    create_file(tmp_path, filename="not-included-markdown.md")
    create_file(tmp_path, filename="not-included-text.txt")

    expected_files = {
        os.path.join(tmp_path, file.name) for file in [group1_file1, group1_file2, group2_file1, group2_file2, file1]
    }

    # Setup input-files, input-filters
    input_files = [tmp_path / "notes.md"]
    input_filter = [tmp_path / "group1*.md", tmp_path / "group2*.markdown"]

    markdown_config = TextContentConfig(
        input_files=input_files,
        input_filter=[str(pattern) for pattern in input_filter],
        compressed_jsonl=tmp_path / "test.jsonl",
        embeddings_file=tmp_path / "test_embeddings.jsonl",
    )

    # Act
    extracted_org_files = get_markdown_files(markdown_config)

    # Assert
    assert len(extracted_org_files) == 5
    assert set(extracted_org_files.keys()) == expected_files
|
|
|
|
|
|
|
|
|
2022-09-10 22:57:17 +02:00
|
|
|
# Helper Functions
|
2023-05-03 12:55:56 +02:00
|
|
|
def create_file(tmp_path: Path, entry=None, filename="test.md"):
    """Create a markdown file named *filename* under *tmp_path*.

    Seeds the file with *entry* text when provided; otherwise leaves it empty.
    Returns the Path of the created file.
    """
    target = tmp_path / filename
    target.touch()
    if entry:
        target.write_text(entry)
    return target
|