Support Multiple Input Filters to Configure Content to Index

- 536f03a Process text content files in sorted order for stable indexing
- a701ad0 Support multiple input-filters to configure content to index via `khoj.yml`

Resolves #84
This commit is contained in:
Debanjum 2022-09-12 08:19:52 +00:00 committed by GitHub
commit 3d86d763c5
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
11 changed files with 147 additions and 44 deletions

View file

@@ -63,22 +63,28 @@ def beancount_to_jsonl(config: TextContentConfig, previous_entries=None):
return entries_with_ids return entries_with_ids
def get_beancount_files(beancount_files=None, beancount_file_filter=None): def get_beancount_files(beancount_files=None, beancount_file_filters=None):
"Get Beancount files to process" "Get Beancount files to process"
absolute_beancount_files, filtered_beancount_files = set(), set() absolute_beancount_files, filtered_beancount_files = set(), set()
if beancount_files: if beancount_files:
absolute_beancount_files = {get_absolute_path(beancount_file) absolute_beancount_files = {get_absolute_path(beancount_file)
for beancount_file for beancount_file
in beancount_files} in beancount_files}
if beancount_file_filter: if beancount_file_filters:
filtered_beancount_files = set(glob.glob(get_absolute_path(beancount_file_filter))) filtered_beancount_files = {
filtered_file
for beancount_file_filter in beancount_file_filters
for filtered_file in glob.glob(get_absolute_path(beancount_file_filter))
}
all_beancount_files = absolute_beancount_files | filtered_beancount_files all_beancount_files = sorted(absolute_beancount_files | filtered_beancount_files)
files_with_non_beancount_extensions = {beancount_file files_with_non_beancount_extensions = {
beancount_file
for beancount_file for beancount_file
in all_beancount_files in all_beancount_files
if not beancount_file.endswith(".bean") and not beancount_file.endswith(".beancount")} if not beancount_file.endswith(".bean") and not beancount_file.endswith(".beancount")
}
if any(files_with_non_beancount_extensions): if any(files_with_non_beancount_extensions):
print(f"[Warning] There maybe non beancount files in the input set: {files_with_non_beancount_extensions}") print(f"[Warning] There maybe non beancount files in the input set: {files_with_non_beancount_extensions}")

View file

@@ -63,15 +63,19 @@ def markdown_to_jsonl(config: TextContentConfig, previous_entries=None):
return entries_with_ids return entries_with_ids
def get_markdown_files(markdown_files=None, markdown_file_filter=None): def get_markdown_files(markdown_files=None, markdown_file_filters=None):
"Get Markdown files to process" "Get Markdown files to process"
absolute_markdown_files, filtered_markdown_files = set(), set() absolute_markdown_files, filtered_markdown_files = set(), set()
if markdown_files: if markdown_files:
absolute_markdown_files = {get_absolute_path(markdown_file) for markdown_file in markdown_files} absolute_markdown_files = {get_absolute_path(markdown_file) for markdown_file in markdown_files}
if markdown_file_filter: if markdown_file_filters:
filtered_markdown_files = set(glob.glob(get_absolute_path(markdown_file_filter))) filtered_markdown_files = {
filtered_file
for markdown_file_filter in markdown_file_filters
for filtered_file in glob.glob(get_absolute_path(markdown_file_filter))
}
all_markdown_files = absolute_markdown_files | filtered_markdown_files all_markdown_files = sorted(absolute_markdown_files | filtered_markdown_files)
files_with_non_markdown_extensions = { files_with_non_markdown_extensions = {
md_file md_file

View file

@@ -68,17 +68,21 @@ def org_to_jsonl(config: TextContentConfig, previous_entries=None):
return entries_with_ids return entries_with_ids
def get_org_files(org_files=None, org_file_filter=None): def get_org_files(org_files=None, org_file_filters=None):
"Get Org files to process" "Get Org files to process"
absolute_org_files, filtered_org_files = set(), set() absolute_org_files, filtered_org_files = set(), set()
if org_files: if org_files:
absolute_org_files = {get_absolute_path(org_file) absolute_org_files = {get_absolute_path(org_file)
for org_file for org_file
in org_files} in org_files}
if org_file_filter: if org_file_filters:
filtered_org_files = set(glob.glob(get_absolute_path(org_file_filter))) filtered_org_files = {
filtered_file
for org_file_filter in org_file_filters
for filtered_file in glob.glob(get_absolute_path(org_file_filter))
}
all_org_files = absolute_org_files | filtered_org_files all_org_files = sorted(absolute_org_files | filtered_org_files)
files_with_non_org_extensions = {org_file for org_file in all_org_files if not org_file.endswith(".org")} files_with_non_org_extensions = {org_file for org_file in all_org_files if not org_file.endswith(".org")}
if any(files_with_non_org_extensions): if any(files_with_non_org_extensions):

View file

@@ -241,7 +241,11 @@ def setup(config: ImageContentConfig, search_config: ImageSearchConfig, regenera
image_directories = [resolve_absolute_path(directory, strict=True) for directory in config.input_directories] image_directories = [resolve_absolute_path(directory, strict=True) for directory in config.input_directories]
absolute_image_files = set(extract_entries(image_directories)) absolute_image_files = set(extract_entries(image_directories))
if config.input_filter: if config.input_filter:
filtered_image_files = set(glob.glob(get_absolute_path(config.input_filter))) filtered_image_files = {
filtered_file
for input_filter in config.input_filter
for filtered_file in glob.glob(get_absolute_path(input_filter))
}
all_image_files = sorted(list(absolute_image_files | filtered_image_files)) all_image_files = sorted(list(absolute_image_files | filtered_image_files))

View file

@@ -6,7 +6,7 @@ from typing import List, Optional
from pydantic import BaseModel, validator from pydantic import BaseModel, validator
# Internal Packages # Internal Packages
from src.utils.helpers import to_snake_case_from_dash from src.utils.helpers import to_snake_case_from_dash, is_none_or_empty
class ConfigBase(BaseModel): class ConfigBase(BaseModel):
class Config: class Config:
@@ -15,27 +15,27 @@ class ConfigBase(BaseModel):
class TextContentConfig(ConfigBase): class TextContentConfig(ConfigBase):
input_files: Optional[List[Path]] input_files: Optional[List[Path]]
input_filter: Optional[str] input_filter: Optional[List[str]]
compressed_jsonl: Path compressed_jsonl: Path
embeddings_file: Path embeddings_file: Path
index_heading_entries: Optional[bool] = False index_heading_entries: Optional[bool] = False
@validator('input_filter') @validator('input_filter')
def input_filter_or_files_required(cls, input_filter, values, **kwargs): def input_filter_or_files_required(cls, input_filter, values, **kwargs):
if input_filter is None and ('input_files' not in values or values["input_files"] is None): if is_none_or_empty(input_filter) and ('input_files' not in values or values["input_files"] is None):
raise ValueError("Either input_filter or input_files required in all content-type.<text_search> section of Khoj config file") raise ValueError("Either input_filter or input_files required in all content-type.<text_search> section of Khoj config file")
return input_filter return input_filter
class ImageContentConfig(ConfigBase): class ImageContentConfig(ConfigBase):
input_directories: Optional[List[Path]] input_directories: Optional[List[Path]]
input_filter: Optional[str] input_filter: Optional[List[str]]
embeddings_file: Path embeddings_file: Path
use_xmp_metadata: bool use_xmp_metadata: bool
batch_size: int batch_size: int
@validator('input_filter') @validator('input_filter')
def input_filter_or_directories_required(cls, input_filter, values, **kwargs): def input_filter_or_directories_required(cls, input_filter, values, **kwargs):
if input_filter is None and ('input_directories' not in values or values["input_directories"] is None): if is_none_or_empty(input_filter) and ('input_directories' not in values or values["input_directories"] is None):
raise ValueError("Either input_filter or input_directories required in all content-type.image section of Khoj config file") raise ValueError("Either input_filter or input_directories required in all content-type.image section of Khoj config file")
return input_filter return input_filter

View file

@@ -55,7 +55,7 @@ def content_config(tmp_path_factory, search_config: SearchConfig):
# Generate Notes Embeddings from Test Notes # Generate Notes Embeddings from Test Notes
content_config.org = TextContentConfig( content_config.org = TextContentConfig(
input_files = None, input_files = None,
input_filter = 'tests/data/org/*.org', input_filter = ['tests/data/org/*.org'],
compressed_jsonl = content_dir.joinpath('notes.jsonl.gz'), compressed_jsonl = content_dir.joinpath('notes.jsonl.gz'),
embeddings_file = content_dir.joinpath('note_embeddings.pt')) embeddings_file = content_dir.joinpath('note_embeddings.pt'))

View file

@@ -1,9 +1,10 @@
content-type: content-type:
org: org:
input-files: [ "~/first_from_config.org", "~/second_from_config.org" ] input-files: [ "~/first_from_config.org", "~/second_from_config.org" ]
input-filter: "*.org" input-filter: ["*.org", "~/notes/*.org"]
compressed-jsonl: ".notes.json.gz" compressed-jsonl: ".notes.json.gz"
embeddings-file: ".note_embeddings.pt" embeddings-file: ".note_embeddings.pt"
index-header-entries: true
search-type: search-type:
asymmetric: asymmetric:

View file

@@ -2,7 +2,7 @@
import json import json
# Internal Packages # Internal Packages
from src.processor.ledger.beancount_to_jsonl import extract_beancount_transactions, convert_transactions_to_maps, convert_transaction_maps_to_jsonl from src.processor.ledger.beancount_to_jsonl import extract_beancount_transactions, convert_transactions_to_maps, convert_transaction_maps_to_jsonl, get_beancount_files
def test_no_transactions_in_file(tmp_path): def test_no_transactions_in_file(tmp_path):
@@ -75,10 +75,38 @@ Assets:Test:Test -1.00 KES
assert len(jsonl_data) == 2 assert len(jsonl_data) == 2
def test_get_beancount_files(tmp_path):
"Ensure Beancount files specified via input-filter, input-files extracted"
# Arrange
# Include via input-filter globs
group1_file1 = create_file(tmp_path, filename="group1-file1.bean")
group1_file2 = create_file(tmp_path, filename="group1-file2.bean")
group2_file1 = create_file(tmp_path, filename="group2-file1.beancount")
group2_file2 = create_file(tmp_path, filename="group2-file2.beancount")
# Include via input-file field
file1 = create_file(tmp_path, filename="ledger.bean")
# Not included by any filter
create_file(tmp_path, filename="not-included-ledger.bean")
create_file(tmp_path, filename="not-included-text.txt")
expected_files = sorted(map(str, [group1_file1, group1_file2, group2_file1, group2_file2, file1]))
# Setup input-files, input-filters
input_files = [tmp_path / 'ledger.bean']
input_filter = [tmp_path / 'group1*.bean', tmp_path / 'group2*.beancount']
# Act
extracted_org_files = get_beancount_files(input_files, input_filter)
# Assert
assert len(extracted_org_files) == 5
assert extracted_org_files == expected_files
# Helper Functions # Helper Functions
def create_file(tmp_path, entry, filename="ledger.beancount"): def create_file(tmp_path, entry=None, filename="ledger.beancount"):
beancount_file = tmp_path / f"notes/{filename}" beancount_file = tmp_path / filename
beancount_file.parent.mkdir()
beancount_file.touch() beancount_file.touch()
if entry:
beancount_file.write_text(entry) beancount_file.write_text(entry)
return beancount_file return beancount_file

View file

@@ -2,7 +2,7 @@
import json import json
# Internal Packages # Internal Packages
from src.processor.markdown.markdown_to_jsonl import extract_markdown_entries, convert_markdown_maps_to_jsonl, convert_markdown_entries_to_maps from src.processor.markdown.markdown_to_jsonl import extract_markdown_entries, convert_markdown_maps_to_jsonl, convert_markdown_entries_to_maps, get_markdown_files
def test_markdown_file_with_no_headings_to_jsonl(tmp_path): def test_markdown_file_with_no_headings_to_jsonl(tmp_path):
@@ -72,10 +72,38 @@ def test_multiple_markdown_entries_to_jsonl(tmp_path):
assert len(jsonl_data) == 2 assert len(jsonl_data) == 2
def test_get_markdown_files(tmp_path):
"Ensure Markdown files specified via input-filter, input-files extracted"
# Arrange
# Include via input-filter globs
group1_file1 = create_file(tmp_path, filename="group1-file1.md")
group1_file2 = create_file(tmp_path, filename="group1-file2.md")
group2_file1 = create_file(tmp_path, filename="group2-file1.markdown")
group2_file2 = create_file(tmp_path, filename="group2-file2.markdown")
# Include via input-file field
file1 = create_file(tmp_path, filename="notes.md")
# Not included by any filter
create_file(tmp_path, filename="not-included-markdown.md")
create_file(tmp_path, filename="not-included-text.txt")
expected_files = sorted(map(str, [group1_file1, group1_file2, group2_file1, group2_file2, file1]))
# Setup input-files, input-filters
input_files = [tmp_path / 'notes.md']
input_filter = [tmp_path / 'group1*.md', tmp_path / 'group2*.markdown']
# Act
extracted_org_files = get_markdown_files(input_files, input_filter)
# Assert
assert len(extracted_org_files) == 5
assert extracted_org_files == expected_files
# Helper Functions # Helper Functions
def create_file(tmp_path, entry, filename="test.md"): def create_file(tmp_path, entry=None, filename="test.md"):
markdown_file = tmp_path / f"notes/{filename}" markdown_file = tmp_path / filename
markdown_file.parent.mkdir()
markdown_file.touch() markdown_file.touch()
if entry:
markdown_file.write_text(entry) markdown_file.write_text(entry)
return markdown_file return markdown_file

View file

@@ -2,7 +2,7 @@
import json import json
# Internal Packages # Internal Packages
from src.processor.org_mode.org_to_jsonl import convert_org_entries_to_jsonl, convert_org_nodes_to_entries, extract_org_entries from src.processor.org_mode.org_to_jsonl import convert_org_entries_to_jsonl, convert_org_nodes_to_entries, extract_org_entries, get_org_files
from src.utils.helpers import is_none_or_empty from src.utils.helpers import is_none_or_empty
@@ -81,10 +81,38 @@ def test_file_with_no_headings_to_jsonl(tmp_path):
assert len(jsonl_data) == 1 assert len(jsonl_data) == 1
def test_get_org_files(tmp_path):
"Ensure Org files specified via input-filter, input-files extracted"
# Arrange
# Include via input-filter globs
group1_file1 = create_file(tmp_path, filename="group1-file1.org")
group1_file2 = create_file(tmp_path, filename="group1-file2.org")
group2_file1 = create_file(tmp_path, filename="group2-file1.org")
group2_file2 = create_file(tmp_path, filename="group2-file2.org")
# Include via input-file field
orgfile1 = create_file(tmp_path, filename="orgfile1.org")
# Not included by any filter
create_file(tmp_path, filename="orgfile2.org")
create_file(tmp_path, filename="text1.txt")
expected_files = sorted(map(str, [group1_file1, group1_file2, group2_file1, group2_file2, orgfile1]))
# Setup input-files, input-filters
input_files = [tmp_path / 'orgfile1.org']
input_filter = [tmp_path / 'group1*.org', tmp_path / 'group2*.org']
# Act
extracted_org_files = get_org_files(input_files, input_filter)
# Assert
assert len(extracted_org_files) == 5
assert extracted_org_files == expected_files
# Helper Functions # Helper Functions
def create_file(tmp_path, entry, filename="test.org"): def create_file(tmp_path, entry=None, filename="test.org"):
org_file = tmp_path / f"notes/{filename}" org_file = tmp_path / filename
org_file.parent.mkdir()
org_file.touch() org_file.touch()
if entry:
org_file.write_text(entry) org_file.write_text(entry)
return org_file return org_file

View file

@@ -16,7 +16,7 @@ from src.processor.org_mode.org_to_jsonl import org_to_jsonl
# ---------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------
def test_asymmetric_setup_with_missing_file_raises_error(content_config: ContentConfig, search_config: SearchConfig): def test_asymmetric_setup_with_missing_file_raises_error(content_config: ContentConfig, search_config: SearchConfig):
# Arrange # Arrange
file_to_index = Path(content_config.org.input_filter).parent / "new_file_to_index.org" file_to_index = Path(content_config.org.input_filter[0]).parent / "new_file_to_index.org"
new_org_content_config = deepcopy(content_config.org) new_org_content_config = deepcopy(content_config.org)
new_org_content_config.input_files = [f'{file_to_index}'] new_org_content_config.input_files = [f'{file_to_index}']
new_org_content_config.input_filter = None new_org_content_config.input_filter = None
@@ -30,7 +30,7 @@ def test_asymmetric_setup_with_missing_file_raises_error(content_config: Content
# ---------------------------------------------------------------------------------------------------- # ----------------------------------------------------------------------------------------------------
def test_asymmetric_setup_with_empty_file_raises_error(content_config: ContentConfig, search_config: SearchConfig): def test_asymmetric_setup_with_empty_file_raises_error(content_config: ContentConfig, search_config: SearchConfig):
# Arrange # Arrange
file_to_index = Path(content_config.org.input_filter).parent / "new_file_to_index.org" file_to_index = Path(content_config.org.input_filter[0]).parent / "new_file_to_index.org"
file_to_index.touch() file_to_index.touch()
new_org_content_config = deepcopy(content_config.org) new_org_content_config = deepcopy(content_config.org)
new_org_content_config.input_files = [f'{file_to_index}'] new_org_content_config.input_files = [f'{file_to_index}']
@@ -88,7 +88,7 @@ def test_asymmetric_reload(content_config: ContentConfig, search_config: SearchC
assert len(initial_notes_model.entries) == 10 assert len(initial_notes_model.entries) == 10
assert len(initial_notes_model.corpus_embeddings) == 10 assert len(initial_notes_model.corpus_embeddings) == 10
file_to_add_on_reload = Path(content_config.org.input_filter).parent / "reload.org" file_to_add_on_reload = Path(content_config.org.input_filter[0]).parent / "reload.org"
content_config.org.input_files = [f'{file_to_add_on_reload}'] content_config.org.input_files = [f'{file_to_add_on_reload}']
# append Org-Mode Entry to first Org Input File in Config # append Org-Mode Entry to first Org Input File in Config
@@ -124,7 +124,7 @@ def test_incremental_update(content_config: ContentConfig, search_config: Search
assert len(initial_notes_model.entries) == 10 assert len(initial_notes_model.entries) == 10
assert len(initial_notes_model.corpus_embeddings) == 10 assert len(initial_notes_model.corpus_embeddings) == 10
file_to_add_on_update = Path(content_config.org.input_filter).parent / "update.org" file_to_add_on_update = Path(content_config.org.input_filter[0]).parent / "update.org"
content_config.org.input_files = [f'{file_to_add_on_update}'] content_config.org.input_files = [f'{file_to_add_on_update}']
# append Org-Mode Entry to first Org Input File in Config # append Org-Mode Entry to first Org Input File in Config