Mirror of https://github.com/khoj-ai/khoj.git, synced 2024-11-23 15:38:55 +01:00
Add support for indexing plaintext files (#420)
* Add support for indexing plaintext files
  - Adds backend support for parsing plaintext files generically (.html, .txt, .xml, .csv, .md)
  - Add equivalent frontend views for setting up plaintext file indexing
  - Update config, rawconfig, default config, search API, setup endpoints
* Add a nifty plaintext file icon to configure plaintext files in the Web UI
* Use generic glob path for plaintext files. Skip indexing files that aren't in whitelist
This commit is contained in:
Parent: 84d774ea34
Commit: 7b907add77
14 changed files with 314 additions and 15 deletions
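A note on the whitelist named in the last bullet: the shipped extension set (see plaintext_to_jsonl.py below) is txt, md, markdown, org, mbox, rst, html, htm and xml, which differs slightly from the examples in the first bullet (.csv is not indexed). A minimal sketch of the check, mirroring is_plaintextfile from this commit:

    # Mirrors PlaintextToJsonl.is_plaintextfile from this commit (full file below).
    def is_plaintextfile(file: str) -> bool:
        "Check if the file has a whitelisted plaintext extension"
        return file.endswith(("txt", "md", "markdown", "org", "mbox", "rst", "html", "htm", "xml"))

    assert is_plaintextfile("notes.org")
    assert not is_plaintextfile("data.csv")  # skipped, despite the commit message example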
@@ -17,6 +17,7 @@ from khoj.processor.org_mode.org_to_jsonl import OrgToJsonl
 from khoj.processor.pdf.pdf_to_jsonl import PdfToJsonl
 from khoj.processor.github.github_to_jsonl import GithubToJsonl
 from khoj.processor.notion.notion_to_jsonl import NotionToJsonl
+from khoj.processor.plaintext.plaintext_to_jsonl import PlaintextToJsonl
 from khoj.search_type import image_search, text_search
 from khoj.utils import constants, state
 from khoj.utils.config import (
@@ -208,6 +209,22 @@ def configure_content(
             filters=[DateFilter(), WordFilter(), FileFilter()],
         )

+    # Initialize Plaintext Search
+    if (
+        (t == None or t.value == state.SearchType.Plaintext.value)
+        and content_config.plaintext
+        and search_models.text_search
+    ):
+        logger.info("📄 Setting up search for plaintext")
+        # Extract Entries, Generate Plaintext Embeddings
+        content_index.plaintext = text_search.setup(
+            PlaintextToJsonl,
+            content_config.plaintext,
+            search_models.text_search.bi_encoder,
+            regenerate=regenerate,
+            filters=[DateFilter(), WordFilter(), FileFilter()],
+        )
+
     # Initialize Image Search
     if (
         (t == None or t.value == state.SearchType.Image.value)
src/khoj/interface/web/assets/icons/plaintext.svg (new file, 1 line)
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100" width="100px" height="100px"><path fill="#fefdef" d="M29.614,12.307h-1.268c-4.803,0-8.732,3.93-8.732,8.732v61.535c0,4.803,3.93,8.732,8.732,8.732h43.535c4.803,0,8.732-3.93,8.732-8.732v-50.02C72.74,24.68,68.241,20.182,60.367,12.307H41.614"/><path fill="#1f212b" d="M71.882,92.307H28.347c-5.367,0-9.732-4.366-9.732-9.732V21.04c0-5.367,4.366-9.732,9.732-9.732h1.268c0.552,0,1,0.448,1,1s-0.448,1-1,1h-1.268c-4.264,0-7.732,3.469-7.732,7.732v61.535c0,4.264,3.469,7.732,7.732,7.732h43.535c4.264,0,7.732-3.469,7.732-7.732V32.969L59.953,13.307H41.614c-0.552,0-1-0.448-1-1s0.448-1,1-1h18.752c0.265,0,0.52,0.105,0.707,0.293l20.248,20.248c0.188,0.188,0.293,0.442,0.293,0.707v50.02C81.614,87.941,77.248,92.307,71.882,92.307z"/><path fill="#fef6aa" d="M60.114,12.807v10.986c0,4.958,4.057,9.014,9.014,9.014h11.986"/><path fill="#1f212b" d="M81.114 33.307H69.129c-5.247 0-9.515-4.268-9.515-9.515V12.807c0-.276.224-.5.5-.5s.5.224.5.5v10.985c0 4.695 3.82 8.515 8.515 8.515h11.985c.276 0 .5.224.5.5S81.391 33.307 81.114 33.307zM75.114 51.307c-.276 0-.5-.224-.5-.5v-3c0-.276.224-.5.5-.5s.5.224.5.5v3C75.614 51.083 75.391 51.307 75.114 51.307zM75.114 59.307c-.276 0-.5-.224-.5-.5v-6c0-.276.224-.5.5-.5s.5.224.5.5v6C75.614 59.083 75.391 59.307 75.114 59.307zM67.956 86.307H32.272c-4.223 0-7.658-3.45-7.658-7.689V25.955c0-2.549 1.264-4.931 3.382-6.371.228-.156.54-.095.695.132.155.229.096.54-.132.695-1.844 1.254-2.944 3.326-2.944 5.544v52.663c0 3.688 2.987 6.689 6.658 6.689h35.685c3.671 0 6.658-3.001 6.658-6.689V60.807c0-.276.224-.5.5-.5s.5.224.5.5v17.811C75.614 82.857 72.179 86.307 67.956 86.307z"/><path fill="#1f212b" d="M39.802 14.307l-.117 11.834c0 2.21-2.085 3.666-4.036 3.666-1.951 0-4.217-1.439-4.217-3.649l.037-12.58c0-1.307 1.607-2.451 2.801-2.451 1.194 0 2.345 1.149 2.345 2.456l.021 10.829c0 0-.083.667-1.005.645-.507-.012-1.145-.356-1.016-.906v-9.843h-.813l-.021 9.708c0 1.38.54 1.948 1.875 1.948s1.959-.714 1.959-2.094V13.665c0-2.271-1.36-3.5-3.436-3.5s-3.564 1.261-3.564 3.532l.032 12.11c0 3.04 2.123 4.906 4.968 4.906 2.845 0 5-1.71 5-4.75V14.307H39.802zM53.114 52.307h-23c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h23c.276 0 .5.224.5.5S53.391 52.307 53.114 52.307zM44.114 59.307h-14c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h14c.276 0 .5.224.5.5S44.391 59.307 44.114 59.307zM70.114 59.307h-24c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h24c.276 0 .5.224.5.5S70.391 59.307 70.114 59.307zM61.114 66.307h-11c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h11c.276 0 .5.224.5.5S61.391 66.307 61.114 66.307zM71.114 66.307h-8c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h8c.276 0 .5.224.5.5S71.391 66.307 71.114 66.307zM48.114 66.307h-18c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h18c.276 0 .5.224.5.5S48.391 66.307 48.114 66.307zM70.114 73.307h-13c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h13c.276 0 .5.224.5.5S70.391 73.307 70.114 73.307zM54.114 73.307h-24c-.276 0-.5-.224-.5-.5s.224-.5.5-.5h24c.276 0 .5.224.5.5S54.391 73.307 54.114 73.307z"/></svg>
@@ -180,6 +180,41 @@
                 </div>
                 {% endif %}
             </div>
+            <div class="card">
+                <div class="card-title-row">
+                    <img class="card-icon" src="/static/assets/icons/plaintext.svg" alt="Plaintext">
+                    <h3 class="card-title">
+                        Plaintext
+                        {% if current_config.content_type.plaintext %}
+                            {% if current_model_state.plaintext == False %}
+                                <img id="misconfigured-icon-plaintext" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="Embeddings have not been generated yet for this content type. Either the configuration is invalid, or you need to click Configure.">
+                            {% else %}
+                                <img id="configured-icon-plaintext" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
+                            {% endif %}
+                        {% endif %}
+                    </h3>
+                </div>
+                <div class="card-description-row">
+                    <p class="card-description">Set Plaintext files to index</p>
+                </div>
+                <div class="card-action-row">
+                    <a class="card-button" href="/config/content_type/plaintext">
+                        {% if current_config.content_type.plaintext %}
+                            Update
+                        {% else %}
+                            Setup
+                        {% endif %}
+                        <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M5 12h14M12 5l7 7-7 7"></path></svg>
+                    </a>
+                </div>
+                {% if current_config.content_type.plaintext %}
+                <div id="clear-plaintext" class="card-action-row">
+                    <button class="card-button" onclick="clearContentType('plaintext')">
+                        Disable
+                    </button>
+                </div>
+                {% endif %}
+            </div>
         </div>
     </div>
     <div class="section">
@@ -106,31 +106,43 @@

         submit.addEventListener("click", function(event) {
             event.preventDefault();
-            let suffix = ""
+            let globFormat = "**/*."
+            let suffixes = [];
             if ('{{content_type}}' == "markdown")
-                suffix = "**/*.md"
+                suffixes = ["md", "markdown"]
             else if ('{{content_type}}' == "org")
-                suffix = "**/*.org"
+                suffixes = ["org"]
             else if ('{{content_type}}' === "pdf")
-                suffix = "**/*.pdf"
+                suffixes = ["pdf"]
+            else if ('{{content_type}}' === "plaintext")
+                suffixes = ['*']

             var inputFileNodes = document.getElementsByName("input-files");
-            var input_files = getValidInputNodes(inputFileNodes).map(node => node.value);
+            var inputFiles = getValidInputNodes(inputFileNodes).map(node => node.value);

             var inputFilterNodes = document.getElementsByName("input-filter");
-            var input_filter = getValidInputNodes(inputFilterNodes).map(node => `${node.value}/${suffix}`);
-
-            if (input_files.length === 0 && input_filter.length === 0) {
+            var inputFilter = [];
+            var nodes = getValidInputNodes(inputFilterNodes);
+            if (nodes.length > 0) {
+                for (var i = 0; i < nodes.length; i++) {
+                    for (var j = 0; j < suffixes.length; j++) {
+                        inputFilter.push(nodes[i].value + globFormat + suffixes[j]);
+                    }
+                }
+            }
+
+            if (inputFiles.length === 0 && inputFilter.length === 0) {
                 alert("You must specify at least one input file or input filter.");
                 return;
             }

-            if (input_files.length == 0) {
-                input_files = null;
+            if (inputFiles.length == 0) {
+                inputFiles = null;
             }

-            if (input_filter.length == 0) {
-                input_filter = null;
+            if (inputFilter.length == 0) {
+                inputFilter = null;
             }

             var compressed_jsonl = document.getElementById("compressed-jsonl").value;
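The strings assembled above become the input-filter globs the backend expands. A hedged sketch of that expansion for the plaintext case, where suffixes = ['*'] yields "<path>**/*.*" (the ~/notes/ path is illustrative; glob.glob with recursive=True is what get_plaintext_files uses further down):

    # For a filter path entered as "~/notes/" in the UI, the JS above builds
    # "~/notes/" + "**/*." + "*" == "~/notes/**/*.*".
    import glob
    import os

    input_filter = os.path.expanduser("~/notes/**/*.*")  # hypothetical filter
    matched = glob.glob(input_filter, recursive=True)     # every dotted filename under ~/notes, recursively
    print(matched)

Note the concatenation assumes the entered path ends with a separator, which is why the commit message calls this a generic glob path.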
@@ -145,8 +157,8 @@
                     'X-CSRFToken': csrfToken
                 },
                 body: JSON.stringify({
-                    "input_files": input_files,
-                    "input_filter": input_filter,
+                    "input_files": inputFiles,
+                    "input_filter": inputFilter,
                     "compressed_jsonl": compressed_jsonl,
                     "embeddings_file": embeddings_file,
                     "index_heading_entries": index_heading_entries
@@ -73,6 +73,8 @@
             html += render_pdf(query, [item]);
+        } else if (item.additional.file.includes("notion.so")) {
+            html += `<div class="results-notion">` + `<b><a href="${item.additional.file}">${item.additional.heading}</a></b>` + `<p>${item.entry}</p>` + `</div>`;
         } else {
             html += `<div class="results-plugin">` + `<b><a href="${item.additional.file}">${item.additional.heading}</a></b>` + `<p>${item.entry}</p>` + `</div>`;
         }
     });
     return html;
@@ -412,6 +414,7 @@
     div.results-markdown,
+    div.results-notion,
     div.results-org,
     div.results-plugin,
     div.results-pdf {
         text-align: left;
         box-shadow: 2px 2px 2px var(--primary-hover);
src/khoj/processor/plaintext/__init__.py (new file, empty)
src/khoj/processor/plaintext/plaintext_to_jsonl.py (new file, 117 lines)
@@ -0,0 +1,117 @@
# Standard Packages
import glob
import logging
from pathlib import Path
from typing import List

# Internal Packages
from khoj.processor.text_to_jsonl import TextToJsonl
from khoj.utils.helpers import get_absolute_path, timer
from khoj.utils.jsonl import load_jsonl, compress_jsonl_data
from khoj.utils.rawconfig import Entry


logger = logging.getLogger(__name__)


class PlaintextToJsonl(TextToJsonl):
    # Define Functions
    def process(self, previous_entries=[]):
        # Extract required fields from config
        input_files, input_filter, output_file = (
            self.config.input_files,
            self.config.input_filter,
            self.config.compressed_jsonl,
        )

        # Get Plaintext Input Files to Process
        all_input_plaintext_files = PlaintextToJsonl.get_plaintext_files(input_files, input_filter)

        # Extract Entries from specified plaintext files
        with timer("Parse entries from plaintext files", logger):
            current_entries = PlaintextToJsonl.convert_plaintext_entries_to_maps(
                PlaintextToJsonl.extract_plaintext_entries(all_input_plaintext_files)
            )

        # Split entries by max tokens supported by model
        with timer("Split entries by max token size supported by model", logger):
            current_entries = self.split_entries_by_max_tokens(current_entries, max_tokens=256)

        # Identify, mark and merge any new entries with previous entries
        with timer("Identify new or updated entries", logger):
            entries_with_ids = TextToJsonl.mark_entries_for_update(
                current_entries, previous_entries, key="compiled", logger=logger
            )

        with timer("Write entries to JSONL file", logger):
            # Process Each Entry from All Notes Files
            entries = list(map(lambda entry: entry[1], entries_with_ids))
            plaintext_data = PlaintextToJsonl.convert_entries_to_jsonl(entries)

            # Compress JSONL formatted Data
            compress_jsonl_data(plaintext_data, output_file)

        return entries_with_ids

    @staticmethod
    def get_plaintext_files(plaintext_files=None, plaintext_file_filters=None):
        "Get all files to process"
        absolute_plaintext_files, filtered_plaintext_files = set(), set()
        if plaintext_files:
            absolute_plaintext_files = {get_absolute_path(jsonl_file) for jsonl_file in plaintext_files}
        if plaintext_file_filters:
            filtered_plaintext_files = {
                filtered_file
                for jsonl_file_filter in plaintext_file_filters
                for filtered_file in glob.glob(get_absolute_path(jsonl_file_filter), recursive=True)
            }

        all_target_files = sorted(absolute_plaintext_files | filtered_plaintext_files)

        files_with_no_plaintext_extensions = {
            target_files for target_files in all_target_files if not PlaintextToJsonl.is_plaintextfile(target_files)
        }
        if any(files_with_no_plaintext_extensions):
            logger.warn(f"Skipping unsupported files from plaintext indexing: {files_with_no_plaintext_extensions}")
            all_target_files = list(set(all_target_files) - files_with_no_plaintext_extensions)

        logger.debug(f"Processing files: {all_target_files}")

        return all_target_files

    @staticmethod
    def is_plaintextfile(file: str):
        "Check if file is plaintext file"
        return file.endswith(("txt", "md", "markdown", "org", "mbox", "rst", "html", "htm", "xml"))

    @staticmethod
    def extract_plaintext_entries(plaintext_files: List[str]):
        "Extract entries from specified plaintext files"
        entry_to_file_map = []

        for plaintext_file in plaintext_files:
            with open(plaintext_file, "r") as f:
                plaintext_content = f.read()
                entry_to_file_map.append((plaintext_content, plaintext_file))

        return dict(entry_to_file_map)

    @staticmethod
    def convert_plaintext_entries_to_maps(entry_to_file_map: dict) -> List[Entry]:
        "Convert each plaintext entries into a dictionary"
        entries = []
        for entry, file in entry_to_file_map.items():
            entries.append(
                Entry(
                    raw=entry,
                    file=file,
                    compiled=f"{Path(file).stem}\n{entry}",
                    heading=Path(file).stem,
                )
            )
        return entries

    @staticmethod
    def convert_entries_to_jsonl(entries: List[Entry]):
        "Convert each entry to JSON and collate as JSONL"
        return "".join([f"{entry.to_json()}\n" for entry in entries])
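A minimal usage sketch of the static helpers in this new processor, tracing files through the pipeline (the paths and globs are illustrative):

    from khoj.processor.plaintext.plaintext_to_jsonl import PlaintextToJsonl

    # 1. Gather files from explicit paths and recursive globs; files that fail
    #    the extension whitelist are dropped with a warning.
    files = PlaintextToJsonl.get_plaintext_files(
        plaintext_files=["~/docs/readme.txt"],        # hypothetical path
        plaintext_file_filters=["~/notes/**/*.md"],   # hypothetical glob
    )

    # 2. Read each file whole, keyed as {file_content: file_path}.
    entry_map = PlaintextToJsonl.extract_plaintext_entries(files)

    # 3. Wrap each file in an Entry, with the filename stem as its heading and
    #    prepended to the compiled text.
    entries = PlaintextToJsonl.convert_plaintext_entries_to_maps(entry_map)

    # 4. Serialize as JSONL: one JSON object per line.
    print(PlaintextToJsonl.convert_entries_to_jsonl(entries))

One consequence of keying the map by file content in step 2: two files with byte-identical content collapse into a single entry.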
@@ -525,6 +525,25 @@ async def search(
             )
         ]

+    if (
+        (t == SearchType.Plaintext or t == SearchType.All)
+        and state.content_index.plaintext
+        and state.search_models.text_search
+    ):
+        # query plaintext files
+        search_futures += [
+            executor.submit(
+                text_search.query,
+                user_query,
+                state.search_models.text_search,
+                state.content_index.plaintext,
+                question_embedding=encoded_asymmetric_query,
+                rank_results=r or False,
+                score_threshold=score_threshold,
+                dedupe=dedupe or True,
+            )
+        ]
+
     # Query across each requested content types in parallel
     with timer("Query took", logger):
         for search_future in concurrent.futures.as_completed(search_futures):
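For context, a hedged sketch of querying the new content type once it is indexed, assuming a local khoj server on its default port (42110) and the /api/search parameter names visible in this hunk (t taking SearchType values, r mapping to rank_results):

    import requests

    # "t" accepts the SearchType values extended in this commit, e.g. "plaintext".
    response = requests.get(
        "http://localhost:42110/api/search",
        params={"q": "plaintext words", "t": "plaintext", "r": True},
    )
    for result in response.json():
        print(result["entry"])  # assumes the usual list-of-results response shape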
@@ -15,7 +15,7 @@ import json
 web_client = APIRouter()
 templates = Jinja2Templates(directory=constants.web_directory)

-VALID_TEXT_CONTENT_TYPES = ["org", "markdown", "pdf"]
+VALID_TEXT_CONTENT_TYPES = ["org", "markdown", "pdf", "plaintext"]


 # Create Routes
@@ -47,6 +47,7 @@ if not state.demo:
             "image": False,
             "github": False,
             "notion": False,
+            "plaintext": False,
             "enable_offline_model": False,
             "conversation_openai": False,
             "conversation_gpt4all": False,
@@ -61,6 +62,7 @@ if not state.demo:
                 "image": state.content_index.image is not None,
                 "github": state.content_index.github is not None,
                 "notion": state.content_index.notion is not None,
+                "plaintext": state.content_index.plaintext is not None,
             }
         )

@@ -30,6 +30,7 @@ class SearchType(str, Enum):
     Pdf = "pdf"
     Github = "github"
     Notion = "notion"
+    Plaintext = "plaintext"


 class ProcessorType(str, Enum):
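Because SearchType subclasses both str and Enum, the new member round-trips cleanly from the plain string used as the t query parameter; a small illustration of the mechanism (standalone, not khoj code):

    from enum import Enum

    class SearchType(str, Enum):
        Plaintext = "plaintext"

    assert SearchType("plaintext") is SearchType.Plaintext  # parse from a query param value
    assert SearchType.Plaintext == "plaintext"              # compares as a plain string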
@@ -70,6 +71,7 @@ class ContentIndex:
     github: Optional[TextContent] = None
     notion: Optional[TextContent] = None
     image: Optional[ImageContent] = None
+    plaintext: Optional[TextContent] = None
     plugins: Optional[Dict[str, TextContent]] = None


@@ -46,6 +46,12 @@ default_config = {
             "compressed-jsonl": "~/.khoj/content/notion/notion.jsonl.gz",
             "embeddings-file": "~/.khoj/content/notion/notion_embeddings.pt",
         },
+        "plaintext": {
+            "input-files": None,
+            "input-filter": None,
+            "compressed-jsonl": "~/.khoj/content/plaintext/plaintext.jsonl.gz",
+            "embeddings-file": "~/.khoj/content/plaintext/plaintext_embeddings.pt",
+        },
     },
     "search-type": {
         "symmetric": {
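In user-facing terms, this default corresponds to a khoj.yml stanza along these lines (a hedged sketch mirroring the keys above and the content-type layout of the sample config in this repo; the glob is illustrative, and both input fields default to null):

    content-type:
      plaintext:
        input-files: null
        input-filter:
          - "~/notes/**/*.*"
        compressed-jsonl: "~/.khoj/content/plaintext/plaintext.jsonl.gz"
        embeddings-file: "~/.khoj/content/plaintext/plaintext_embeddings.pt"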
@@ -79,6 +79,7 @@ class ContentConfig(ConfigBase):
     image: Optional[ImageContentConfig]
     markdown: Optional[TextContentConfig]
     pdf: Optional[TextContentConfig]
+    plaintext: Optional[TextContentConfig]
     github: Optional[GithubContentConfig]
     plugins: Optional[Dict[str, TextContentConfig]]
     notion: Optional[NotionContentConfig]
@@ -25,4 +25,4 @@ search-type:
   asymmetric:
     cross-encoder: cross-encoder/ms-marco-MiniLM-L-6-v2
     encoder: sentence-transformers/msmarco-MiniLM-L-6-v3
-version: 0.9.1.dev0
+version: 0.10.1
tests/test_plaintext_to_jsonl.py (new file, 84 lines)
@@ -0,0 +1,84 @@
# Standard Packages
import json
from pathlib import Path

# Internal Packages
from khoj.processor.plaintext.plaintext_to_jsonl import PlaintextToJsonl


def test_plaintext_file(tmp_path):
    "Convert files with no heading to jsonl."
    # Arrange
    entry = f"""
    Hi, I am a plaintext file and I have some plaintext words.
    """
    plaintextfile = create_file(tmp_path, entry)

    filename = plaintextfile.stem

    # Act
    # Extract Entries from specified plaintext files
    file_to_entries = PlaintextToJsonl.extract_plaintext_entries(plaintext_files=[plaintextfile])

    maps = PlaintextToJsonl.convert_plaintext_entries_to_maps(file_to_entries)

    # Convert each entry.file to absolute path to make them JSON serializable
    for map in maps:
        map.file = str(Path(map.file).absolute())

    # Process Each Entry from All Notes Files
    jsonl_string = PlaintextToJsonl.convert_entries_to_jsonl(maps)
    jsonl_data = [json.loads(json_string) for json_string in jsonl_string.splitlines()]

    # Assert
    assert len(jsonl_data) == 1
    # Ensure raw entry with no headings do not get heading prefix prepended
    assert not jsonl_data[0]["raw"].startswith("#")
    # Ensure compiled entry has filename prepended as top level heading
    assert jsonl_data[0]["compiled"] == f"{filename}\n{entry}"


def test_get_plaintext_files(tmp_path):
    "Ensure Plaintext files specified via input-filter, input-files extracted"
    # Arrange
    # Include via input-filter globs
    group1_file1 = create_file(tmp_path, filename="group1-file1.md")
    group1_file2 = create_file(tmp_path, filename="group1-file2.md")

    group2_file1 = create_file(tmp_path, filename="group2-file1.markdown")
    group2_file2 = create_file(tmp_path, filename="group2-file2.markdown")
    group2_file3 = create_file(tmp_path, filename="group2-file3.mbox")
    group2_file4 = create_file(tmp_path, filename="group2-file4.html")
    # Include via input-file field
    file1 = create_file(tmp_path, filename="notes.txt")
    # Include unsupported file types
    create_file(tmp_path, filename="group2-unincluded.py")
    create_file(tmp_path, filename="group2-unincluded.csv")
    create_file(tmp_path, filename="group2-unincluded.csv")
    # Not included by any filter
    create_file(tmp_path, filename="not-included-markdown.md")
    create_file(tmp_path, filename="not-included-text.txt")

    expected_files = sorted(
        map(str, [group1_file1, group1_file2, group2_file1, group2_file2, file1, group2_file3, group2_file4])
    )

    # Setup input-files, input-filters
    input_files = [tmp_path / "notes.txt"]
    input_filter = [tmp_path / "group1*.md", tmp_path / "group2*.*"]

    # Act
    extracted_plaintext_files = PlaintextToJsonl.get_plaintext_files(input_files, input_filter)

    # Assert
    assert len(extracted_plaintext_files) == 7
    assert set(extracted_plaintext_files) == set(expected_files)


# Helper Functions
def create_file(tmp_path: Path, entry=None, filename="test.md"):
    file_ = tmp_path / filename
    file_.touch()
    if entry:
        file_.write_text(entry)
    return file_