diff --git a/pyproject.toml b/pyproject.toml
index 7ef0282f..e1d84635 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -87,6 +87,7 @@ dependencies = [
     "cron-descriptor == 1.4.3",
     "django_apscheduler == 0.6.2",
     "anthropic == 0.26.1",
+    "docx2txt == 0.8"
 ]
 dynamic = ["version"]
diff --git a/src/khoj/database/models/__init__.py b/src/khoj/database/models/__init__.py
index 92415f59..380dec22 100644
--- a/src/khoj/database/models/__init__.py
+++ b/src/khoj/database/models/__init__.py
@@ -306,6 +306,7 @@ class Entry(BaseModel):
         NOTION = "notion"
         GITHUB = "github"
         CONVERSATION = "conversation"
+        DOCX = "docx"

     class EntrySource(models.TextChoices):
         COMPUTER = "computer"
diff --git a/src/khoj/interface/web/assets/icons/docx.svg b/src/khoj/interface/web/assets/icons/docx.svg
new file mode 100644
index 00000000..7e588b76
--- /dev/null
+++ b/src/khoj/interface/web/assets/icons/docx.svg
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
diff --git a/src/khoj/interface/web/chat.html b/src/khoj/interface/web/chat.html
index 639c0642..71ccea07 100644
--- a/src/khoj/interface/web/chat.html
+++ b/src/khoj/interface/web/chat.html
@@ -48,8 +48,8 @@
 Get the Khoj [Desktop](https://khoj.dev/downloads), [Obsidian](https://docs.khoj
 To get started, just start typing below. You can also type / to see a list of commands.
 `.trim()
-        const allowedExtensions = ['text/org', 'text/markdown', 'text/plain', 'text/html', 'application/pdf'];
-        const allowedFileEndings = ['org', 'md', 'txt', 'html', 'pdf'];
+        const allowedExtensions = ['text/org', 'text/markdown', 'text/plain', 'text/html', 'application/pdf', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'];
+        const allowedFileEndings = ['org', 'md', 'txt', 'html', 'pdf', 'docx'];
         let chatOptions = [];
         function createCopyParentText(message) {
             return function(event) {
diff --git a/src/khoj/interface/web/content_source_computer_input.html b/src/khoj/interface/web/content_source_computer_input.html
index 8dc4d7dc..77816f35 100644
--- a/src/khoj/interface/web/content_source_computer_input.html
+++ b/src/khoj/interface/web/content_source_computer_input.html
@@ -73,6 +73,8 @@
                 image_name = "pdf.svg"
             else if (fileExtension === "markdown" || fileExtension === "md")
                 image_name = "markdown.svg"
+            else if (fileExtension === "docx")
+                image_name = "docx.svg"
             else
                 image_name = "plaintext.svg"
diff --git a/src/khoj/processor/content/docx/__init__.py b/src/khoj/processor/content/docx/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/khoj/processor/content/docx/docx_to_entries.py b/src/khoj/processor/content/docx/docx_to_entries.py
new file mode 100644
index 00000000..ab28066d
--- /dev/null
+++ b/src/khoj/processor/content/docx/docx_to_entries.py
@@ -0,0 +1,110 @@
+import logging
+import os
+from datetime import datetime
+from typing import Dict, List, Tuple
+
+from langchain_community.document_loaders import Docx2txtLoader
+
+from khoj.database.models import Entry as DbEntry
+from khoj.database.models import KhojUser
+from khoj.processor.content.text_to_entries import TextToEntries
+from khoj.utils.helpers import timer
+from khoj.utils.rawconfig import Entry
+
+logger = logging.getLogger(__name__)
+
+
+class DocxToEntries(TextToEntries):
+    def __init__(self):
+        super().__init__()
+
+    # Define Functions
+    def process(
+        self, files: dict[str, str] = None, full_corpus: bool = True, user: KhojUser = None, regenerate: bool = False
+    ) -> Tuple[int, int]:
+        # Extract required fields from config
+        if not full_corpus:
+            deletion_file_names = set([file for file in files if files[file] == b""])
+            files_to_process = set(files) - deletion_file_names
+            files = {file: files[file] for file in files_to_process}
+        else:
+            deletion_file_names = None
+
+        # Extract Entries from specified Docx files
+        with timer("Extract entries from specified DOCX files", logger):
+            file_to_text_map, current_entries = DocxToEntries.extract_docx_entries(files)
+
+        # Split entries by max tokens supported by model
+        with timer("Split entries by max token size supported by model", logger):
+            current_entries = self.split_entries_by_max_tokens(current_entries, max_tokens=256)
+
+        # Identify, mark and merge any new entries with previous entries
+        with timer("Identify new or updated entries", logger):
+            num_new_embeddings, num_deleted_embeddings = self.update_embeddings(
+                current_entries,
+                DbEntry.EntryType.DOCX,
+                DbEntry.EntrySource.COMPUTER,
+                "compiled",
+                logger,
+                deletion_file_names,
+                user,
+                regenerate=regenerate,
+                file_to_text_map=file_to_text_map,
+            )
+
+        return num_new_embeddings, num_deleted_embeddings
+
+    @staticmethod
+    def extract_docx_entries(docx_files) -> Tuple[Dict, List[Entry]]:
+        """Extract entries from specified DOCX files"""
+
+        entries: List[str] = []
+        entry_to_location_map: List[Tuple[str, str]] = []
+        file_to_text_map = dict()
+        for docx_file in docx_files:
+            try:
+                timestamp_now = datetime.utcnow().timestamp()
+                tmp_file = f"tmp_docx_file_{timestamp_now}.docx"
+                with open(tmp_file, "wb") as f:
+                    bytes_content = docx_files[docx_file]
+                    f.write(bytes_content)
+
+                # Load the content using Docx2txtLoader
+                loader = Docx2txtLoader(tmp_file)
+                docx_entries_per_file = loader.load()
+
+                # Convert the loaded entries into the desired format
+                docx_texts = [page.page_content for page in docx_entries_per_file]
+
+                entry_to_location_map += zip(docx_texts, [docx_file] * len(docx_texts))
+                entries.extend(docx_texts)
+                file_to_text_map[docx_file] = docx_texts
+            except Exception as e:
+                logger.warning(f"Unable to process file: {docx_file}. This file will not be indexed.")
+                logger.warning(e, exc_info=True)
+            finally:
+                if os.path.exists(f"{tmp_file}"):
+                    os.remove(f"{tmp_file}")
+        return file_to_text_map, DocxToEntries.convert_docx_entries_to_maps(entries, dict(entry_to_location_map))
+
+    @staticmethod
+    def convert_docx_entries_to_maps(parsed_entries: List[str], entry_to_file_map) -> List[Entry]:
+        """Convert each DOCX entry into a dictionary"""
+        entries = []
+        for parsed_entry in parsed_entries:
+            entry_filename = entry_to_file_map[parsed_entry]
+            # Append base filename to compiled entry for context to model
+            heading = f"{entry_filename}\n"
+            compiled_entry = f"{heading}{parsed_entry}"
+            entries.append(
+                Entry(
+                    compiled=compiled_entry,
+                    raw=parsed_entry,
+                    heading=heading,
+                    file=f"{entry_filename}",
+                )
+            )
+
+        logger.debug(f"Converted {len(parsed_entries)} DOCX entries to dictionaries")
+
+        return entries
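For reference, a minimal usage sketch of the new extractor, mirroring the tests added below: DocxToEntries.extract_docx_entries takes a dict mapping file paths to raw bytes and returns a (file_to_text_map, entries) tuple. The sample path is illustrative.

    # Sketch, not part of the patch: drive the extractor directly.
    from khoj.processor.content.docx.docx_to_entries import DocxToEntries

    # Read a .docx into memory as bytes, keyed by its path (path is illustrative)
    with open("documents/report.docx", "rb") as f:
        docx_files = {"documents/report.docx": f.read()}

    # Returns (file_to_text_map, entries): extracted text per file, plus Entry objects
    file_to_text_map, entries = DocxToEntries.extract_docx_entries(docx_files)
    for entry in entries:
        print(entry.heading, len(entry.raw))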
diff --git a/src/khoj/routers/indexer.py b/src/khoj/routers/indexer.py
index 1e0184cf..4391b1ec 100644
--- a/src/khoj/routers/indexer.py
+++ b/src/khoj/routers/indexer.py
@@ -7,6 +7,7 @@ from pydantic import BaseModel
 from starlette.authentication import requires

 from khoj.database.models import GithubConfig, KhojUser, NotionConfig
+from khoj.processor.content.docx.docx_to_entries import DocxToEntries
 from khoj.processor.content.github.github_to_entries import GithubToEntries
 from khoj.processor.content.markdown.markdown_to_entries import MarkdownToEntries
 from khoj.processor.content.notion.notion_to_entries import NotionToEntries
@@ -40,6 +41,7 @@ class IndexerInput(BaseModel):
     markdown: Optional[dict[str, str]] = None
     pdf: Optional[dict[str, bytes]] = None
     plaintext: Optional[dict[str, str]] = None
+    docx: Optional[dict[str, bytes]] = None


 @indexer.post("/update")
@@ -63,7 +65,7 @@ async def update(
     ),
 ):
     user = request.user.object
-    index_files: Dict[str, Dict[str, str]] = {"org": {}, "markdown": {}, "pdf": {}, "plaintext": {}}
+    index_files: Dict[str, Dict[str, str]] = {"org": {}, "markdown": {}, "pdf": {}, "plaintext": {}, "docx": {}}
     try:
         logger.info(f"📬 Updating content index via API call by {client} client")
         for file in files:
@@ -79,6 +81,7 @@ async def update(
             markdown=index_files["markdown"],
             pdf=index_files["pdf"],
             plaintext=index_files["plaintext"],
+            docx=index_files["docx"],
         )

         if state.config == None:
@@ -93,6 +96,7 @@ async def update(
                 org=None,
                 markdown=None,
                 pdf=None,
+                docx=None,
                 image=None,
                 github=None,
                 notion=None,
@@ -129,6 +133,7 @@ async def update(
         "num_markdown": len(index_files["markdown"]),
         "num_pdf": len(index_files["pdf"]),
         "num_plaintext": len(index_files["plaintext"]),
+        "num_docx": len(index_files["docx"]),
     }

     update_telemetry_state(
@@ -295,6 +300,20 @@ def configure_content(
             logger.error(f"🚨 Failed to setup Notion: {e}", exc_info=True)
             success = False

+    try:
+        if (search_type == state.SearchType.All.value or search_type == state.SearchType.Docx.value) and files["docx"]:
+            logger.info("📄 Setting up search for docx")
+            text_search.setup(
+                DocxToEntries,
+                files.get("docx"),
+                regenerate=regenerate,
+                full_corpus=full_corpus,
+                user=user,
+            )
+    except Exception as e:
+        logger.error(f"🚨 Failed to setup docx: {e}", exc_info=True)
+        success = False
+
     # Invalidate Query Cache
     if user:
         state.query_cache[user.uuid] = LRU()
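With the new docx field on IndexerInput, clients can push Word documents through the same update endpoint as other file types. A hedged sketch of such a request follows; the host and mount prefix for @indexer.post("/update") are assumptions, and the Bearer token and multipart "files" shape are taken from the test fixtures below.

    # Sketch, not part of the patch: upload a .docx for indexing via the API.
    import requests

    url = "http://localhost:42110/api/v1/index/update"  # assumed host and mount point
    headers = {"Authorization": "Bearer kk-secret"}  # token shape from the test fixtures

    with open("notes/meeting.docx", "rb") as f:  # illustrative path
        files = [
            (
                "files",
                (
                    "notes/meeting.docx",
                    f.read(),
                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                ),
            )
        ]
        response = requests.post(url, headers=headers, files=files)
    response.raise_for_status()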
Docx = "docx" class ProcessorType(str, Enum): diff --git a/src/khoj/utils/helpers.py b/src/khoj/utils/helpers.py index 59327b0d..de6b7285 100644 --- a/src/khoj/utils/helpers.py +++ b/src/khoj/utils/helpers.py @@ -115,6 +115,8 @@ def get_file_type(file_type: str, file_content: bytes) -> tuple[str, str]: return "org", encoding elif file_type in ["application/pdf"]: return "pdf", encoding + elif file_type in ["application/msword", "application/vnd.openxmlformats-officedocument.wordprocessingml.document"]: + return "docx", encoding elif file_type in ["image/jpeg"]: return "jpeg", encoding elif file_type in ["image/png"]: diff --git a/src/khoj/utils/rawconfig.py b/src/khoj/utils/rawconfig.py index fbc873f6..b4bdcbea 100644 --- a/src/khoj/utils/rawconfig.py +++ b/src/khoj/utils/rawconfig.py @@ -65,6 +65,7 @@ class ContentConfig(ConfigBase): plaintext: Optional[TextContentConfig] = None github: Optional[GithubContentConfig] = None notion: Optional[NotionContentConfig] = None + docx: Optional[TextContentConfig] = None class ImageSearchConfig(ConfigBase): diff --git a/tests/data/docx/bangalore.docx b/tests/data/docx/bangalore.docx new file mode 100644 index 00000000..df1dd761 Binary files /dev/null and b/tests/data/docx/bangalore.docx differ diff --git a/tests/data/docx/iceland.docx b/tests/data/docx/iceland.docx new file mode 100644 index 00000000..cfdbf2a8 Binary files /dev/null and b/tests/data/docx/iceland.docx differ diff --git a/tests/test_client.py b/tests/test_client.py index 13f226c1..d3c18030 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -61,7 +61,7 @@ def test_search_with_invalid_content_type(client): @pytest.mark.django_db(transaction=True) def test_search_with_valid_content_type(client): headers = {"Authorization": "Bearer kk-secret"} - for content_type in ["all", "org", "markdown", "image", "pdf", "github", "notion", "plaintext"]: + for content_type in ["all", "org", "markdown", "image", "pdf", "github", "notion", "plaintext", "docx"]: # Act response = client.get(f"/api/search?q=random&t={content_type}", headers=headers) # Assert @@ -480,6 +480,14 @@ def get_sample_files_data(): ("files", ("path/to/filename1.txt", "my first web page", "text/plain")), ("files", ("path/to/filename2.txt", "2021-02-02 Journal Entry", "text/plain")), ("files", ("path/to/filename.md", "# Notes from client call", "text/markdown")), + ( + "files", + ( + "path/to/filename.docx", + "## Studying anthropological records from the Fatimid caliphate", + "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ), + ), ( "files", ("path/to/filename1.md", "## Studying anthropological records from the Fatimid caliphate", "text/markdown"), diff --git a/tests/test_docx_to_entries.py b/tests/test_docx_to_entries.py new file mode 100644 index 00000000..089c7fec --- /dev/null +++ b/tests/test_docx_to_entries.py @@ -0,0 +1,37 @@ +import os + +from khoj.processor.content.docx.docx_to_entries import DocxToEntries + + +def test_single_page_docx_to_jsonl(): + "Convert single page DOCX file to jsonl." 
diff --git a/src/khoj/utils/rawconfig.py b/src/khoj/utils/rawconfig.py
index fbc873f6..b4bdcbea 100644
--- a/src/khoj/utils/rawconfig.py
+++ b/src/khoj/utils/rawconfig.py
@@ -65,6 +65,7 @@ class ContentConfig(ConfigBase):
     plaintext: Optional[TextContentConfig] = None
     github: Optional[GithubContentConfig] = None
     notion: Optional[NotionContentConfig] = None
+    docx: Optional[TextContentConfig] = None


 class ImageSearchConfig(ConfigBase):
diff --git a/tests/data/docx/bangalore.docx b/tests/data/docx/bangalore.docx
new file mode 100644
index 00000000..df1dd761
Binary files /dev/null and b/tests/data/docx/bangalore.docx differ
diff --git a/tests/data/docx/iceland.docx b/tests/data/docx/iceland.docx
new file mode 100644
index 00000000..cfdbf2a8
Binary files /dev/null and b/tests/data/docx/iceland.docx differ
diff --git a/tests/test_client.py b/tests/test_client.py
index 13f226c1..d3c18030 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -61,7 +61,7 @@ def test_search_with_invalid_content_type(client):
 @pytest.mark.django_db(transaction=True)
 def test_search_with_valid_content_type(client):
     headers = {"Authorization": "Bearer kk-secret"}
-    for content_type in ["all", "org", "markdown", "image", "pdf", "github", "notion", "plaintext"]:
+    for content_type in ["all", "org", "markdown", "image", "pdf", "github", "notion", "plaintext", "docx"]:
         # Act
         response = client.get(f"/api/search?q=random&t={content_type}", headers=headers)
         # Assert
@@ -480,6 +480,14 @@ def get_sample_files_data():
         ("files", ("path/to/filename1.txt", "my first web page", "text/plain")),
         ("files", ("path/to/filename2.txt", "2021-02-02 Journal Entry", "text/plain")),
         ("files", ("path/to/filename.md", "# Notes from client call", "text/markdown")),
+        (
+            "files",
+            (
+                "path/to/filename.docx",
+                "## Studying anthropological records from the Fatimid caliphate",
+                "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+            ),
+        ),
         (
             "files",
             ("path/to/filename1.md", "## Studying anthropological records from the Fatimid caliphate", "text/markdown"),
diff --git a/tests/test_docx_to_entries.py b/tests/test_docx_to_entries.py
new file mode 100644
index 00000000..089c7fec
--- /dev/null
+++ b/tests/test_docx_to_entries.py
@@ -0,0 +1,37 @@
+import os
+
+from khoj.processor.content.docx.docx_to_entries import DocxToEntries
+
+
+def test_single_page_docx_to_jsonl():
+    "Convert single page DOCX file to jsonl."
+    # Act
+    # Extract Entries from specified Docx files
+    # Read singlepage.docx into memory as bytes
+    with open("tests/data/docx/iceland.docx", "rb") as f:
+        docx_bytes = f.read()
+
+    data = {"tests/data/docx/iceland.docx": docx_bytes}
+    entries = DocxToEntries.extract_docx_entries(docx_files=data)
+
+    # Assert
+    assert "The Icelandic horse" in entries[0]["tests/data/docx/iceland.docx"][0]
+    assert len(entries) == 2
+    assert len(entries[1]) == 1
+
+
+def test_multi_page_docx_to_jsonl():
+    "Convert multi page DOCX file to jsonl."
+    # Act
+    # Extract Entries from specified Docx files
+    # Read multipage.docx into memory as bytes
+    with open("tests/data/docx/bangalore.docx", "rb") as f:
+        docx_bytes = f.read()
+
+    data = {"tests/data/docx/bangalore.docx": docx_bytes}
+    entries = DocxToEntries.extract_docx_entries(docx_files=data)
+
+    # Assert
+    assert "Bangalore" in entries[0]["tests/data/docx/bangalore.docx"][0]
+    assert len(entries) == 2
+    assert len(entries[1]) == 1
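The len(entries[1]) == 1 assertions hold for both fixtures because Docx2txtLoader extracts each .docx as a single text blob, so even the multi-page Bangalore file yields one entry before token-based splitting. A standalone sketch of that behavior, assuming the fixture path:

    # Sketch, not part of the patch: Docx2txtLoader yields one Document per file.
    from langchain_community.document_loaders import Docx2txtLoader

    docs = Docx2txtLoader("tests/data/docx/bangalore.docx").load()
    print(len(docs))                   # expected: 1
    print(docs[0].page_content[:80])   # start of the extracted text
    print(docs[0].metadata)            # e.g. {"source": "tests/data/docx/bangalore.docx"}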