# System Packages
import locale
import logging
import os
from pathlib import Path

# External Packages
import pytest

# Internal Packages
from khoj.utils.state import content_index, search_models
from khoj.search_type import text_search
from khoj.processor.org_mode.org_to_jsonl import OrgToJsonl
from khoj.processor.github.github_to_jsonl import GithubToJsonl
from khoj.utils.config import SearchModels
from khoj.utils.fs_syncer import get_org_files
from khoj.utils.rawconfig import ContentConfig, SearchConfig, TextContentConfig


# Test
# ----------------------------------------------------------------------------------------------------
def test_text_search_setup_with_missing_file_raises_error(org_config_with_only_new_file: TextContentConfig):
    # Arrange
    # Ensure file mentioned in org.input-files is missing
    single_new_file = Path(org_config_with_only_new_file.input_files[0])
    single_new_file.unlink()

    # Act
    # Generate notes embeddings during asymmetric setup
    with pytest.raises(FileNotFoundError):
        get_org_files(org_config_with_only_new_file)


# ----------------------------------------------------------------------------------------------------
def test_text_search_setup_with_empty_file_raises_error(
    org_config_with_only_new_file: TextContentConfig, search_config: SearchConfig
):
    # Arrange
    data = get_org_files(org_config_with_only_new_file)

    # Act
    # Generate notes embeddings during asymmetric setup
    with pytest.raises(ValueError, match=r"^No valid entries found"):
        text_search.setup(OrgToJsonl, data, org_config_with_only_new_file, search_config.asymmetric, regenerate=True)


# ----------------------------------------------------------------------------------------------------
def test_text_search_setup(content_config: ContentConfig, search_models: SearchModels):
    # Arrange
    data = get_org_files(content_config.org)

    # Act
    # Regenerate notes embeddings during asymmetric setup
    notes_model = text_search.setup(
        OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=True
    )

    # Assert
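    # Expect all 10 entries in the test org fixture to be indexed and embedded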
    assert len(notes_model.entries) == 10
    assert len(notes_model.corpus_embeddings) == 10


# ----------------------------------------------------------------------------------------------------
def test_text_index_same_if_content_unchanged(content_config: ContentConfig, search_models: SearchModels, caplog):
    # Arrange
    caplog.set_level(logging.INFO, logger="khoj")

    data = get_org_files(content_config.org)

    # Act
    # Generate initial notes embeddings during asymmetric setup
    text_search.setup(OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=True)
    initial_logs = caplog.text
    caplog.clear()  # Clear logs

    # Run asymmetric setup again with no changes to the data source. Ensure the index is not updated
    text_search.setup(OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=False)
    final_logs = caplog.text

    # Assert
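    # The "Creating index from scratch." log line signals that a full re-index was triggered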
    assert "Creating index from scratch." in initial_logs
    assert "Creating index from scratch." not in final_logs


# ----------------------------------------------------------------------------------------------------
@pytest.mark.anyio
async def test_text_search(content_config: ContentConfig, search_config: SearchConfig):
    # Arrange
    data = get_org_files(content_config.org)

    search_models.text_search = text_search.initialize_model(search_config.asymmetric)
    content_index.org = text_search.setup(
        OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=True
    )
    query = "How to git install application?"

    # Act
    hits, entries = await text_search.query(
        query, search_model=search_models.text_search, content=content_index.org, rank_results=True
    )

    results = text_search.collate_results(hits, entries, count=1)

    # Assert
    # search results should contain "git clone" entry
    search_result = results[0].entry
    assert "git clone" in search_result


# ----------------------------------------------------------------------------------------------------
def test_entry_chunking_by_max_tokens(org_config_with_only_new_file: TextContentConfig, search_models: SearchModels):
    # Arrange
    # Insert an org-mode entry with size exceeding the max token limit into the new org file
    max_tokens = 256
    new_file_to_index = Path(org_config_with_only_new_file.input_files[0])
    with open(new_file_to_index, "w") as f:
        f.write(f"* Entry more than {max_tokens} words\n")
        for index in range(max_tokens + 1):
            f.write(f"{index} ")

    data = get_org_files(org_config_with_only_new_file)

    # Act
    # Reload embeddings, entries, notes model after adding the new org-mode file
    initial_notes_model = text_search.setup(
        OrgToJsonl, data, org_config_with_only_new_file, search_models.text_search.bi_encoder, regenerate=False
    )

    # Assert
    # Verify newly added org-mode entry is split by max tokens
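    # The single oversized entry should yield 2 chunks: one up to the token limit and one for the overflow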
    assert len(initial_notes_model.entries) == 2
    assert len(initial_notes_model.corpus_embeddings) == 2


# ----------------------------------------------------------------------------------------------------
# @pytest.mark.skip(reason="Flaky due to compressed_jsonl file being rewritten by other tests")
def test_entry_chunking_by_max_tokens_not_full_corpus(
    org_config_with_only_new_file: TextContentConfig, search_models: SearchModels
):
    # Arrange
    # Insert an org-mode entry with size exceeding the max token limit into the new org file
    data = {
        "readme.org": """
* Khoj
/Allow natural language search on user content like notes, images using transformer based models/

All data is processed locally. User can interface with khoj app via [[./interface/emacs/khoj.el][Emacs]], API or Commandline

** Dependencies
- Python3
- [[https://docs.conda.io/en/latest/miniconda.html#latest-miniconda-installer-links][Miniconda]]

** Install
#+begin_src shell
git clone https://github.com/khoj-ai/khoj && cd khoj
conda env create -f environment.yml
conda activate khoj
#+end_src"""
    }
    text_search.setup(
        OrgToJsonl,
        data,
        org_config_with_only_new_file,
        search_models.text_search.bi_encoder,
        regenerate=False,
    )

    max_tokens = 256
    new_file_to_index = Path(org_config_with_only_new_file.input_files[0])
    with open(new_file_to_index, "w") as f:
        f.write(f"* Entry more than {max_tokens} words\n")
        for index in range(max_tokens + 1):
            f.write(f"{index} ")

    data = get_org_files(org_config_with_only_new_file)

    # Act
    # Reload embeddings, entries, notes model after adding the new org-mode file
    initial_notes_model = text_search.setup(
        OrgToJsonl,
        data,
        org_config_with_only_new_file,
        search_models.text_search.bi_encoder,
        regenerate=False,
        full_corpus=False,
    )

    # Assert
    # Verify newly added org-mode entry is split by max tokens
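    # Presumably 3 entries from the readme.org headings plus the oversized entry split into 2 chunks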
    assert len(initial_notes_model.entries) == 5
    assert len(initial_notes_model.corpus_embeddings) == 5


# ----------------------------------------------------------------------------------------------------
def test_regenerate_index_with_new_entry(
    content_config: ContentConfig, search_models: SearchModels, new_org_file: Path
):
    # Arrange
    data = get_org_files(content_config.org)
    initial_notes_model = text_search.setup(
        OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=True
    )
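
    # Sanity check the index starts with the expected 10 fixture entries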
    assert len(initial_notes_model.entries) == 10
    assert len(initial_notes_model.corpus_embeddings) == 10

    # Configure a new org file as the only input file and add an entry to it
    content_config.org.input_files = [f"{new_org_file}"]
    with open(new_org_file, "w") as f:
        f.write("\n* A Chihuahua doing Tango\n- Saw a super cute video of a chihuahua doing the Tango on Youtube\n")

    data = get_org_files(content_config.org)

    # Act
    # regenerate notes jsonl, model embeddings and model to include entry from new file
    regenerated_notes_model = text_search.setup(
        OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=True
    )

    # Assert
    assert len(regenerated_notes_model.entries) == 11
    assert len(regenerated_notes_model.corpus_embeddings) == 11

    # verify new entry appended to index, without disrupting order or content of existing entries
    error_details = compare_index(initial_notes_model, regenerated_notes_model)
    if error_details:
        pytest.fail(error_details, pytrace=False)

    # Cleanup
    # reset input_files in config to empty list
    content_config.org.input_files = []


# ----------------------------------------------------------------------------------------------------
def test_update_index_with_duplicate_entries_in_stable_order(
    org_config_with_only_new_file: TextContentConfig, search_models: SearchModels
):
    # Arrange
    new_file_to_index = Path(org_config_with_only_new_file.input_files[0])

    # Insert org-mode entries with same compiled form into new org file
    new_entry = "* TODO A Chihuahua doing Tango\n- Saw a super cute video of a chihuahua doing the Tango on Youtube\n"
    with open(new_file_to_index, "w") as f:
        f.write(f"{new_entry}{new_entry}")

    data = get_org_files(org_config_with_only_new_file)

    # Act
    # load embeddings, entries, notes model after adding new org-mode file
    initial_index = text_search.setup(
        OrgToJsonl, data, org_config_with_only_new_file, search_models.text_search.bi_encoder, regenerate=True
    )

    data = get_org_files(org_config_with_only_new_file)

    # update embeddings, entries, notes model after rereading the unchanged org-mode file
    updated_index = text_search.setup(
        OrgToJsonl, data, org_config_with_only_new_file, search_models.text_search.bi_encoder, regenerate=False
    )

    # Assert
    # verify only 1 entry added even if there are multiple duplicate entries
    assert len(initial_index.entries) == len(updated_index.entries) == 1
    assert len(initial_index.corpus_embeddings) == len(updated_index.corpus_embeddings) == 1

    # verify the same entry is added even when there are multiple duplicate entries
    error_details = compare_index(initial_index, updated_index)
    if error_details:
        pytest.fail(error_details)


# ----------------------------------------------------------------------------------------------------
def test_update_index_with_deleted_entry(org_config_with_only_new_file: TextContentConfig, search_models: SearchModels):
    # Arrange
    new_file_to_index = Path(org_config_with_only_new_file.input_files[0])

    # Insert two org-mode entries into the new org file
    new_entry = "* TODO A Chihuahua doing Tango\n- Saw a super cute video of a chihuahua doing the Tango on Youtube\n"
    with open(new_file_to_index, "w") as f:
        f.write(f"{new_entry}{new_entry} -- Tatooine")
    data = get_org_files(org_config_with_only_new_file)

    # load embeddings, entries, notes model after adding new org file with 2 entries
    initial_index = text_search.setup(
        OrgToJsonl, data, org_config_with_only_new_file, search_models.text_search.bi_encoder, regenerate=True
    )

    # update embeddings, entries, notes model after removing an entry from the org file
    with open(new_file_to_index, "w") as f:
        f.write(f"{new_entry}")

    data = get_org_files(org_config_with_only_new_file)

    # Act
    updated_index = text_search.setup(
        OrgToJsonl, data, org_config_with_only_new_file, search_models.text_search.bi_encoder, regenerate=False
    )

    # Assert
    # verify one entry is removed from the index after it is deleted from the org file
    assert len(initial_index.entries) == len(updated_index.entries) + 1
    assert len(initial_index.corpus_embeddings) == len(updated_index.corpus_embeddings) + 1

    # verify the remaining entry is unchanged after the deletion
    error_details = compare_index(updated_index, initial_index)
    if error_details:
        pytest.fail(error_details)


# ----------------------------------------------------------------------------------------------------
def test_update_index_with_new_entry(content_config: ContentConfig, search_models: SearchModels, new_org_file: Path):
    # Arrange
    data = get_org_files(content_config.org)
    initial_notes_model = text_search.setup(
        OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=True, normalize=False
    )

    # Add a new org-mode entry to a new org file
    with open(new_org_file, "w") as f:
        new_entry = "\n* A Chihuahua doing Tango\n- Saw a super cute video of a chihuahua doing the Tango on Youtube\n"
        f.write(new_entry)

    data = get_org_files(content_config.org)

    # Act
    # update embeddings, entries with the newly added note
    content_config.org.input_files = [f"{new_org_file}"]
    final_notes_model = text_search.setup(
        OrgToJsonl, data, content_config.org, search_models.text_search.bi_encoder, regenerate=False, normalize=False
    )

    # Assert
    assert len(final_notes_model.entries) == len(initial_notes_model.entries) + 1
    assert len(final_notes_model.corpus_embeddings) == len(initial_notes_model.corpus_embeddings) + 1

    # verify new entry appended to index, without disrupting order or content of existing entries
    error_details = compare_index(initial_notes_model, final_notes_model)
    if error_details:
        pytest.fail(error_details, pytrace=False)

    # Cleanup
    # reset input_files in config to empty list
    content_config.org.input_files = []


# ----------------------------------------------------------------------------------------------------
@pytest.mark.skipif(os.getenv("GITHUB_PAT_TOKEN") is None, reason="GITHUB_PAT_TOKEN not set")
def test_text_search_setup_github(content_config: ContentConfig, search_models: SearchModels):
    # Act
    # Regenerate github embeddings to test asymmetric setup without caching
    # Assumption: text_search.setup expects a files dict as its second argument;
    # GithubToJsonl pulls entries from the GitHub API, so an empty dict is passed here
    github_model = text_search.setup(
        GithubToJsonl, {}, content_config.github, search_models.text_search.bi_encoder, regenerate=True
    )

    # Assert
    assert len(github_model.entries) > 1


def compare_index(initial_notes_model, final_notes_model):
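    """Compare the shared prefix of two indices' entries and embeddings.

    Returns a description of any mismatched entries or embeddings, or an
    empty string if the initial index is preserved in the final index.
    """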
    mismatched_entries, mismatched_embeddings = [], []
    for index in range(len(initial_notes_model.entries)):
        if initial_notes_model.entries[index].to_json() != final_notes_model.entries[index].to_json():
            mismatched_entries.append(index)

    # verify new entry embedding appended to embeddings tensor, without disrupting order or content of existing embeddings
    for index in range(len(initial_notes_model.corpus_embeddings)):
        if not initial_notes_model.corpus_embeddings[index].allclose(final_notes_model.corpus_embeddings[index]):
            mismatched_embeddings.append(index)

    error_details = ""
    if mismatched_entries:
        mismatched_entries_str = ", ".join(map(str, mismatched_entries))
        error_details += f"Entries at {mismatched_entries_str} not equal\n"
    if mismatched_embeddings:
        mismatched_embeddings_str = ", ".join(map(str, mismatched_embeddings))
        error_details += f"Embeddings at {mismatched_embeddings_str} not equal\n"

    return error_details