mirror of
https://github.com/khoj-ai/khoj.git
synced 2024-11-23 15:38:55 +01:00
Use better cmdline argument names. Drop unneeded no-compress argument
Can infer to compress or not via the output_file suffix
This commit is contained in:
parent
d9f60c00bf
commit
85bf15628d
4 changed files with 27 additions and 27 deletions
12
README.md
12
README.md
|
@@ -22,8 +22,8 @@ Setup
|
|||
Generate compressed JSONL from specified org-mode files
|
||||
```sh
|
||||
python3 processor/org-mode/org-to-jsonl.py \
|
||||
--org-files "~/Notes/Schedule.org" "~/Notes/Incoming.org" \
|
||||
--jsonl-file ".notes.jsonl" \
|
||||
--input-files ~/Notes/Schedule.org ~/Notes/Incoming.org \
|
||||
--output-file .notes.jsonl.gz \
|
||||
--verbose
|
||||
```
|
||||
|
||||
|
@@ -32,8 +32,8 @@ Run
|
|||
Load ML model, generate embeddings and expose API interface to run user queries on above org-mode files
|
||||
```sh
|
||||
python3 main.py \
|
||||
--jsonl-file .notes.jsonl.gz \
|
||||
--embeddings-file .notes_embeddings.pt \
|
||||
--compressed-jsonl .notes.jsonl.gz \
|
||||
--embeddings .notes_embeddings.pt \
|
||||
--verbose
|
||||
```
|
||||
|
||||
|
@@ -49,8 +49,8 @@ Use
|
|||
- *Call Semantic Search via Python Script Directly*
|
||||
```sh
|
||||
python3 search_types/asymmetric.py \
|
||||
--jsonl-file .notes.jsonl.gz \
|
||||
--embeddings-file .notes_embeddings.pt \
|
||||
--compressed-jsonl .notes.jsonl.gz \
|
||||
--embeddings .notes_embeddings.pt \
|
||||
--results-count 5 \
|
||||
--verbose \
|
||||
--interactive
|
||||
|
|
8
main.py
8
main.py
|
@@ -44,8 +44,8 @@ def search(q: str, n: Optional[int] = 5, t: Optional[str] = 'notes'):
|
|||
if __name__ == '__main__':
|
||||
# Setup Argument Parser
|
||||
parser = argparse.ArgumentParser(description="Expose API for Semantic Search")
|
||||
parser.add_argument('--jsonl-file', '-j', required=True, type=pathlib.Path, help="Input file for compressed JSONL formatted notes to compute embeddings from")
|
||||
parser.add_argument('--embeddings-file', '-e', type=pathlib.Path, help="File to save/load model embeddings to/from. Default: ./embeddings.pt")
|
||||
parser.add_argument('--compressed-jsonl', '-j', required=True, type=pathlib.Path, help="Compressed JSONL formatted notes file to compute embeddings from")
|
||||
parser.add_argument('--embeddings', '-e', required=True, type=pathlib.Path, help="File to save/load model embeddings to/from")
|
||||
parser.add_argument('--verbose', action='store_true', default=False, help="Show verbose conversion logs. Default: false")
|
||||
args = parser.parse_args()
|
||||
|
||||
|
@@ -53,10 +53,10 @@ if __name__ == '__main__':
|
|||
bi_encoder, cross_encoder, top_k = asymmetric.initialize_model()
|
||||
|
||||
# Extract Entries
|
||||
entries = asymmetric.extract_entries(args.jsonl_file, args.verbose)
|
||||
entries = asymmetric.extract_entries(args.compressed_jsonl, args.verbose)
|
||||
|
||||
# Compute or Load Embeddings
|
||||
corpus_embeddings = asymmetric.compute_embeddings(entries, bi_encoder, args.embeddings_file, args.verbose)
|
||||
corpus_embeddings = asymmetric.compute_embeddings(entries, bi_encoder, args.embeddings, args.verbose)
|
||||
|
||||
# Generate search_notes method from initialized model, entries and embeddings
|
||||
search_notes = create_search_notes(corpus_embeddings, entries, bi_encoder, cross_encoder, top_k)
|
||||
|
|
|
@@ -20,11 +20,11 @@ def dump_jsonl(jsonl_data, output_path, verbose=0):
|
|||
|
||||
|
||||
def compress_jsonl_data(jsonl_data, output_path, verbose=0):
|
||||
with gzip.open(f'{output_path}.gz', 'wt') as gzip_file:
|
||||
with gzip.open(get_absolute_path(output_path), 'wt') as gzip_file:
|
||||
gzip_file.write(jsonl_data)
|
||||
|
||||
if verbose > 0:
|
||||
print(f'Wrote {len(jsonl_data)} records to gzip compressed jsonl at {output_path}.gz')
|
||||
print(f'Wrote {len(jsonl_data)} records to gzip compressed jsonl at {output_path}')
|
||||
|
||||
|
||||
def load_jsonl(input_path, verbose=0):
|
||||
|
@@ -112,19 +112,19 @@ def get_absolute_path(filepath):
|
|||
if __name__ == '__main__':
|
||||
# Setup Argument Parser
|
||||
parser = argparse.ArgumentParser(description="Map Org-Mode notes into (compressed) JSONL format")
|
||||
parser.add_argument('--jsonl-file', '-o', type=pathlib.Path, required=True, help="Output file for JSONL formatted notes")
|
||||
parser.add_argument('--org-files', '-i', nargs='*', help="List of org-mode files to process")
|
||||
parser.add_argument('--org-file-filter', type=str, default=None, help="Regex filter for org-mode files to process")
|
||||
parser.add_argument('--no-compress', action='store_true', default=False, help="Do not compress jsonl output with gunzip. Default: False")
|
||||
parser.add_argument('--output-file', '-o', type=pathlib.Path, required=True, help="Output file for (compressed) JSONL formatted notes. Expected file extensions: jsonl or jsonl.gz")
|
||||
parser.add_argument('--input-files', '-i', nargs='*', help="List of org-mode files to process")
|
||||
parser.add_argument('--input-filter', type=str, default=None, help="Regex filter for org-mode files to process")
|
||||
parser.add_argument('--verbose', '-v', action='count', help="Show verbose conversion logs")
|
||||
args = parser.parse_args()
|
||||
|
||||
if is_none_or_empty(args.org_files) and is_none_or_empty(args.org_file_filter):
|
||||
# Input Validation
|
||||
if is_none_or_empty(args.input_files) and is_none_or_empty(args.input_filter):
|
||||
print("At least one of org-files or org-file-filter is required to be specified")
|
||||
exit(1)
|
||||
|
||||
# Get Org Files to Process
|
||||
org_files = get_org_files(args.org_files, args.org_file_filter)
|
||||
org_files = get_org_files(args.input_files, args.input_filter)
|
||||
|
||||
# Extract Entries from specified Org files
|
||||
entries = extract_org_entries(org_files)
|
||||
|
@@ -133,7 +133,7 @@ if __name__ == '__main__':
|
|||
jsonl_data = convert_org_entries_to_jsonl(entries, verbose=args.verbose)
|
||||
|
||||
# Compress JSONL formatted Data
|
||||
if args.no_compress:
|
||||
dump_jsonl(jsonl_data, get_absolute_path(args.jsonl_file), verbose=args.verbose)
|
||||
else:
|
||||
compress_jsonl_data(jsonl_data, get_absolute_path(args.jsonl_file), verbose=args.verbose)
|
||||
if args.output_file.suffix == ".gz":
|
||||
compress_jsonl_data(jsonl_data, args.output_file, verbose=args.verbose)
|
||||
elif args.output_file.suffix == ".jsonl":
|
||||
dump_jsonl(jsonl_data, args.output_file, verbose=args.verbose)
|
||||
|
|
|
@@ -146,9 +146,9 @@ def get_absolute_path(filepath):
|
|||
|
||||
if __name__ == '__main__':
|
||||
# Setup Argument Parser
|
||||
parser = argparse.ArgumentParser(description="Map Org-Mode notes into JSONL format")
|
||||
parser.add_argument('--jsonl-file', '-j', required=True, type=pathlib.Path, help="Input file for compressed JSONL formatted notes to compute embeddings from")
|
||||
parser.add_argument('--embeddings-file', '-e', type=pathlib.Path, help="File to save/load model embeddings to/from. Default: ./embeddings.pt")
|
||||
parser = argparse.ArgumentParser(description="Map Org-Mode notes into (compressed) JSONL format")
|
||||
parser.add_argument('--compressed-jsonl', '-j', required=True, type=pathlib.Path, help="Input file for compressed JSONL formatted notes to compute embeddings from")
|
||||
parser.add_argument('--embeddings', '-e', required=True, type=pathlib.Path, help="File to save/load model embeddings to/from")
|
||||
parser.add_argument('--results-count', '-n', default=5, type=int, help="Number of results to render. Default: 5")
|
||||
parser.add_argument('--interactive', action='store_true', default=False, help="Interactive mode allows user to run queries on the model. Default: true")
|
||||
parser.add_argument('--verbose', action='store_true', default=False, help="Show verbose conversion logs. Default: false")
|
||||
|
@@ -158,10 +158,10 @@ if __name__ == '__main__':
|
|||
bi_encoder, cross_encoder, top_k = initialize_model()
|
||||
|
||||
# Extract Entries
|
||||
entries = extract_entries(args.jsonl_file, args.verbose)
|
||||
entries = extract_entries(args.compressed_jsonl, args.verbose)
|
||||
|
||||
# Compute or Load Embeddings
|
||||
corpus_embeddings = compute_embeddings(entries, bi_encoder, args.embeddings_file, args.verbose)
|
||||
corpus_embeddings = compute_embeddings(entries, bi_encoder, args.embeddings, args.verbose)
|
||||
|
||||
# Run User Queries on Entries in Interactive Mode
|
||||
while args.interactive:
|
||||
|
|
Loading…
Reference in a new issue