Mirror of https://github.com/khoj-ai/khoj.git, synced 2024-11-27 17:35:07 +01:00
Commit 8b2af0b5ef

* Add support for gpt4all's falcon model as an additional conversation processor
  - Update the UI pages to allow the user to point to the new endpoints for GPT
  - Update the internal schemas to support both GPT4 models and OpenAI
  - Add unit tests benchmarking some of the Falcon performance
* Add exc_info to include stack trace in error logs for text processors
* Pull shared functions into utils.py to be used across gpt4 and gpt
* Add migration for new processor conversation schema
* Skip GPT4All actor tests due to typing issues
* Fix Obsidian processor configuration in auto-configure flow
* Rename enable_local_llm to enable_offline_chat
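For context on the offline chat path introduced here, the sketch below shows how a Falcon model is typically driven through the gpt4all Python bindings that this commit builds on. It is only an illustration: the model filename and generation parameters are assumptions, not khoj's actual wiring, which sits behind its conversation processor schema.

# Illustrative use of the gpt4all Python bindings for offline chat.
# The model filename is an assumed example, not necessarily the one khoj uses.
from gpt4all import GPT4All

model = GPT4All("ggml-model-gpt4all-falcon-q4_0.bin")  # fetched to the local model cache on first use
reply = model.generate("Summarize my notes on project planning.", max_tokens=256)
print(reply)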
28 lines · 813 B · YAML
content-type:
  org:
    compressed-jsonl: .notes.json.gz
    embeddings-file: .note_embeddings.pt
    index-header-entries: true
    input-files:
      - ~/first_from_config.org
      - ~/second_from_config.org
    input-filter:
      - '*.org'
      - ~/notes/*.org
  plugins:
    content_plugin_1:
      compressed-jsonl: content_plugin_1.jsonl.gz
      embeddings-file: content_plugin_1_embeddings.pt
      input-files:
        - content_plugin_1_new.jsonl.gz
    content_plugin_2:
      compressed-jsonl: content_plugin_2.jsonl.gz
      embeddings-file: content_plugin_2_embeddings.pt
      input-filter:
        - '*2_new.jsonl.gz'
enable-offline-chat: false
search-type:
  asymmetric:
    cross-encoder: cross-encoder/ms-marco-MiniLM-L-6-v2
    encoder: sentence-transformers/msmarco-MiniLM-L-6-v3
version: 0.9.1.dev0
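As a quick way to sanity-check the structure of the sample config above, the sketch below loads it with PyYAML and reads a few of its keys. The file path is hypothetical, and khoj validates this file through its own schema classes rather than raw dictionary access; this only mirrors the sample shown here.

# Load the sample config with PyYAML and inspect a few fields.
# The path is a hypothetical location for the file shown above.
import yaml

with open("tests/data/config.yml") as f:
    config = yaml.safe_load(f)

print(config["content-type"]["org"]["input-files"])    # ['~/first_from_config.org', '~/second_from_config.org']
print(config["enable-offline-chat"])                   # False
print(config["search-type"]["asymmetric"]["encoder"])  # sentence-transformers/msmarco-MiniLM-L-6-v3
print(config["version"])                               # 0.9.1.dev0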