Update default chat model to Mistral in GPT4AllProcessor config

Debanjum Singh Solanky 2023-11-26 03:46:01 -08:00
parent 4636390f7f
commit cc9eae5d18
2 changed files with 3 additions and 2 deletions

@@ -80,7 +80,7 @@ class GPT4AllProcessorConfig:
 class GPT4AllProcessorModel:
     def __init__(
         self,
-        chat_model: str = "llama-2-7b-chat.ggmlv3.q4_0.bin",
+        chat_model: str = "mistral-7b-instruct-v0.1.Q4_0.gguf",
     ):
         self.chat_model = chat_model
         self.loaded_model = None
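
For context, a minimal sketch of how a default like the one changed above is typically consumed through the gpt4all Python bindings. This is an illustration rather than the khoj code path: the GPT4All constructor and generate call are the public gpt4all API, but the load_offline_chat_model helper is a hypothetical name used only for this example.

# Illustrative only: loading the new default chat model with the gpt4all package.
# load_offline_chat_model is a hypothetical helper, not a khoj function.
from gpt4all import GPT4All


def load_offline_chat_model(chat_model: str = "mistral-7b-instruct-v0.1.Q4_0.gguf") -> GPT4All:
    # gpt4all downloads the .gguf file on first use if it is not already cached locally
    return GPT4All(model_name=chat_model)


model = load_offline_chat_model()
print(model.generate("What is the capital of France?", max_tokens=64))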

@@ -7,6 +7,7 @@ from collections import defaultdict
 # External Packages
 from pathlib import Path
 from khoj.processor.embeddings import CrossEncoderModel, EmbeddingsModel
+from whisper import Whisper

 # Internal Packages
 from khoj.utils import config as utils_config
@@ -21,7 +22,7 @@ embeddings_model: EmbeddingsModel = None
 cross_encoder_model: CrossEncoderModel = None
 content_index = ContentIndex()
 gpt4all_processor_config: GPT4AllProcessorModel = None
-whisper_model = None
+whisper_model: Whisper = None
 config_file: Path = None
 verbose: int = 0
 host: str = None
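
The second hunk above only tightens the type annotation on the whisper_model global. Below is a hedged sketch of the lazy-initialisation pattern such a module-level global supports, using the openai-whisper package; the get_whisper_model helper, the Optional annotation (a stricter variant of the commit's Whisper annotation), and the "base" model size are example choices, not khoj's code.

# Illustrative only: lazily populating a module-level whisper_model global.
from typing import Optional

import whisper
from whisper import Whisper

whisper_model: Optional[Whisper] = None


def get_whisper_model() -> Whisper:
    # Load the speech-to-text model on first use and reuse it afterwards.
    global whisper_model
    if whisper_model is None:
        whisper_model = whisper.load_model("base")  # model size is an example choice
    return whisper_model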