diff --git a/documentation/docs/get-started/setup.mdx b/documentation/docs/get-started/setup.mdx
index 29e5083e..3b2b8db5 100644
--- a/documentation/docs/get-started/setup.mdx
+++ b/documentation/docs/get-started/setup.mdx
@@ -175,7 +175,7 @@ To use the desktop client, you need to go to your Khoj server's settings page (h
 1. Go to http://localhost:42110/server/admin and log in with your admin credentials.
 1. Go to [OpenAI settings](http://localhost:42110/server/admin/database/openaiprocessorconversationconfig/) in the server admin settings to add an OpenAI processor conversation config. This is where you set your API key. Alternatively, you can go to the [offline chat settings](http://localhost:42110/server/admin/database/offlinechatprocessorconversationconfig/) and simply create a new setting with `Enabled` set to `True`.
 2. Go to the ChatModelOptions if you want to add additional models for chat.
-   - Set the `chat-model` field to a supported chat model[^1] of your choice. For example, you can specify `gpt-4` if you're using OpenAI or `mistral-7b-instruct-v0.1.Q4_0.gguf` if you're using offline chat.
+   - Set the `chat-model` field to a supported chat model[^1] of your choice. For example, you can specify `gpt-4-turbo-preview` if you're using OpenAI or `mistral-7b-instruct-v0.1.Q4_0.gguf` if you're using offline chat.
    - Make sure to set the `model-type` field to `OpenAI` or `Offline` respectively.
    - The `tokenizer` and `max-prompt-size` fields are optional. Set them only when using a non-standard model (i.e. not a mistral, gpt or llama2 model).
 1. Select files and folders to index [using the desktop client](/get-started/setup#2-download-the-desktop-client). When you click 'Save', the files will be sent to your server for indexing.
diff --git a/documentation/docs/miscellaneous/advanced.md b/documentation/docs/miscellaneous/advanced.md
index d28b34d5..b2023c1b 100644
--- a/documentation/docs/miscellaneous/advanced.md
+++ b/documentation/docs/miscellaneous/advanced.md
@@ -35,7 +35,7 @@ Use structured query syntax to filter entries from your knowledge base used by
 Use this if you want to use non-standard, open or commercial, local or hosted LLM models for Khoj chat
 1. Set up your desired chat LLM by installing an OpenAI-compatible LLM API server like [LiteLLM](https://docs.litellm.ai/docs/proxy/quick_start) or [llama-cpp-python](https://github.com/abetlen/llama-cpp-python?tab=readme-ov-file#openai-compatible-web-server)
 2. Set the environment variable `OPENAI_API_BASE=""` before starting Khoj
-3. Add ChatModelOptions with `model-type` `OpenAI`, and `chat-model` set to anything (e.g. `gpt-4`) during [Config](/get-started/setup#3-configure)
+3. Add ChatModelOptions with `model-type` `OpenAI`, and `chat-model` set to anything (e.g. `gpt-3.5-turbo`) during [Config](/get-started/setup#3-configure)
    - *(Optional)* Set the `tokenizer` and `max-prompt-size` relevant to the actual chat model you're using

 #### Sample Setup using LiteLLM and Mistral API
diff --git a/src/khoj/processor/conversation/openai/gpt.py b/src/khoj/processor/conversation/openai/gpt.py
index d4b23824..d877b810 100644
--- a/src/khoj/processor/conversation/openai/gpt.py
+++ b/src/khoj/processor/conversation/openai/gpt.py
@@ -19,7 +19,7 @@ logger = logging.getLogger(__name__)

 def extract_questions(
     text,
-    model: Optional[str] = "gpt-4",
+    model: Optional[str] = "gpt-4-turbo-preview",
     conversation_log={},
     api_key=None,
     temperature=0,
diff --git a/src/khoj/utils/constants.py b/src/khoj/utils/constants.py
index ec23cddd..b4c00df4 100644
--- a/src/khoj/utils/constants.py
+++ b/src/khoj/utils/constants.py
@@ -7,7 +7,7 @@ app_env_filepath = "~/.khoj/env"
 telemetry_server = "https://khoj.beta.haletic.com/v1/telemetry"
 content_directory = "~/.khoj/content/"
 default_offline_chat_model = "mistral-7b-instruct-v0.1.Q4_0.gguf"
-default_online_chat_model = "gpt-4"
+default_online_chat_model = "gpt-4-turbo-preview"

 empty_config = {
     "search-type": {
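
For readers following the docs changes above, here is an illustrative sketch (not part of the patch) of the field values you would enter in the ChatModelOptions form of the Khoj admin panel. The keys mirror the field names used in the docs; all values are examples, not requirements:

```python
# Illustrative only: field values for the ChatModelOptions form described above.
chat_model_options = {
    "chat-model": "gpt-4-turbo-preview",  # or e.g. "mistral-7b-instruct-v0.1.Q4_0.gguf" for offline chat
    "model-type": "OpenAI",               # or "Offline", matching the chat model above
    "tokenizer": None,                    # optional; set only for non-standard models
    "max-prompt-size": None,              # optional; set only for non-standard models
}
```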
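If you are wiring up a custom OpenAI-compatible server as described in the `advanced.md` hunk, a quick smoke test before starting Khoj can save debugging time. This is a minimal sketch, assuming the `openai` Python package (v1+) is installed and your proxy listens at `http://localhost:8000`; the URL and model alias are assumptions, not values from the patch:

```python
import os

from openai import OpenAI

# Assumed address: point this at your actual OpenAI-compatible server.
os.environ["OPENAI_API_BASE"] = "http://localhost:8000/v1"

client = OpenAI(
    base_url=os.environ["OPENAI_API_BASE"],
    api_key="sk-anything",  # many local proxies accept any non-empty key
)
response = client.chat.completions.create(
    model="gpt-3.5-turbo",  # whatever alias your proxy serves; see step 3 above
    messages=[{"role": "user", "content": "Reply with the word: ready"}],
)
print(response.choices[0].message.content)
```

If this prints a sane reply, Khoj should be able to reach the same endpoint once `OPENAI_API_BASE` is set in its environment.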
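The `gpt.py` change only moves the default of the `model` parameter, so existing callers are unaffected unless they relied on the implicit `gpt-4`. A minimal usage sketch follows; the query string and API key are placeholders, and only the partial signature visible in the hunk is assumed:

```python
from khoj.processor.conversation.openai.gpt import extract_questions

# Omitting `model` now selects "gpt-4-turbo-preview"; pass model="gpt-4"
# explicitly to keep the previous behaviour.
questions = extract_questions(
    "What did I write about gardening last spring?",
    api_key="YOUR_OPENAI_API_KEY",  # placeholder
)
```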