[Multi-User Part 8]: Make conversation processor settings server-wide (#529)

- Rather than having each individual user configure their conversation settings, allow the server admin to configure the OpenAI API key or offline model once, and let all users re-use that configuration.
- To configure the settings, the admin should go to the `django/admin` page and configure the relevant chat settings. To create an admin, run `python3 src/manage.py createsuperuser` and enter in the details. For simplicity, the email and username should match.
- Remove deprecated/unnecessary endpoints and views for configuring per-user chat settings
This commit is contained in:
sabaimran 2023-11-02 10:43:27 -07:00 committed by GitHub
parent 0fb81189ca
commit fe6720fa06
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
21 changed files with 458 additions and 509 deletions

View file

@ -30,7 +30,8 @@ from database.models import (
Entry,
GithubRepoConfig,
Conversation,
ConversationProcessorConfig,
ChatModelOptions,
UserConversationConfig,
OpenAIProcessorConversationConfig,
OfflineChatProcessorConversationConfig,
)
@ -184,27 +185,42 @@ class ConversationAdapters:
@staticmethod
def has_any_conversation_config(user: KhojUser):
return ConversationProcessorConfig.objects.filter(user=user).exists()
return ChatModelOptions.objects.filter(user=user).exists()
@staticmethod
def get_openai_conversation_config(user: KhojUser):
return OpenAIProcessorConversationConfig.objects.filter(user=user).first()
def get_openai_conversation_config():
return OpenAIProcessorConversationConfig.objects.filter().first()
@staticmethod
def get_offline_chat_conversation_config(user: KhojUser):
return OfflineChatProcessorConversationConfig.objects.filter(user=user).first()
def get_offline_chat_conversation_config():
return OfflineChatProcessorConversationConfig.objects.filter().first()
@staticmethod
def has_valid_offline_conversation_config(user: KhojUser):
return OfflineChatProcessorConversationConfig.objects.filter(user=user, enable_offline_chat=True).exists()
def has_valid_offline_conversation_config():
return OfflineChatProcessorConversationConfig.objects.filter(enabled=True).exists()
@staticmethod
def has_valid_openai_conversation_config(user: KhojUser):
return OpenAIProcessorConversationConfig.objects.filter(user=user).exists()
def has_valid_openai_conversation_config():
return OpenAIProcessorConversationConfig.objects.filter().exists()
@staticmethod
async def aset_user_conversation_processor(user: KhojUser, conversation_processor_config_id: int):
config = await ChatModelOptions.objects.filter(id=conversation_processor_config_id).afirst()
if not config:
return None
new_config = await UserConversationConfig.objects.aupdate_or_create(user=user, defaults={"setting": config})
return new_config
@staticmethod
def get_conversation_config(user: KhojUser):
return ConversationProcessorConfig.objects.filter(user=user).first()
config = UserConversationConfig.objects.filter(user=user).first()
if not config:
return None
return config.setting
@staticmethod
def get_default_conversation_config():
return ChatModelOptions.objects.filter().first()
@staticmethod
def save_conversation(user: KhojUser, conversation_log: dict):
@ -215,75 +231,45 @@ class ConversationAdapters:
Conversation.objects.create(user=user, conversation_log=conversation_log)
@staticmethod
def set_conversation_processor_config(user: KhojUser, new_config: UserConversationProcessorConfig):
conversation_config, _ = ConversationProcessorConfig.objects.get_or_create(user=user)
conversation_config.max_prompt_size = new_config.max_prompt_size
conversation_config.tokenizer = new_config.tokenizer
conversation_config.save()
if new_config.openai:
default_values = {
"api_key": new_config.openai.api_key,
}
if new_config.openai.chat_model:
default_values["chat_model"] = new_config.openai.chat_model
OpenAIProcessorConversationConfig.objects.update_or_create(user=user, defaults=default_values)
if new_config.offline_chat:
default_values = {
"enable_offline_chat": str(new_config.offline_chat.enable_offline_chat),
}
if new_config.offline_chat.chat_model:
default_values["chat_model"] = new_config.offline_chat.chat_model
OfflineChatProcessorConversationConfig.objects.update_or_create(user=user, defaults=default_values)
def get_conversation_processor_options():
return ChatModelOptions.objects.all()
@staticmethod
def get_enabled_conversation_settings(user: KhojUser):
openai_config = ConversationAdapters.get_openai_conversation_config(user)
return {
"openai": True if openai_config is not None else False,
"offline_chat": ConversationAdapters.has_offline_chat(user),
}
def set_conversation_processor_config(user: KhojUser, new_config: ChatModelOptions):
user_conversation_config, _ = UserConversationConfig.objects.get_or_create(user=user)
user_conversation_config.setting = new_config
user_conversation_config.save()
@staticmethod
def clear_conversation_config(user: KhojUser):
ConversationProcessorConfig.objects.filter(user=user).delete()
ConversationAdapters.clear_openai_conversation_config(user)
ConversationAdapters.clear_offline_chat_conversation_config(user)
def has_offline_chat():
return OfflineChatProcessorConversationConfig.objects.filter(enabled=True).exists()
@staticmethod
def clear_openai_conversation_config(user: KhojUser):
OpenAIProcessorConversationConfig.objects.filter(user=user).delete()
async def ahas_offline_chat():
return await OfflineChatProcessorConversationConfig.objects.filter(enabled=True).aexists()
@staticmethod
def clear_offline_chat_conversation_config(user: KhojUser):
OfflineChatProcessorConversationConfig.objects.filter(user=user).delete()
async def get_offline_chat():
return await ChatModelOptions.objects.filter(model_type="offline").afirst()
@staticmethod
def has_offline_chat(user: KhojUser):
return OfflineChatProcessorConversationConfig.objects.filter(user=user, enable_offline_chat=True).exists()
async def aget_user_conversation_config(user: KhojUser):
config = await UserConversationConfig.objects.filter(user=user).prefetch_related("setting").afirst()
if not config:
return None
return config.setting
@staticmethod
async def ahas_offline_chat(user: KhojUser):
return await OfflineChatProcessorConversationConfig.objects.filter(
user=user, enable_offline_chat=True
).aexists()
async def has_openai_chat():
return await OpenAIProcessorConversationConfig.objects.filter().aexists()
@staticmethod
async def get_offline_chat(user: KhojUser):
return await OfflineChatProcessorConversationConfig.objects.filter(user=user).afirst()
async def get_openai_chat():
return await OpenAIProcessorConversationConfig.objects.filter().afirst()
@staticmethod
async def has_openai_chat(user: KhojUser):
return await OpenAIProcessorConversationConfig.objects.filter(user=user).aexists()
@staticmethod
async def get_openai_chat(user: KhojUser):
return await OpenAIProcessorConversationConfig.objects.filter(user=user).afirst()
async def aget_default_conversation_config():
return await ChatModelOptions.objects.filter().afirst()
class EntryAdapters:

View file

@ -3,6 +3,15 @@ from django.contrib.auth.admin import UserAdmin
# Register your models here.
from database.models import KhojUser
from database.models import (
KhojUser,
ChatModelOptions,
OpenAIProcessorConversationConfig,
OfflineChatProcessorConversationConfig,
)
admin.site.register(KhojUser, UserAdmin)
admin.site.register(ChatModelOptions)
admin.site.register(OpenAIProcessorConversationConfig)
admin.site.register(OfflineChatProcessorConversationConfig)

View file

@ -13,19 +13,6 @@ class Migration(migrations.Migration):
]
operations = [
migrations.CreateModel(
name="ConversationProcessorConfig",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("conversation", models.JSONField()),
("enable_offline_chat", models.BooleanField(default=False)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="GithubConfig",
fields=[

View file

@ -6,7 +6,7 @@ import uuid
class Migration(migrations.Migration):
dependencies = [
("database", "0004_conversationprocessorconfig_githubconfig_and_more"),
("database", "0004_content_types_and_more"),
]
operations = [

View file

@ -0,0 +1,27 @@
# Generated by Django 4.2.5 on 2023-10-18 05:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("database", "0006_embeddingsdates"),
]
operations = [
migrations.CreateModel(
name="Conversation",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("conversation_log", models.JSONField()),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
"abstract": False,
},
),
]

View file

@ -1,81 +0,0 @@
# Generated by Django 4.2.5 on 2023-10-18 05:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("database", "0006_embeddingsdates"),
]
operations = [
migrations.RemoveField(
model_name="conversationprocessorconfig",
name="conversation",
),
migrations.RemoveField(
model_name="conversationprocessorconfig",
name="enable_offline_chat",
),
migrations.AddField(
model_name="conversationprocessorconfig",
name="max_prompt_size",
field=models.IntegerField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name="conversationprocessorconfig",
name="tokenizer",
field=models.CharField(blank=True, default=None, max_length=200, null=True),
),
migrations.AddField(
model_name="conversationprocessorconfig",
name="user",
field=models.ForeignKey(
default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
preserve_default=False,
),
migrations.CreateModel(
name="OpenAIProcessorConversationConfig",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("api_key", models.CharField(max_length=200)),
("chat_model", models.CharField(max_length=200)),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="OfflineChatProcessorConversationConfig",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("enable_offline_chat", models.BooleanField(default=False)),
("chat_model", models.CharField(default="llama-2-7b-chat.ggmlv3.q4_0.bin", max_length=200)),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="Conversation",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("conversation_log", models.JSONField()),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
"abstract": False,
},
),
]

View file

@ -5,7 +5,7 @@ from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("database", "0007_remove_conversationprocessorconfig_conversation_and_more"),
("database", "0007_add_conversation"),
]
operations = [

View file

@ -0,0 +1,83 @@
# Generated by Django 4.2.4 on 2023-11-01 17:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("database", "0009_khojapiuser"),
]
operations = [
migrations.CreateModel(
name="ChatModelOptions",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("max_prompt_size", models.IntegerField(blank=True, default=None, null=True)),
("tokenizer", models.CharField(blank=True, default=None, max_length=200, null=True)),
("chat_model", models.CharField(blank=True, default=None, max_length=200, null=True)),
(
"model_type",
models.CharField(
choices=[("openai", "Openai"), ("offline", "Offline")], default="openai", max_length=200
),
),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="OfflineChatProcessorConversationConfig",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("enabled", models.BooleanField(default=False)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="OpenAIProcessorConversationConfig",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
("api_key", models.CharField(max_length=200)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="UserConversationConfig",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("created_at", models.DateTimeField(auto_now_add=True)),
("updated_at", models.DateTimeField(auto_now=True)),
(
"setting",
models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="database.chatmodeloptions",
),
),
(
"user",
models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
],
options={
"abstract": False,
},
),
]

View file

@ -0,0 +1,12 @@
# Generated by Django 4.2.5 on 2023-11-02 01:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("database", "0010_chatmodeloptions_and_more"),
("database", "0010_rename_embeddings_entry_and_more"),
]
operations = []

View file

@ -93,20 +93,26 @@ class LocalPlaintextConfig(BaseModel):
class OpenAIProcessorConversationConfig(BaseModel):
api_key = models.CharField(max_length=200)
chat_model = models.CharField(max_length=200)
user = models.ForeignKey(KhojUser, on_delete=models.CASCADE)
class OfflineChatProcessorConversationConfig(BaseModel):
enable_offline_chat = models.BooleanField(default=False)
chat_model = models.CharField(max_length=200, default="llama-2-7b-chat.ggmlv3.q4_0.bin")
user = models.ForeignKey(KhojUser, on_delete=models.CASCADE)
enabled = models.BooleanField(default=False)
class ConversationProcessorConfig(BaseModel):
class ChatModelOptions(BaseModel):
class ModelType(models.TextChoices):
OPENAI = "openai"
OFFLINE = "offline"
max_prompt_size = models.IntegerField(default=None, null=True, blank=True)
tokenizer = models.CharField(max_length=200, default=None, null=True, blank=True)
user = models.ForeignKey(KhojUser, on_delete=models.CASCADE)
chat_model = models.CharField(max_length=200, default=None, null=True, blank=True)
model_type = models.CharField(max_length=200, choices=ModelType.choices, default=ModelType.OPENAI)
class UserConversationConfig(BaseModel):
user = models.OneToOneField(KhojUser, on_delete=models.CASCADE)
setting = models.ForeignKey(ChatModelOptions, on_delete=models.CASCADE, default=None, null=True, blank=True)
class Conversation(BaseModel):

View file

@ -12,6 +12,7 @@ import os
import schedule
from starlette.middleware.sessions import SessionMiddleware
from starlette.middleware.authentication import AuthenticationMiddleware
from starlette.requests import HTTPConnection
from starlette.authentication import (
AuthCredentials,
@ -60,7 +61,7 @@ class UserAuthenticationBackend(AuthenticationBackend):
password="default",
)
async def authenticate(self, request: Request):
async def authenticate(self, request: HTTPConnection):
current_user = request.session.get("user")
if current_user and current_user.get("email"):
user = await self.khojuser_manager.filter(email=current_user.get("email")).afirst()

View file

@ -234,6 +234,19 @@
height: 32px;
}
select#chat-models {
margin-bottom: 0;
}
div.api-settings {
width: 640px;
}
img.api-key-action:hover {
cursor: pointer;
}
@media screen and (max-width: 700px) {
.section-cards {
grid-template-columns: 1fr;
@ -268,6 +281,10 @@
div.khoj-header-wrapper {
grid-template-columns: auto;
}
div.api-settings {
width: auto;
}
}
</style>
</html>

View file

@ -10,9 +10,7 @@
<img class="card-icon" src="/static/assets/icons/github.svg" alt="Github">
<h3 class="card-title">
Github
{% if current_model_state.github == False %}
<img id="misconfigured-icon-github" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="Embeddings have not been generated yet for this content type. Either the configuration is invalid, or you just need to click Configure.">
{% else %}
{% if current_model_state.github == True %}
<img id="configured-icon-github" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% endif %}
</h3>
@ -43,9 +41,7 @@
<img class="card-icon" src="/static/assets/icons/notion.svg" alt="Notion">
<h3 class="card-title">
Notion
{% if current_model_state.notion == False %}
<img id="misconfigured-icon-notion" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="Embeddings have not been generated yet for this content type. Either the configuration is invalid, or you just need to click Configure.">
{% else %}
{% if current_model_state.notion == True %}
<img id="configured-icon-notion" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% endif %}
</h3>
@ -76,13 +72,9 @@
<img class="card-icon" src="/static/assets/icons/markdown.svg" alt="markdown">
<h3 class="card-title">
Markdown
{% if current_model_state.markdown %}
{% if current_model_state.markdown == False%}
<img id="misconfigured-icon-markdown" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="Embeddings have not been generated yet for this content type. Either the configuration is invalid, or you just need to click Configure.">
{% else %}
{% if current_model_state.markdown == True%}
<img id="configured-icon-markdown" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% endif %}
{% endif %}
</h3>
</div>
<div class="card-description-row">
@ -111,13 +103,9 @@
<img class="card-icon" src="/static/assets/icons/org.svg" alt="org">
<h3 class="card-title">
Org
{% if current_model_state.org %}
{% if current_model_state.org == False %}
<img id="misconfigured-icon-org" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="Embeddings have not been generated yet for this content type. Either the configuration is invalid, or you just need to click Configure.">
{% else %}
{% if current_model_state.org == True %}
<img id="configured-icon-org" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% endif %}
{% endif %}
</h3>
</div>
<div class="card-description-row">
@ -146,13 +134,9 @@
<img class="card-icon" src="/static/assets/icons/pdf.svg" alt="PDF">
<h3 class="card-title">
PDF
{% if current_model_state.pdf %}
{% if current_model_state.pdf == False %}
<img id="misconfigured-icon-pdf" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="Embeddings have not been generated yet for this content type. Either the configuration is invalid, or you need to click Configure.">
{% else %}
{% if current_model_state.pdf == True %}
<img id="configured-icon-pdf" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% endif %}
{% endif %}
</h3>
</div>
<div class="card-description-row">
@ -181,13 +165,9 @@
<img class="card-icon" src="/static/assets/icons/plaintext.svg" alt="Plaintext">
<h3 class="card-title">
Plaintext
{% if current_model_state.plaintext %}
{% if current_model_state.plaintext == False %}
<img id="misconfigured-icon-plaintext" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="Embeddings have not been generated yet for this content type. Either the configuration is invalid, or you need to click Configure.">
{% else %}
{% if current_model_state.plaintext == True %}
<img id="configured-icon-plaintext" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% endif %}
{% endif %}
</h3>
</div>
<div class="card-description-row">
@ -217,79 +197,37 @@
<h2 class="section-title">Features</h2>
<div id="features-hint-text"></div>
<div class="section-cards">
<div class="card">
<div class="card-title-row">
<img class="card-icon" src="/static/assets/icons/openai-logomark.svg" alt="Chat">
<h3 class="card-title">
Chat
{% if current_config.processor and current_config.processor.conversation.openai %}
{% if current_model_state.conversation_openai == False %}
<img id="misconfigured-icon-conversation-processor" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="The OpenAI configuration did not work as expected.">
{% else %}
<img id="configured-icon-conversation-processor" class="configured-icon" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% endif %}
{% endif %}
</h3>
</div>
<div class="card-description-row">
<p class="card-description">Setup online chat using OpenAI</p>
</div>
<div class="card-action-row">
<a class="card-button" href="/config/processor/conversation/openai">
{% if current_config.processor and current_config.processor.conversation.openai %}
Update
{% else %}
Setup
{% endif %}
<svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"><path d="M5 12h14M12 5l7 7-7 7"></path></svg>
</a>
</div>
{% if current_config.processor and current_config.processor.conversation.openai %}
<div id="clear-conversation" class="card-action-row">
<button class="card-button" onclick="clearConversationProcessor()">
Disable
</button>
</div>
{% endif %}
</div>
<div class="card">
<div class="card-title-row">
<img class="card-icon" src="/static/assets/icons/chat.svg" alt="Chat">
<h3 class="card-title">
Offline Chat
<img id="configured-icon-conversation-enable-offline-chat" class="configured-icon {% if current_model_state.enable_offline_model and current_model_state.conversation_gpt4all %}enabled{% else %}disabled{% endif %}" src="/static/assets/icons/confirm-icon.svg" alt="Configured">
{% if current_model_state.enable_offline_model and not current_model_state.conversation_gpt4all %}
<img id="misconfigured-icon-conversation-enable-offline-chat" class="configured-icon" src="/static/assets/icons/question-mark-icon.svg" alt="Not Configured" title="The model was not downloaded as expected.">
{% endif %}
Chat Model
</h3>
</div>
<div class="card-description-row">
<p class="card-description">Setup offline chat</p>
<select id="chat-models">
{% for option in conversation_options %}
<option value="{{ option.id }}" {% if option.id == selected_conversation_config %}selected{% endif %}>{{ option.chat_model }}</option>
{% endfor %}
</select>
</div>
<div id="clear-enable-offline-chat" class="card-action-row {% if current_model_state.enable_offline_model %}enabled{% else %}disabled{% endif %}">
<button class="card-button" onclick="toggleEnableLocalLLLM(false)">
Disable
<div class="card-action-row">
<button id="save-model" class="card-button happy" onclick="updateChatModel()">
Save
</button>
</div>
<div id="set-enable-offline-chat" class="card-action-row {% if current_model_state.enable_offline_model %}disabled{% else %}enabled{% endif %}">
<button class="card-button happy" onclick="toggleEnableLocalLLLM(true)">
Enable
</button>
</div>
<div id="toggle-enable-offline-chat" class="card-action-row disabled">
<div class="loader"></div>
</div>
</div>
</div>
</div>
<div class="section">
<h2 class="section-title">Clients</h2>
<div class="api-settings">
<div class="card-title-row">
<img class="card-icon" src="/static/assets/icons/key.svg" alt="API Key">
<h3 class="card-title">API Keys</h3>
</div>
<div class="card-description-row">
<p id="api-settings-card-description" class="card-description">Manage access to your Khoj from client apps</p>
<p id="api-settings-card-description" class="card-description">Manage access from your client apps to Khoj</p>
</div>
<table id="api-settings-keys-table">
<thead>
@ -328,13 +266,35 @@
</div>
<script>
function updateChatModel() {
const chatModel = document.getElementById("chat-models").value;
const saveModelButton = document.getElementById("save-model");
saveModelButton.disabled = true;
saveModelButton.innerHTML = "Saving...";
fetch('/api/config/data/conversation/model?id=' + chatModel, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
}
})
.then(response => response.json())
.then(data => {
if (data.status == "ok") {
saveModelButton.innerHTML = "Save";
saveModelButton.disabled = false;
} else {
saveModelButton.innerHTML = "Error";
saveModelButton.disabled = false;
}
})
};
function clearContentType(content_type) {
const csrfToken = document.cookie.split('; ').find(row => row.startsWith('csrftoken'))?.split('=')[1];
fetch('/api/delete/config/data/content_type/' + content_type, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-CSRFToken': csrfToken
}
})
.then(response => response.json())
@ -356,96 +316,6 @@
})
};
function toggleEnableLocalLLLM(enable) {
const csrfToken = document.cookie.split('; ').find(row => row.startsWith('csrftoken'))?.split('=')[1];
var toggleEnableLocalLLLMButton = document.getElementById("toggle-enable-offline-chat");
var featuresHintText = document.getElementById("features-hint-text");
toggleEnableLocalLLLMButton.classList.remove("disabled");
toggleEnableLocalLLLMButton.classList.add("enabled");
if (enable) {
featuresHintText.style.display = "block";
featuresHintText.innerHTML = "An open source model is being downloaded in the background. Hang tight, this may take a few minutes ⏳.";
featuresHintText.classList.add("show");
}
fetch('/api/config/data/processor/conversation/offline_chat' + '?enable_offline_chat=' + enable, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-CSRFToken': csrfToken
},
})
.then(response => response.json())
.then(data => {
if (data.status != "ok") {
featuresHintText.innerHTML = `🚨 Failed to ${enable ? "enable": "disable"} offline chat model! Inform server admins.`;
enable = !enable;
} else {
featuresHintText.classList.remove("show");
featuresHintText.innerHTML = "";
}
// Toggle the Enabled/Disabled UI based on the action/response.
var enableLocalLLLMButton = document.getElementById("set-enable-offline-chat");
var disableLocalLLLMButton = document.getElementById("clear-enable-offline-chat");
var configuredIcon = document.getElementById("configured-icon-conversation-enable-offline-chat");
var toggleEnableLocalLLLMButton = document.getElementById("toggle-enable-offline-chat");
toggleEnableLocalLLLMButton.classList.remove("enabled");
toggleEnableLocalLLLMButton.classList.add("disabled");
if (enable) {
enableLocalLLLMButton.classList.add("disabled");
enableLocalLLLMButton.classList.remove("enabled");
configuredIcon.classList.add("enabled");
configuredIcon.classList.remove("disabled");
disableLocalLLLMButton.classList.remove("disabled");
disableLocalLLLMButton.classList.add("enabled");
} else {
enableLocalLLLMButton.classList.remove("disabled");
enableLocalLLLMButton.classList.add("enabled");
configuredIcon.classList.remove("enabled");
configuredIcon.classList.add("disabled");
disableLocalLLLMButton.classList.add("disabled");
disableLocalLLLMButton.classList.remove("enabled");
}
})
}
function clearConversationProcessor() {
const csrfToken = document.cookie.split('; ').find(row => row.startsWith('csrftoken'))?.split('=')[1];
fetch('/api/delete/config/data/processor/conversation/openai', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-CSRFToken': csrfToken
}
})
.then(response => response.json())
.then(data => {
if (data.status == "ok") {
var conversationClearButton = document.getElementById("clear-conversation");
conversationClearButton.style.display = "none";
var configuredIcon = document.getElementById("configured-icon-conversation-processor");
if (configuredIcon) {
configuredIcon.style.display = "none";
}
var misconfiguredIcon = document.getElementById("misconfigured-icon-conversation-processor");
if (misconfiguredIcon) {
misconfiguredIcon.style.display = "none";
}
}
})
};
var configure = document.getElementById("configure");
configure.addEventListener("click", function(event) {
event.preventDefault();
@ -572,8 +442,8 @@
<td><b>${tokenName}</b></td>
<td id="api-key-${token}">${truncatedToken}</td>
<td>
<img onclick="copyAPIKey('${token}')" class="configured-icon enabled" src="/static/assets/icons/copy-solid.svg" alt="Copy API Key" title="Copy API Key">
<img onclick="deleteAPIKey('${token}')" class="configured-icon enabled" src="/static/assets/icons/trash-solid.svg" alt="Delete API Key" title="Delete API Key">
<img onclick="copyAPIKey('${token}')" class="configured-icon api-key-action enabled" src="/static/assets/icons/copy-solid.svg" alt="Copy API Key" title="Copy API Key">
<img onclick="deleteAPIKey('${token}')" class="configured-icon api-key-action enabled" src="/static/assets/icons/trash-solid.svg" alt="Delete API Key" title="Delete API Key">
</td>
</tr>
`;

View file

@ -0,0 +1,117 @@
"""
The application config currently looks like this:
app:
should-log-telemetry: true
content-type:
...
processor:
conversation:
conversation-logfile: ~/.khoj/processor/conversation/conversation_logs.json
max-prompt-size: null
offline-chat:
chat-model: llama-2-7b-chat.ggmlv3.q4_0.bin
enable-offline-chat: false
openai:
api-key: sk-blah
chat-model: gpt-3.5-turbo
tokenizer: null
search-type:
asymmetric:
cross-encoder: cross-encoder/ms-marco-MiniLM-L-6-v2
encoder: sentence-transformers/multi-qa-MiniLM-L6-cos-v1
encoder-type: null
model-directory: /Users/si/.khoj/search/asymmetric
image:
encoder: sentence-transformers/clip-ViT-B-32
encoder-type: null
model-directory: /Users/si/.khoj/search/image
symmetric:
cross-encoder: cross-encoder/ms-marco-MiniLM-L-6-v2
encoder: sentence-transformers/all-MiniLM-L6-v2
encoder-type: null
model-directory: ~/.khoj/search/symmetric
version: 0.12.4
The new version will look like this:
app:
should-log-telemetry: true
processor:
conversation:
offline-chat:
enabled: false
openai:
api-key: sk-blah
chat-model-options:
- chat-model: gpt-3.5-turbo
tokenizer: null
type: openai
- chat-model: llama-2-7b-chat.ggmlv3.q4_0.bin
tokenizer: null
type: offline
search-type:
asymmetric:
cross-encoder: cross-encoder/ms-marco-MiniLM-L-6-v2
encoder: sentence-transformers/multi-qa-MiniLM-L6-cos-v1
image:
encoder: sentence-transformers/clip-ViT-B-32
encoder-type: null
model-directory: /Users/si/.khoj/search/image
version: 0.12.4
"""
import logging
from packaging import version
from khoj.utils.yaml import load_config_from_file, save_config_to_file
from database.models import (
OpenAIProcessorConversationConfig,
OfflineChatProcessorConversationConfig,
ChatModelOptions,
)
logger = logging.getLogger(__name__)
def migrate_server_pg(args):
    """Migrate pre-0.14.0 YAML conversation settings into server-wide database rows.

    Loads the YAML config referenced by ``args.config_file``; when its recorded
    schema version predates 0.14.0, copies the file-based offline-chat and
    OpenAI conversation settings into the ``OfflineChatProcessorConversationConfig``,
    ``OpenAIProcessorConversationConfig`` and ``ChatModelOptions`` database tables,
    bumps the stored schema version, and writes the updated config back to disk.

    Args:
        args: Parsed CLI args; must expose ``config_file`` (path to YAML config)
            and ``version_no`` (current application version, used for logging).

    Returns:
        The same ``args`` object, so migrations can be chained.
    """
    schema_version = "0.14.0"
    raw_config = load_config_from_file(args.config_file)
    previous_version = raw_config.get("version")

    # Only run when the config predates this schema (or has no version at all).
    if previous_version is None or version.parse(previous_version) < version.parse(schema_version):
        logger.info(
            f"Migrating configuration used for version {previous_version} to latest version for server with postgres in {args.version_no}"
        )
        raw_config["version"] = schema_version

        if "processor" in raw_config and "conversation" in raw_config["processor"]:
            processor_conversation = raw_config["processor"]["conversation"]

            # Move offline-chat settings into server-wide database records.
            if "offline-chat" in processor_conversation:
                offline_chat = processor_conversation["offline-chat"]
                OfflineChatProcessorConversationConfig.objects.create(
                    enabled=offline_chat.get("enable-offline-chat"),
                )
                ChatModelOptions.objects.create(
                    chat_model=offline_chat.get("chat-model"),
                    tokenizer=processor_conversation.get("tokenizer"),
                    max_prompt_size=processor_conversation.get("max-prompt-size"),
                    model_type=ChatModelOptions.ModelType.OFFLINE,
                )

            # Move OpenAI settings into server-wide database records.
            if "openai" in processor_conversation:
                openai = processor_conversation["openai"]
                OpenAIProcessorConversationConfig.objects.create(
                    api_key=openai.get("api-key"),
                )
                ChatModelOptions.objects.create(
                    chat_model=openai.get("chat-model"),
                    tokenizer=processor_conversation.get("tokenizer"),
                    max_prompt_size=processor_conversation.get("max-prompt-size"),
                    model_type=ChatModelOptions.ModelType.OPENAI,
                )

        save_config_to_file(raw_config, args.config_file)

    return args

View file

@ -255,7 +255,7 @@ help_message = PromptTemplate.from_template(
**/default**: Chat using your knowledge base and Khoj's general knowledge for context.
**/help**: Show this help message.
You are using the **{model}** model.
You are using the **{model}** model on the **{device}**.
**version**: {version}
""".strip()
)

View file

@ -5,7 +5,6 @@ import time
import logging
import json
from typing import List, Optional, Union, Any
import asyncio
# External Packages
from fastapi import APIRouter, HTTPException, Header, Request
@ -25,19 +24,16 @@ from khoj.utils.rawconfig import (
SearchConfig,
SearchResponse,
TextContentConfig,
OpenAIProcessorConfig,
GithubContentConfig,
NotionContentConfig,
ConversationProcessorConfig,
OfflineChatProcessorConfig,
)
from khoj.utils.state import SearchType
from khoj.utils import state, constants
from khoj.utils.helpers import AsyncIteratorWrapper
from khoj.utils.helpers import AsyncIteratorWrapper, get_device
from fastapi.responses import StreamingResponse, Response
from khoj.routers.helpers import (
get_conversation_command,
perform_chat_checks,
validate_conversation_config,
agenerate_chat_response,
update_telemetry_state,
is_ready_to_chat,
@ -113,8 +109,6 @@ async def map_config_to_db(config: FullConfig, user: KhojUser):
user=user,
token=config.content_type.notion.token,
)
if config.processor and config.processor.conversation:
ConversationAdapters.set_conversation_processor_config(user, config.processor.conversation)
# If it's a demo instance, prevent updating any of the configuration.
@ -246,26 +240,6 @@ if not state.demo:
enabled_content = await sync_to_async(EntryAdapters.get_unique_file_types)(user)
return {"status": "ok"}
@api.post("/delete/config/data/processor/conversation/openai", status_code=200)
@requires(["authenticated"])
async def remove_processor_conversation_config_data(
request: Request,
client: Optional[str] = None,
):
user = request.user.object
await sync_to_async(ConversationAdapters.clear_openai_conversation_config)(user)
update_telemetry_state(
request=request,
telemetry_type="api",
api="delete_processor_openai_config",
client=client,
metadata={"processor_conversation_type": "openai"},
)
return {"status": "ok"}
@api.post("/config/data/content_type/{content_type}", status_code=200)
@requires(["authenticated"])
async def set_content_config_data(
@ -291,70 +265,27 @@ if not state.demo:
return {"status": "ok"}
@api.post("/config/data/processor/conversation/openai", status_code=200)
@api.post("/config/data/conversation/model", status_code=200)
@requires(["authenticated"])
async def set_processor_openai_config_data(
async def update_chat_model(
request: Request,
updated_config: Union[OpenAIProcessorConfig, None],
id: str,
client: Optional[str] = None,
):
user = request.user.object
conversation_config = ConversationProcessorConfig(openai=updated_config)
await sync_to_async(ConversationAdapters.set_conversation_processor_config)(user, conversation_config)
new_config = await ConversationAdapters.aset_user_conversation_processor(user, int(id))
update_telemetry_state(
request=request,
telemetry_type="api",
api="set_processor_config",
api="set_conversation_chat_model",
client=client,
metadata={"processor_conversation_type": "conversation"},
)
return {"status": "ok"}
@api.post("/config/data/processor/conversation/offline_chat", status_code=200)
@requires(["authenticated"])
async def set_processor_enable_offline_chat_config_data(
request: Request,
enable_offline_chat: bool,
offline_chat_model: Optional[str] = None,
client: Optional[str] = None,
):
user = request.user.object
try:
if enable_offline_chat:
conversation_config = ConversationProcessorConfig(
offline_chat=OfflineChatProcessorConfig(
enable_offline_chat=enable_offline_chat,
chat_model=offline_chat_model,
)
)
await sync_to_async(ConversationAdapters.set_conversation_processor_config)(user, conversation_config)
offline_chat = await ConversationAdapters.get_offline_chat(user)
chat_model = offline_chat.chat_model
if state.gpt4all_processor_config is None:
state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model=chat_model)
else:
await sync_to_async(ConversationAdapters.clear_offline_chat_conversation_config)(user)
state.gpt4all_processor_config = None
except Exception as e:
logger.error(f"Error updating offline chat config: {e}", exc_info=True)
return {"status": "error", "message": str(e)}
update_telemetry_state(
request=request,
telemetry_type="api",
api="set_processor_config",
client=client,
metadata={"processor_conversation_type": f"{'enable' if enable_offline_chat else 'disable'}_local_llm"},
)
if new_config is None:
return {"status": "error", "message": "Model not found"}
return {"status": "ok"}
@ -572,7 +503,7 @@ def chat_history(
host: Optional[str] = Header(None),
):
user = request.user.object
perform_chat_checks(user)
validate_conversation_config()
# Load Conversation History
meta_log = ConversationAdapters.get_conversation_by_user(user=user).conversation_log
@ -644,8 +575,11 @@ async def chat(
conversation_command = ConversationCommand.General
if conversation_command == ConversationCommand.Help:
model_type = "offline" if await ConversationAdapters.has_offline_chat(user) else "openai"
formatted_help = help_message.format(model=model_type, version=state.khoj_version)
conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
if conversation_config == None:
conversation_config = await ConversationAdapters.aget_default_conversation_config()
model_type = conversation_config.model_type
formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
return StreamingResponse(iter([formatted_help]), media_type="text/event-stream", status_code=200)
# Get the (streamed) chat response from the LLM of choice.
@ -723,9 +657,9 @@ async def extract_references_and_questions(
# Infer search queries from user message
with timer("Extracting search queries took", logger):
# If we've reached here, either the user has enabled offline chat or the openai model is enabled.
if await ConversationAdapters.ahas_offline_chat(user):
if await ConversationAdapters.ahas_offline_chat():
using_offline_chat = True
offline_chat = await ConversationAdapters.get_offline_chat(user)
offline_chat = await ConversationAdapters.get_offline_chat()
chat_model = offline_chat.chat_model
if state.gpt4all_processor_config is None:
state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model=chat_model)
@ -735,8 +669,8 @@ async def extract_references_and_questions(
inferred_queries = extract_questions_offline(
defiltered_query, loaded_model=loaded_model, conversation_log=meta_log, should_extract_questions=False
)
elif await ConversationAdapters.has_openai_chat(user):
openai_chat = await ConversationAdapters.get_openai_chat(user)
elif await ConversationAdapters.has_openai_chat():
openai_chat = await ConversationAdapters.get_openai_chat()
api_key = openai_chat.api_key
chat_model = openai_chat.chat_model
inferred_queries = extract_questions(

View file

@ -21,22 +21,25 @@ logger = logging.getLogger(__name__)
executor = ThreadPoolExecutor(max_workers=1)
def perform_chat_checks(user: KhojUser):
if ConversationAdapters.has_valid_offline_conversation_config(
user
) or ConversationAdapters.has_valid_openai_conversation_config(user):
def validate_conversation_config():
if (
ConversationAdapters.has_valid_offline_conversation_config()
or ConversationAdapters.has_valid_openai_conversation_config()
):
if ConversationAdapters.get_default_conversation_config() is None:
raise HTTPException(status_code=500, detail="Contact the server administrator to set a default chat model.")
return
raise HTTPException(status_code=500, detail="Set your OpenAI API key or enable Local LLM via Khoj settings.")
async def is_ready_to_chat(user: KhojUser):
has_offline_config = await ConversationAdapters.ahas_offline_chat(user=user)
has_openai_config = await ConversationAdapters.has_openai_chat(user=user)
has_offline_config = await ConversationAdapters.ahas_offline_chat()
has_openai_config = await ConversationAdapters.has_openai_chat()
user_conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
if has_offline_config:
offline_chat = await ConversationAdapters.get_offline_chat(user)
chat_model = offline_chat.chat_model
if has_offline_config and user_conversation_config and user_conversation_config.model_type == "offline":
chat_model = user_conversation_config.chat_model
if state.gpt4all_processor_config is None:
state.gpt4all_processor_config = GPT4AllProcessorModel(chat_model=chat_model)
return True
@ -139,10 +142,12 @@ def generate_chat_response(
meta_log=meta_log,
)
offline_chat_config = ConversationAdapters.get_offline_chat_conversation_config(user=user)
offline_chat_config = ConversationAdapters.get_offline_chat_conversation_config()
conversation_config = ConversationAdapters.get_conversation_config(user)
openai_chat_config = ConversationAdapters.get_openai_conversation_config(user)
if offline_chat_config:
if conversation_config is None:
conversation_config = ConversationAdapters.get_default_conversation_config()
openai_chat_config = ConversationAdapters.get_openai_conversation_config()
if offline_chat_config and offline_chat_config.enabled and conversation_config.model_type == "offline":
if state.gpt4all_processor_config.loaded_model is None:
state.gpt4all_processor_config = GPT4AllProcessorModel(offline_chat_config.chat_model)
@ -154,14 +159,14 @@ def generate_chat_response(
conversation_log=meta_log,
completion_func=partial_completion,
conversation_command=conversation_command,
model=offline_chat_config.chat_model,
model=conversation_config.chat_model,
max_prompt_size=conversation_config.max_prompt_size,
tokenizer_name=conversation_config.tokenizer,
)
elif openai_chat_config:
elif openai_chat_config and conversation_config.model_type == "openai":
api_key = openai_chat_config.api_key
chat_model = openai_chat_config.chat_model
chat_model = conversation_config.chat_model
chat_response = converse(
compiled_references,
q,
@ -170,8 +175,8 @@ def generate_chat_response(
api_key=api_key,
completion_func=partial_completion,
conversation_command=conversation_command,
max_prompt_size=conversation_config.max_prompt_size if conversation_config else None,
tokenizer_name=conversation_config.tokenizer if conversation_config else None,
max_prompt_size=conversation_config.max_prompt_size,
tokenizer_name=conversation_config.tokenizer,
)
except Exception as e:

View file

@ -10,7 +10,6 @@ from fastapi.templating import Jinja2Templates
from starlette.authentication import requires
from khoj.utils.rawconfig import (
TextContentConfig,
OpenAIProcessorConfig,
FullConfig,
GithubContentConfig,
GithubRepoConfig,
@ -119,12 +118,6 @@ if not state.demo:
user = request.user.object
user_picture = request.session.get("user", {}).get("picture")
enabled_content = set(EntryAdapters.get_unique_file_types(user).all())
default_full_config = FullConfig(
content_type=None,
search_type=None,
processor=None,
)
current_config = state.config or json.loads(default_full_config.json())
successfully_configured = {
"pdf": ("pdf" in enabled_content),
@ -143,26 +136,26 @@ if not state.demo:
}
)
enabled_chat_config = ConversationAdapters.get_enabled_conversation_settings(user)
successfully_configured.update(
{
"conversation_openai": enabled_chat_config["openai"],
"enable_offline_model": enabled_chat_config["offline_chat"],
"conversation_gpt4all": state.gpt4all_processor_config.loaded_model is not None
if state.gpt4all_processor_config
else False,
}
conversation_options = ConversationAdapters.get_conversation_processor_options().all()
all_conversation_options = list()
for conversation_option in conversation_options:
all_conversation_options.append(
{"chat_model": conversation_option.chat_model, "id": conversation_option.id}
)
selected_conversation_config = ConversationAdapters.get_conversation_config(user)
return templates.TemplateResponse(
"config.html",
context={
"request": request,
"current_config": current_config,
"current_model_state": successfully_configured,
"anonymous_mode": state.anonymous_mode,
"username": user.username,
"username": user.username if user else None,
"conversation_options": all_conversation_options,
"selected_conversation_config": selected_conversation_config.id
if selected_conversation_config
else None,
"user_photo": user_picture,
},
)
@ -256,33 +249,3 @@ if not state.demo:
"user_photo": user_picture,
},
)
@web_client.get("/config/processor/conversation/openai", response_class=HTMLResponse)
@requires(["authenticated"], redirect="login_page")
def conversation_processor_config_page(request: Request):
user = request.user.object
user_picture = request.session.get("user", {}).get("picture")
openai_config = ConversationAdapters.get_openai_conversation_config(user)
if openai_config:
current_processor_openai_config = OpenAIProcessorConfig(
api_key=openai_config.api_key,
chat_model=openai_config.chat_model,
)
else:
current_processor_openai_config = OpenAIProcessorConfig(
api_key="",
chat_model="gpt-3.5-turbo",
)
current_processor_openai_config = json.loads(current_processor_openai_config.json())
return templates.TemplateResponse(
"processor_conversation_input.html",
context={
"request": request,
"current_config": current_processor_openai_config,
"username": user.username,
"user_photo": user_picture,
},
)

View file

@ -14,6 +14,7 @@ from khoj.migrations.migrate_version import migrate_config_to_version
from khoj.migrations.migrate_processor_config_openai import migrate_processor_conversation_schema
from khoj.migrations.migrate_offline_model import migrate_offline_model
from khoj.migrations.migrate_offline_chat_schema import migrate_offline_chat_schema
from khoj.migrations.migrate_server_pg import migrate_server_pg
def cli(args=None):
@ -75,6 +76,7 @@ def run_migrations(args):
migrate_processor_conversation_schema,
migrate_offline_model,
migrate_offline_chat_schema,
migrate_server_pg,
]
for migration in migrations:
args = migration(args)

View file

@ -39,9 +39,10 @@ from database.models import (
from tests.helpers import (
UserFactory,
ConversationProcessorConfigFactory,
ChatModelOptionsFactory,
OpenAIProcessorConversationConfigFactory,
OfflineChatProcessorConversationConfigFactory,
UserConversationProcessorConfigFactory,
)
@ -188,7 +189,9 @@ def chat_client(search_config: SearchConfig, default_user2: KhojUser):
# Initialize Processor from Config
if os.getenv("OPENAI_API_KEY"):
OpenAIProcessorConversationConfigFactory(user=default_user2)
chat_model = ChatModelOptionsFactory(chat_model="gpt-3.5-turbo", model_type="openai")
OpenAIProcessorConversationConfigFactory()
UserConversationProcessorConfigFactory(user=default_user2, setting=chat_model)
state.anonymous_mode = False
@ -257,7 +260,6 @@ def client(
user=api_user.user,
)
ConversationProcessorConfigFactory(user=api_user.user)
state.anonymous_mode = False
configure_routes(app)
@ -284,8 +286,8 @@ def client_offline_chat(search_config: SearchConfig, default_user2: KhojUser):
)
# Initialize Processor from Config
ConversationProcessorConfigFactory(user=default_user2)
OfflineChatProcessorConversationConfigFactory(user=default_user2)
OfflineChatProcessorConversationConfigFactory(enabled=True)
UserConversationProcessorConfigFactory(user=default_user2)
state.anonymous_mode = True

View file

@ -4,9 +4,10 @@ import os
from database.models import (
KhojUser,
KhojApiUser,
ConversationProcessorConfig,
ChatModelOptions,
OfflineChatProcessorConversationConfig,
OpenAIProcessorConversationConfig,
UserConversationConfig,
Conversation,
)
@ -30,20 +31,29 @@ class ApiUserFactory(factory.django.DjangoModelFactory):
token = factory.Faker("password")
class ConversationProcessorConfigFactory(factory.django.DjangoModelFactory):
class ChatModelOptionsFactory(factory.django.DjangoModelFactory):
class Meta:
model = ConversationProcessorConfig
model = ChatModelOptions
max_prompt_size = 2000
tokenizer = None
chat_model = "llama-2-7b-chat.ggmlv3.q4_0.bin"
model_type = "offline"
class UserConversationProcessorConfigFactory(factory.django.DjangoModelFactory):
class Meta:
model = UserConversationConfig
user = factory.SubFactory(UserFactory)
setting = factory.SubFactory(ChatModelOptionsFactory)
class OfflineChatProcessorConversationConfigFactory(factory.django.DjangoModelFactory):
class Meta:
model = OfflineChatProcessorConversationConfig
enable_offline_chat = True
chat_model = "llama-2-7b-chat.ggmlv3.q4_0.bin"
enabled = True
class OpenAIProcessorConversationConfigFactory(factory.django.DjangoModelFactory):
@ -51,7 +61,6 @@ class OpenAIProcessorConversationConfigFactory(factory.django.DjangoModelFactory
model = OpenAIProcessorConversationConfig
api_key = os.getenv("OPENAI_API_KEY")
chat_model = "gpt-3.5-turbo"
class ConversationFactory(factory.django.DjangoModelFactory):