import os
from datetime import datetime

import factory
from django.utils.timezone import make_aware

from khoj.database.models import (
    ChatModelOptions,
    Conversation,
    KhojApiUser,
    KhojUser,
    OpenAIProcessorConversationConfig,
    ProcessLock,
    SearchModelConfig,
    Subscription,
    UserConversationConfig,
)
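
# factory_boy factories for building Khoj database models in tests.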


class UserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = KhojUser

    username = factory.Faker("name")
    email = factory.Faker("email")
    password = factory.Faker("password")
    uuid = factory.Faker("uuid4")


class ApiUserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = KhojApiUser

    # The owning user is supplied by the test, e.g. ApiUserFactory(user=some_user).
    user = None
    name = factory.Faker("name")
    token = factory.Faker("password")


class OpenAIProcessorConversationConfigFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = OpenAIProcessorConversationConfig

    api_key = os.getenv("OPENAI_API_KEY")


class ChatModelOptionsFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = ChatModelOptions

    # Defaults follow the move from gpt4all to llama-cpp-python for offline
    # chat: it supports all GGUF format chat models on AMD, Nvidia, Mac, and
    # Vulkan GPU machines (instead of just Vulkan and Mac), plus models with
    # more capabilities like tools, schema enforcement, and speculative
    # decoding. The default chat model, prompt size, and tokenizer were
    # upgraded accordingly; models already on disk load without internet
    # (see the loader sketch after this class).
    max_prompt_size = 3500
    tokenizer = None
    chat_model = "NousResearch/Hermes-2-Pro-Mistral-7B-GGUF"
    model_type = "offline"
    # Attach an OpenAI processor config only when an API key is available.
    openai_config = factory.LazyAttribute(
        lambda obj: OpenAIProcessorConversationConfigFactory() if os.getenv("OPENAI_API_KEY") else None
    )
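

# The llama.cpp notes condensed in ChatModelOptionsFactory describe a
# disk-first loading pattern: reuse a local GGUF file when the model glob
# matches on disk, since Llama.from_pretrained() needs internet to fetch repo
# info from HuggingFace. A minimal sketch of that pattern, assuming a simple
# cache_dir layout; the function and parameter names here are illustrative
# assumptions, not Khoj's actual helper.
def load_offline_chat_model_sketch(repo_id: str, filename_glob: str, cache_dir: str):
    import glob

    from llama_cpp import Llama  # lazy import: llama-cpp-python is optional

    # Check for an already-downloaded model matching the glob before going online.
    matches = glob.glob(os.path.join(cache_dir, "**", filename_glob), recursive=True)
    if matches:
        try:
            # Prefer the GPU; n_gpu_layers=-1 offloads all layers.
            return Llama(model_path=matches[0], n_gpu_layers=-1)
        except Exception:
            # Fall back to the CPU if loading onto the GPU fails.
            return Llama(model_path=matches[0], n_gpu_layers=0)
    # No local copy: Llama.from_pretrained downloads from HuggingFace (needs internet).
    return Llama.from_pretrained(repo_id=repo_id, filename=filename_glob)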


class UserConversationProcessorConfigFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = UserConversationConfig

    user = factory.SubFactory(UserFactory)
    setting = factory.SubFactory(ChatModelOptionsFactory)


class ConversationFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Conversation

    user = factory.SubFactory(UserFactory)


class SearchModelFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = SearchModelConfig

    name = "default"
    model_type = "text"
    bi_encoder = "thenlper/gte-small"
    cross_encoder = "mixedbread-ai/mxbai-rerank-xsmall-v1"


class SubscriptionFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Subscription

    user = factory.SubFactory(UserFactory)
    type = "standard"
    is_recurring = False
    # Far-future renewal date keeps test subscriptions active; make_aware
    # attaches the project's default timezone to the naive datetime.
    renewal_date = make_aware(datetime.strptime("2100-04-01", "%Y-%m-%d"))


class ProcessLockFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = ProcessLock

    name = "test_lock"
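

# A sketch of how these factories might be combined in a pytest-django test;
# the fixture and test names below are illustrative, not part of this module.
#
#   import pytest
#
#   @pytest.fixture
#   def default_user(db):
#       user = UserFactory()
#       SubscriptionFactory(user=user)
#       return user
#
#   def test_user_conversation_config(default_user):
#       config = UserConversationProcessorConfigFactory(user=default_user)
#       assert config.user == default_user
#       assert config.setting.chat_model == "NousResearch/Hermes-2-Pro-Mistral-7B-GGUF"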