Improve the admin experience, add more metadata to list_display

- Don't propagate max_tokens to the OpenAI chat completion method. The maximum output for the newer models is fixed at 4096 tokens; the configured token limit only applies to the input.
parent 01cdc54ad0
commit 9ebf3a4d80

4 changed files with 32 additions and 7 deletions
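For context on the token-limit point, here is a minimal sketch of the input-side truncation the commit message alludes to. The helper name, the use of tiktoken, and the message shape are illustrative assumptions, not code from this repository:

import tiktoken

def truncate_messages(messages, max_prompt_size, model):
    # Hypothetical helper: drop the oldest messages until the prompt
    # fits within the configured *input* token limit. Output length is
    # left entirely to the model's fixed output cap.
    encoder = tiktoken.encoding_for_model(model)

    def prompt_tokens():
        return sum(len(encoder.encode(m["content"])) for m in messages)

    while len(messages) > 1 and prompt_tokens() > max_prompt_size:
        messages.pop(0)  # oldest context goes first
    return messages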
@@ -78,10 +78,8 @@ class KhojUserAdmin(UserAdmin):
 
 admin.site.register(KhojUser, KhojUserAdmin)
 
-admin.site.register(ChatModelOptions)
 admin.site.register(ProcessLock)
 admin.site.register(SpeechToTextModelOptions)
-admin.site.register(OpenAIProcessorConversationConfig)
 admin.site.register(SearchModelConfig)
 admin.site.register(ReflectiveQuestion)
 admin.site.register(UserSearchModelConfig)
@@ -89,7 +87,6 @@ admin.site.register(TextToImageModelConfig)
 admin.site.register(ClientApplication)
 admin.site.register(GithubConfig)
 admin.site.register(NotionConfig)
-admin.site.register(ServerChatSettings)
 
 
 @admin.register(Agent)
@@ -131,6 +128,36 @@ class KhojUserSubscription(admin.ModelAdmin):
     list_filter = ("type",)
 
 
+@admin.register(ChatModelOptions)
+class ChatModelOptionsAdmin(admin.ModelAdmin):
+    list_display = (
+        "id",
+        "chat_model",
+        "model_type",
+        "max_prompt_size",
+    )
+    search_fields = ("id", "chat_model", "model_type")
+
+
+@admin.register(OpenAIProcessorConversationConfig)
+class OpenAIProcessorConversationConfigAdmin(admin.ModelAdmin):
+    list_display = (
+        "id",
+        "name",
+        "api_key",
+        "api_base_url",
+    )
+    search_fields = ("id", "name", "api_key", "api_base_url")
+
+
+@admin.register(ServerChatSettings)
+class ServerChatSettingsAdmin(admin.ModelAdmin):
+    list_display = (
+        "default_model",
+        "summarizer_model",
+    )
+
+
 @admin.register(Conversation)
 class ConversationAdmin(admin.ModelAdmin):
     list_display = (
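The bare admin.site.register(...) calls removed in the first two hunks would clash with the decorator-based registrations added here, since Django only allows a model to be registered once per admin site. A sketch of the equivalent non-decorator form, for illustration only:

from django.contrib import admin

# Illustrative equivalent of the @admin.register(ChatModelOptions) block above.
class ChatModelOptionsAdmin(admin.ModelAdmin):
    list_display = ("id", "chat_model", "model_type", "max_prompt_size")
    search_fields = ("id", "chat_model", "model_type")

# Registering ChatModelOptions a second time would raise
# django.contrib.admin.sites.AlreadyRegistered, which is why the plain
# admin.site.register(ChatModelOptions) call was removed in the first hunk.
admin.site.register(ChatModelOptions, ChatModelOptionsAdmin)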
@@ -67,7 +67,6 @@ def extract_questions(
         messages=messages,
         model=model,
         temperature=temperature,
-        max_tokens=max_tokens,
         api_base_url=api_base_url,
         model_kwargs={"response_format": {"type": "json_object"}},
         openai_api_key=api_key,
@@ -34,7 +34,7 @@ openai_clients: Dict[str, openai.OpenAI] = {}
     reraise=True,
 )
 def completion_with_backoff(
-    messages, model, temperature=0, openai_api_key=None, api_base_url=None, model_kwargs=None, max_tokens=None
+    messages, model, temperature=0, openai_api_key=None, api_base_url=None, model_kwargs=None
 ) -> str:
     client_key = f"{openai_api_key}--{api_base_url}"
     client: openai.OpenAI = openai_clients.get(client_key)
@@ -53,7 +53,6 @@ def completion_with_backoff(
         model=model,  # type: ignore
         temperature=temperature,
         timeout=20,
-        max_tokens=max_tokens,
         **(model_kwargs or dict()),
     )
     aggregated_response = ""
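A hypothetical call against the updated signature; the model name and API key below are placeholders, not values from this repository:

# Hypothetical usage of the updated completion_with_backoff signature.
response = completion_with_backoff(
    messages=messages,
    model="gpt-4-turbo-preview",  # placeholder model name
    temperature=0,
    openai_api_key="sk-...",  # placeholder key
    model_kwargs={"response_format": {"type": "json_object"}},
)
# No max_tokens kwarg: the output length is left to the model's fixed
# 4096-token output cap; the configured limit only trims the input.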
@@ -993,7 +993,7 @@ def scheduled_chat(
 
     # Stop if the chat API call was not successful
     if raw_response.status_code != 200:
-        logger.error(f"Failed to run schedule chat: {raw_response.text}")
+        logger.error(f"Failed to run schedule chat: {raw_response.text}, user: {user}, query: {query_to_run}")
         return None
 
     # Extract the AI response from the chat API response