Throw "not implemented" error when server is not configured for image generation or speech-to-text

This commit is contained in:
Debanjum Singh Solanky 2023-12-05 01:29:36 -05:00
parent 8f2f053968
commit 162b219f2b
5 changed files with 23 additions and 13 deletions

View file

@ -675,9 +675,13 @@
.then(response => response.ok ? response.json() : Promise.reject(response))
.then(data => { chatInput.value += data.text; })
.catch(err => {
err.status == 422
? flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
: flashStatusInChatInput("⛔️ Failed to transcribe audio")
if (err.status === 501) {
flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
} else if (err.status === 422) {
                    flashStatusInChatInput("⛔️ Audio file too large to process.")
} else {
flashStatusInChatInput("⛔️ Failed to transcribe audio.")
}
});
};

View file

@ -410,10 +410,12 @@ export class KhojChatModal extends Modal {
if (response.status === 200) {
console.log(response);
chatInput.value += response.json.text;
} else if (response.status === 422) {
throw new Error("⛔️ Failed to transcribe audio");
} else {
} else if (response.status === 501) {
throw new Error("⛔️ Configure speech-to-text model on server.");
} else if (response.status === 422) {
            throw new Error("⛔️ Audio file too large to process.");
} else {
throw new Error("⛔️ Failed to transcribe audio.");
}
};

View file

@ -638,9 +638,13 @@ To get started, just start typing below. You can also type / to see a list of co
.then(response => response.ok ? response.json() : Promise.reject(response))
.then(data => { chatInput.value += data.text; })
.catch(err => {
err.status == 422
? flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
: flashStatusInChatInput("⛔️ Failed to transcribe audio")
if (err.status === 501) {
flashStatusInChatInput("⛔️ Configure speech-to-text model on server.")
} else if (err.status === 422) {
                    flashStatusInChatInput("⛔️ Audio file too large to process.")
} else {
flashStatusInChatInput("⛔️ Failed to transcribe audio.")
}
});
};

View file

@ -626,8 +626,8 @@ async def transcribe(request: Request, common: CommonQueryParams, file: UploadFi
speech_to_text_config = await ConversationAdapters.get_speech_to_text_config()
openai_chat_config = await ConversationAdapters.get_openai_chat_config()
if not speech_to_text_config:
# If the user has not configured a speech to text model, return an unprocessable entity error
status_code = 422
        # If the user has not configured a speech-to-text model, return a 501 Not Implemented error
status_code = 501
elif openai_chat_config and speech_to_text_config.model_type == ChatModelOptions.ModelType.OPENAI:
api_key = openai_chat_config.api_key
speech2text_model = speech_to_text_config.model_name

View file

@ -258,8 +258,8 @@ async def text_to_image(message: str) -> Tuple[Optional[str], int]:
text_to_image_config = await ConversationAdapters.aget_text_to_image_model_config()
openai_chat_config = await ConversationAdapters.get_openai_chat_config()
if not text_to_image_config:
# If the user has not configured a text to image model, return an unprocessable entity error
status_code = 422
        # If the user has not configured a text-to-image model, return a 501 Not Implemented error
status_code = 501
elif openai_chat_config and text_to_image_config.model_type == TextToImageModelConfig.ModelType.OPENAI:
client = openai.OpenAI(api_key=openai_chat_config.api_key)
text2image_model = text_to_image_config.model_name