diff --git a/src/interface/desktop/chat.html b/src/interface/desktop/chat.html
index 6b7fde07..db3b2c26 100644
--- a/src/interface/desktop/chat.html
+++ b/src/interface/desktop/chat.html
@@ -903,7 +903,8 @@
}
.input-row-button {
background: var(--background-color);
- border: none;
+ border: 1px solid var(--main-text-color);
+ box-shadow: 0 0 11px #aaa;
border-radius: 5px;
padding: 5px;
font-size: 14px;
@@ -989,7 +990,6 @@
color: var(--main-text-color);
border: 1px solid var(--main-text-color);
border-radius: 5px;
- padding: 5px;
font-size: 14px;
font-weight: 300;
line-height: 1.5em;
diff --git a/src/khoj/interface/web/chat.html b/src/khoj/interface/web/chat.html
index aa6bd4b9..4f8031ce 100644
--- a/src/khoj/interface/web/chat.html
+++ b/src/khoj/interface/web/chat.html
@@ -952,7 +952,7 @@ To get started, just start typing below. You can also type / to see a list of co
grid-template-columns: auto 32px 32px;
grid-column-gap: 10px;
grid-row-gap: 10px;
- background: #f9fafc
+ background: var(--background-color);
}
.option:hover {
box-shadow: 0 0 11px #aaa;
@@ -974,9 +974,9 @@ To get started, just start typing below. You can also type / to see a list of co
}
.input-row-button {
background: var(--background-color);
- border: none;
+ border: 1px solid var(--main-text-color);
+ box-shadow: 0 0 11px #aaa;
border-radius: 5px;
- padding: 5px;
font-size: 14px;
font-weight: 300;
line-height: 1.5em;
diff --git a/src/khoj/processor/conversation/prompts.py b/src/khoj/processor/conversation/prompts.py
index b0e316da..31cd9160 100644
--- a/src/khoj/processor/conversation/prompts.py
+++ b/src/khoj/processor/conversation/prompts.py
@@ -109,6 +109,18 @@ Question: {query}
""".strip()
)
+## Image Generation
+## --
+
+image_generation_improve_prompt = PromptTemplate.from_template(
+ """
+Generate a detailed prompt for an image generation model based on the description below. Rewrite the query, adding context and visual detail so it produces a better image.
+
+Query: {query}
+
+Improved Query:"""
+)
+
## Online Search Conversation
## --
online_search_conversation = PromptTemplate.from_template(
@@ -295,10 +307,13 @@ Q:"""
# --
help_message = PromptTemplate.from_template(
"""
-**/notes**: Chat using the information in your knowledge base.
-**/general**: Chat using just Khoj's general knowledge. This will not search against your notes.
-**/default**: Chat using your knowledge base and Khoj's general knowledge for context.
-**/help**: Show this help message.
+- **/notes**: Chat using the information in your knowledge base.
+- **/general**: Chat using just Khoj's general knowledge. This will not search against your notes.
+- **/default**: Chat using your knowledge base and Khoj's general knowledge for context.
+- **/online**: Chat using the internet as a source of information.
+- **/image**: Generate an image based on your message.
+- **/help**: Show this help message.
+
You are using the **{model}** model on the **{device}**.
**version**: {version}
diff --git a/src/khoj/routers/helpers.py b/src/khoj/routers/helpers.py
index d2a79e39..ed366590 100644
--- a/src/khoj/routers/helpers.py
+++ b/src/khoj/routers/helpers.py
@@ -146,6 +146,20 @@ async def generate_online_subqueries(q: str) -> List[str]:
return [q]
+async def generate_better_image_prompt(q: str) -> str:
+ """
+ Generate a better image prompt from the given query
+ """
+
+ image_prompt = prompts.image_generation_improve_prompt.format(
+ query=q,
+ )
+
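+ # Ask the configured chat model to rewrite the query into a richer image prompt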
+ response = await send_message_to_model_wrapper(image_prompt)
+
+ return response.strip()
+
+
async def send_message_to_model_wrapper(
message: str,
):
@@ -170,11 +184,13 @@ async def send_message_to_model_wrapper(
openai_chat_config = await ConversationAdapters.aget_openai_conversation_config()
api_key = openai_chat_config.api_key
chat_model = conversation_config.chat_model
- return send_message_to_model(
+ openai_response = send_message_to_model(
message=message,
api_key=api_key,
model=chat_model,
)
+
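+ # Return just the text content so callers receive a plain string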
+ return openai_response.content
else:
raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -254,16 +270,16 @@ async def text_to_image(message: str) -> Tuple[Optional[str], int]:
status_code = 200
image = None
- # Send the audio data to the Whisper API
text_to_image_config = await ConversationAdapters.aget_text_to_image_model_config()
if not text_to_image_config:
# If the user has not configured a text to image model, return an unsupported on server error
status_code = 501
elif state.openai_client and text_to_image_config.model_type == TextToImageModelConfig.ModelType.OPENAI:
text2image_model = text_to_image_config.model_name
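+ # Let the chat model expand the user's message into a more detailed image prompt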
+ improved_image_prompt = await generate_better_image_prompt(message)
try:
response = state.openai_client.images.generate(
- prompt=message, model=text2image_model, response_format="b64_json"
+ prompt=improved_image_prompt, model=text2image_model, response_format="b64_json"
)
image = response.data[0].b64_json
except openai.OpenAIError as e: