diff --git a/src/khoj/interface/web/chat.html b/src/khoj/interface/web/chat.html index 7d93c836..d5d55956 100644 --- a/src/khoj/interface/web/chat.html +++ b/src/khoj/interface/web/chat.html @@ -84,7 +84,6 @@ function readStream() { reader.read().then(({ done, value }) => { if (done) { - console.log("Stream complete"); return; } @@ -99,8 +98,6 @@ new_response_text.innerHTML += polishedReference; } else { new_response_text.innerHTML += chunk; - console.log(`Received ${chunk.length} bytes of data`); - console.log(`Chunk: ${chunk}`); document.getElementById("chat-body").scrollTop = document.getElementById("chat-body").scrollHeight; readStream(); } @@ -108,14 +105,6 @@ } readStream(); }); - - - // fetch(url) - // .then(data => { - // // Render message by Khoj to chat body - // console.log(data.response); - // renderMessageWithReference(data.response, "khoj", data.context); - // }); } function incrementalChat(event) { @@ -428,31 +417,33 @@ diff --git a/src/khoj/processor/conversation/utils.py b/src/khoj/processor/conversation/utils.py index 4305999f..6f6f1b70 100644 --- a/src/khoj/processor/conversation/utils.py +++ b/src/khoj/processor/conversation/utils.py @@ -2,9 +2,7 @@ import os import logging from datetime import datetime -from typing import Any, Optional -from uuid import UUID -import asyncio +from typing import Any from threading import Thread import json @@ -12,10 +10,8 @@ import json from langchain.chat_models import ChatOpenAI from langchain.llms import OpenAI from langchain.schema import ChatMessage -from langchain.callbacks.base import BaseCallbackHandler from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler -from langchain.callbacks import AsyncIteratorCallbackHandler -from langchain.callbacks.base import BaseCallbackManager, AsyncCallbackHandler +from langchain.callbacks.base import BaseCallbackManager import openai import tiktoken from tenacity import ( @@ -50,6 +46,7 @@ class ThreadedGenerator: item = self.queue.get() if item 
is StopIteration: if self.completion_func: + # The completion func effectively acts as a callback. It adds the aggregated response to the conversation history. It's constructed in api.py. self.completion_func(gpt_response=self.response) raise StopIteration return item diff --git a/src/khoj/routers/api.py b/src/khoj/routers/api.py index d1fbbdb5..87f428da 100644 --- a/src/khoj/routers/api.py +++ b/src/khoj/routers/api.py @@ -398,7 +398,6 @@ def update( @api.get("/chat/init") def chat_init( request: Request, - q: Optional[str] = None, client: Optional[str] = None, user_agent: Optional[str] = Header(None), referer: Optional[str] = Header(None), @@ -429,9 +428,7 @@ def chat_init( ) ] - # If user query is empty, return chat history - if not q: - return {"status": "ok", "response": meta_log.get("chat", [])} + return {"status": "ok", "response": meta_log.get("chat", [])} @api.get("/chat", response_class=StreamingResponse) @@ -474,7 +471,7 @@ async def chat( chat_session = state.processor_config.conversation.chat_session meta_log = state.processor_config.conversation.meta_log - # If user query is empty, return chat history + # If user query is empty, return nothing if not q: return StreamingResponse(None)