mirror of https://github.com/khoj-ai/khoj.git (synced 2024-11-28 01:45:07 +01:00)

commit 67a8795b1f (parent 79b1b1d350)

    Clean-up commented out code

3 changed files with 31 additions and 46 deletions
@@ -84,7 +84,6 @@
            function readStream() {
                reader.read().then(({ done, value }) => {
                    if (done) {
                        console.log("Stream complete");
                        return;
                    }

@@ -99,8 +98,6 @@
                        new_response_text.innerHTML += polishedReference;
                    } else {
                        new_response_text.innerHTML += chunk;
                        console.log(`Received ${chunk.length} bytes of data`);
                        console.log(`Chunk: ${chunk}`);
                        document.getElementById("chat-body").scrollTop = document.getElementById("chat-body").scrollHeight;
                        readStream();
                    }

@@ -108,14 +105,6 @@
            }
            readStream();
        });

        // fetch(url)
        //     .then(data => {
        //         // Render message by Khoj to chat body
        //         console.log(data.response);
        //         renderMessageWithReference(data.response, "khoj", data.context);
        //     });
    }

    function incrementalChat(event) {

@@ -428,6 +417,7 @@
    <script>
        var khojBannerSubmit = document.getElementById("khoj-banner-submit");

        if (khojBannerSubmit != null) {
            khojBannerSubmit.addEventListener("click", function(event) {
                event.preventDefault();
                var email = document.getElementById("khoj-banner-email").value;

@@ -454,5 +444,6 @@
                    document.getElementById("khoj-banner").innerHTML = "There was an error signing up. Please contact team@khoj.dev";
                });
            });
        }
    </script>
</html>
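The readStream() helper above drains the fetch ReadableStream recursively: each reader.read() resolves with a chunk that is appended to the chat body, and the function re-invokes itself until done is true. The same incremental consumption can be sketched from Python; this is a hypothetical client, not part of the commit, and the server URL, port, and query parameter are all assumptions:

    # Hypothetical Python client mirroring the page's readStream() loop.
    import requests

    def read_stream(query: str) -> str:
        # URL, port, and query parameter are assumptions, not from the diff
        response = requests.get(
            "http://localhost:8000/api/chat",
            params={"q": query},
            stream=True,  # iterate chunks as they arrive instead of buffering
        )
        response.raise_for_status()
        aggregated = ""
        # chunk_size=None yields data as it streams in; decode_unicode=True
        # gives text chunks, like the JS reader.read() loop until done is true
        for chunk in response.iter_content(chunk_size=None, decode_unicode=True):
            aggregated += chunk
        return aggregated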
@@ -2,9 +2,7 @@
import os
import logging
from datetime import datetime
from typing import Any, Optional
from uuid import UUID
import asyncio
from typing import Any
from threading import Thread
import json

@@ -12,10 +10,8 @@ import json
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.schema import ChatMessage
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.callbacks.base import BaseCallbackManager, AsyncCallbackHandler
from langchain.callbacks.base import BaseCallbackManager
import openai
import tiktoken
from tenacity import (

@@ -50,6 +46,7 @@ class ThreadedGenerator:
        item = self.queue.get()
        if item is StopIteration:
            if self.completion_func:
                # The completion func effectively acts as a callback. It adds the aggregated response to the conversation history. It's constructed in api.py.
                self.completion_func(gpt_response=self.response)
            raise StopIteration
        return item
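As the hunk shows, ThreadedGenerator is a queue-backed iterator: a producer thread pushes tokens onto self.queue, __next__ pops them, and a StopIteration sentinel fires the completion_func callback with the aggregated response before ending iteration. A minimal self-contained sketch of that pattern follows; only __next__ is grounded in the diff, while the constructor, send(), and close() are assumed counterparts for the producer side:

    # Minimal sketch of the queue-backed generator pattern from this diff.
    import queue
    from typing import Callable, Optional

    class ThreadedGenerator:
        def __init__(self, completion_func: Optional[Callable] = None):
            self.queue = queue.Queue()
            self.response = ""  # aggregated response text
            self.completion_func = completion_func

        def __iter__(self):
            return self

        def __next__(self):
            item = self.queue.get()
            if item is StopIteration:
                if self.completion_func:
                    # Callback that persists the aggregated response,
                    # e.g. into the conversation history
                    self.completion_func(gpt_response=self.response)
                raise StopIteration
            return item

        def send(self, token: str):
            # Producer side: aggregate and forward each streamed token
            self.response += token
            self.queue.put(token)

        def close(self):
            # Sentinel unblocks the consumer and fires the completion callback
            self.queue.put(StopIteration)

A langchain BaseCallbackHandler whose on_llm_new_token hook calls send(token) and whose on_llm_end hook calls close() would bridge the model's streamed tokens into this iterator; that handler is not shown in the hunk, so treat the hook names as standard langchain ones rather than khoj's exact wiring.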
@@ -398,7 +398,6 @@ def update(
@api.get("/chat/init")
def chat_init(
    request: Request,
    q: Optional[str] = None,
    client: Optional[str] = None,
    user_agent: Optional[str] = Header(None),
    referer: Optional[str] = Header(None),

@@ -429,8 +428,6 @@ def chat_init(
        )
    ]

    # If user query is empty, return chat history
    if not q:
        return {"status": "ok", "response": meta_log.get("chat", [])}
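Pieced together, the two /chat/init hunks describe a plain (non-streaming) endpoint that formats stored chat history and short-circuits when no query is given. A sketch of that shape, assuming standard FastAPI wiring; the meta_log structure and the omitted history-formatting code are stand-ins, not from the diff:

    # Sketch of the /chat/init fast path shown in the hunks above.
    from typing import Optional
    from fastapi import APIRouter, Header, Request

    api = APIRouter()

    @api.get("/chat/init")
    def chat_init(
        request: Request,
        q: Optional[str] = None,
        client: Optional[str] = None,
        user_agent: Optional[str] = Header(None),
        referer: Optional[str] = Header(None),
    ):
        # Stand-in for state.processor_config.conversation.meta_log
        meta_log: dict = {"chat": []}
        # If user query is empty, return chat history
        if not q:
            return {"status": "ok", "response": meta_log.get("chat", [])}
        # Otherwise format and return a reply to the query (omitted in this sketch)
        ...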
@@ -474,7 +471,7 @@ async def chat(
    chat_session = state.processor_config.conversation.chat_session
    meta_log = state.processor_config.conversation.meta_log

    # If user query is empty, return chat history
    # If user query is empty, return nothing
    if not q:
        return StreamingResponse(None)
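The async /chat endpoint is where ThreadedGenerator meets the HTTP layer: tokens produced on the generation thread stream out through FastAPI's StreamingResponse, and the completion callback persists the finished exchange. A hedged sketch of that wiring, reusing the ThreadedGenerator sketch above; the producer and completion callback are illustrative stand-ins, with only the empty-query guard taken from the diff:

    # Sketch: streaming a ThreadedGenerator through FastAPI.
    from threading import Thread
    from typing import Optional
    from fastapi import APIRouter
    from fastapi.responses import StreamingResponse

    api = APIRouter()

    @api.get("/chat")
    async def chat(q: Optional[str] = None):
        # If user query is empty, return nothing (as in the diff above)
        if not q:
            return StreamingResponse(None)

        def save_to_history(gpt_response: str):
            # Hypothetical completion callback: append the aggregated
            # response to the conversation log (persistence omitted)
            ...

        gpt_response = ThreadedGenerator(completion_func=save_to_history)

        def produce():
            # Hypothetical producer; the real one streams tokens from the chat model
            for token in ["Hello", ", ", "world", "!"]:
                gpt_response.send(token)
            gpt_response.close()

        Thread(target=produce).start()
        return StreamingResponse(gpt_response, media_type="text/plain")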