mirror of https://github.com/khoj-ai/khoj.git
synced 2024-11-27 17:35:07 +01:00

Batch upload telemetry data at regular interval instead of while querying

parent 3ede919c66
commit 134cce9d32

4 changed files with 23 additions and 11 deletions
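
This commit moves khoj to a standard batched-telemetry pattern: request handlers append usage records to an in-memory buffer, and a timer flushes the whole buffer in a single HTTP POST. A minimal, self-contained sketch of that pattern (placeholder endpoint and names, not khoj's actual identifiers):

# Sketch only: batch-instead-of-inline telemetry upload.
from typing import Dict, List

import requests

TELEMETRY_ENDPOINT = "https://telemetry.example.com/v1/log"  # placeholder URL
telemetry_buffer: List[Dict[str, str]] = []

def record_event(api: str) -> None:
    # Hot path: append only, no network I/O while serving the request.
    telemetry_buffer.append({"telemetry_type": "api", "api": api})

def flush_telemetry() -> None:
    # Cold path: runs on a timer and uploads everything accumulated so far.
    global telemetry_buffer
    if not telemetry_buffer:
        return
    requests.post(TELEMETRY_ENDPOINT, json=telemetry_buffer, timeout=10)
    telemetry_buffer = []

The trade-off: every search, update, and chat request avoids a blocking HTTP call, at the cost of losing up to one flush interval of unsent records if the process dies.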

@@ -3,6 +3,7 @@ import sys
 import logging
 import json
 from enum import Enum
+import requests
 
 # External Packages
 import schedule

@@ -223,3 +224,18 @@ def save_chat_session():
 
     state.processor_config.conversation.chat_session = None
     logger.info("📩 Saved current chat session to conversation logs")
+
+
+@schedule.repeat(schedule.every(1).minutes)
+def upload_telemetry():
+    if not state.config.app.should_log_telemetry or not state.telemetry:
+        print("No telemetry to upload") if not state.telemetry else print("Telemetry logging disabled")
+        return
+
+    try:
+        logger.debug(f"📡 Upload usage telemetry to {constants.telemetry_server}: {state.telemetry}")
+        requests.post(constants.telemetry_server, json=state.telemetry)
+    except Exception as e:
+        logger.error(f"Error uploading telemetry: {e}")
+    else:
+        state.telemetry = None
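
The @schedule.repeat decorator only registers upload_telemetry with the schedule library; jobs actually fire when something calls schedule.run_pending(). That driving loop is outside this diff (main.py already imports schedule, per the first hunk), so the sketch below shows the general pattern, assuming khoj's poll loop behaves like it:

import time

import schedule

@schedule.repeat(schedule.every(1).minutes)
def upload_telemetry():
    # Stand-in body; the real job above batches and POSTs state.telemetry.
    print("would batch-upload telemetry here")

while True:
    schedule.run_pending()  # fires any registered job whose interval has elapsed
    time.sleep(1)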

@@ -168,7 +168,7 @@ def search(
     # Cache results
     state.query_cache[query_cache_key] = results
 
-    log_telemetry(telemetry_type="api", api="search", app_config=state.config.app)
+    state.telemetry += [log_telemetry(telemetry_type="api", api="search", app_config=state.config.app)]
 
     return results
 

@@ -193,7 +193,7 @@ def update(t: Optional[SearchType] = None, force: Optional[bool] = False):
     else:
         logger.info("📬 Processor reconfigured via API")
 
-    log_telemetry(telemetry_type="api", api="update", app_config=state.config.app)
+    state.telemetry += [log_telemetry(telemetry_type="api", api="update", app_config=state.config.app)]
 
     return {"status": "ok", "message": "khoj reloaded"}
 

@@ -255,6 +255,6 @@ def chat(q: Optional[str] = None):
         conversation_log=meta_log.get("chat", []),
     )
 
-    log_telemetry(telemetry_type="api", api="chat", app_config=state.config.app)
+    state.telemetry += [log_telemetry(telemetry_type="api", api="chat", app_config=state.config.app)]
 
     return {"status": status, "response": gpt_response, "context": compiled_references}
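
All three handlers now use the same idiom: log_telemetry builds and returns a record, and the handler extends the shared state.telemetry list with it. A self-contained sketch of the idiom (simplified record shape, not khoj's full request body):

from typing import Dict, List

telemetry: List[Dict[str, str]] = []

def log_telemetry(telemetry_type: str, api: str) -> Dict[str, str]:
    # Pure builder: no network side effects, just the record to upload later.
    return {"telemetry_type": telemetry_type, "api": api}

telemetry += [log_telemetry("api", "search")]
telemetry += [log_telemetry("api", "chat")]
assert telemetry == [
    {"telemetry_type": "api", "api": "search"},
    {"telemetry_type": "api", "api": "chat"},
]

Here telemetry += [record] is equivalent to telemetry.append(record); the list form simply mirrors the diff.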

@@ -172,7 +172,7 @@ def log_telemetry(telemetry_type: str, api: str = None, client: str = None, app_
     """Log basic app usage telemetry like client, os, api called"""
     # Do not log usage telemetry, if telemetry is disabled via app config
     if not app_config or not app_config.should_log_telemetry:
-        return
+        return []
 
     # Populate telemetry data to log
     request_body = {

@@ -189,9 +189,4 @@ def log_telemetry(telemetry_type: str, api: str = None, client: str = None, app_
         request_body["client"] = client
 
     # Log telemetry data to telemetry endpoint
-    logger = logging.getLogger(__name__)
-    try:
-        logger.debug(f"Log usage telemetry to {constants.telemetry_server}: {request_body}")
-        requests.post(constants.telemetry_server, json=request_body)
-    except:
-        pass
+    return request_body
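
With the POST moved out, log_telemetry becomes a pure builder: given a config it returns either the populated request body or an empty list when telemetry is disabled, which makes the disabled path checkable without mocking HTTP. A hedged sketch with a simplified signature (khoj's real function takes more fields):

from typing import Dict, List, Optional, Union

def log_telemetry(telemetry_type: str, api: Optional[str] = None,
                  enabled: bool = True) -> Union[List, Dict[str, str]]:
    if not enabled:
        return []  # mirrors the diff: disabled telemetry yields an empty record
    request_body = {"telemetry_type": telemetry_type}
    if api:
        request_body["api"] = api
    return request_body

assert log_telemetry("api", api="search") == {"telemetry_type": "api", "api": "search"}
assert log_telemetry("api", enabled=False) == []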

@@ -1,6 +1,6 @@
 # Standard Packages
 import threading
-from typing import List
+from typing import List, Dict
 from packaging import version
 
 # External Packages

@@ -25,6 +25,7 @@ cli_args: List[str] = None
 query_cache = LRU()
 search_index_lock = threading.Lock()
 SearchType = utils_config.SearchType
+telemetry: List[Dict[str, str]] = []
 
 if torch.cuda.is_available():
     # Use CUDA GPU
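
The typing import gains Dict because the new module-level buffer's annotation references it. Appends to a plain list are atomic under CPython's GIL, which is presumably why no dedicated lock accompanies the buffer (unlike the existing search_index_lock for index rebuilds). Sketch:

from typing import Dict, List

telemetry: List[Dict[str, str]] = []

# Safe to call from concurrent request handlers in CPython:
# list.append is atomic under the GIL.
telemetry.append({"telemetry_type": "api", "api": "search"})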