Auto-update: Sat Jun 29 11:58:22 PDT 2024
commit f019d4ede0 (parent 7a064cd21d)
16 changed files with 359 additions and 103 deletions

@@ -9,27 +9,22 @@ from dateutil import tz
from pathlib import Path
from pydantic import BaseModel
from typing import List, Optional
import traceback
import logging
from .logs import Logger
from .classes import AutoResponder, IMAPConfig, SMTPConfig, EmailAccount, EmailContact, IncomingEmail, Database, Geocoder

# from sijapi.config.config import load_config
# cfg = load_config()
from .classes import AutoResponder, IMAPConfig, SMTPConfig, EmailAccount, EmailContact, IncomingEmail, Database, Geocoder, APIConfig, Configuration

### Initial initialization
BASE_DIR = Path(__file__).resolve().parent
CONFIG_DIR = BASE_DIR / "config"
ENV_PATH = CONFIG_DIR / ".env"
LOGS_DIR = BASE_DIR / "logs"

# Create logger instance
L = Logger("Central", LOGS_DIR)

os.makedirs(LOGS_DIR, exist_ok=True)
load_dotenv(ENV_PATH)

### API essentials
API_CONFIG_PATH = CONFIG_DIR / "api.yaml"
SECRETS_CONFIG_PATH = CONFIG_DIR / "secrets.yaml"
API = APIConfig.load_from_yaml(API_CONFIG_PATH, SECRETS_CONFIG_PATH)
DB = Database.from_env()
ROUTERS = os.getenv('ROUTERS', '').split(',')
PUBLIC_SERVICES = os.getenv('PUBLIC_SERVICES', '').split(',')

@@ -18,9 +18,8 @@ from dotenv import load_dotenv
from pathlib import Path
from datetime import datetime
import argparse
from . import L, LOGS_DIR, OBSIDIAN_VAULT_DIR
from . import L, API, OBSIDIAN_VAULT_DIR
from .logs import Logger
from .utilities import list_and_correct_impermissible_files

parser = argparse.ArgumentParser(description='Personal API.')
parser.add_argument('--debug', action='store_true', help='Set log level to L.INFO')

@@ -106,6 +105,7 @@ async def handle_exception_middleware(request: Request, call_next):



def load_router(router_name):
    router_file = ROUTER_DIR / f'{router_name}.py'
    L.DEBUG(f"Attempting to load {router_name.capitalize()}...")

@@ -127,15 +127,15 @@ def main(argv):
    else:
        L.CRIT(f"sijapi launched")
        L.CRIT(f"{args._get_args}")
    for router_name in ROUTERS:
        load_router(router_name)

    journal = OBSIDIAN_VAULT_DIR / "journal"
    list_and_correct_impermissible_files(journal, rename=True)
    for module_name in API.MODULES.__fields__:
        if getattr(API.MODULES, module_name):
            load_router(module_name)

    config = Config()
    config.keep_alive_timeout = 1200
    config.bind = [HOST]
    config.bind = [API.BIND]
    asyncio.run(serve(api, config))


if __name__ == "__main__":
    main(sys.argv[1:])

@@ -3,6 +3,8 @@ from typing import List, Optional, Any, Tuple, Dict, Union, Tuple
from datetime import datetime, timedelta, timezone
import asyncio
import json
import os
import re
import yaml
import math
from timezonefinder import TimezoneFinder

@@ -15,6 +17,160 @@ from concurrent.futures import ThreadPoolExecutor
import reverse_geocoder as rg
from timezonefinder import TimezoneFinder
from srtm import get_data
from pathlib import Path
import yaml
from typing import Union, List, TypeVar, Type
from pydantic import BaseModel, create_model

from pydantic import BaseModel, Field
from typing import List, Dict
import yaml
from pathlib import Path
import os
from dotenv import load_dotenv

T = TypeVar('T', bound='Configuration')

class ModulesConfig(BaseModel):
    asr: bool = Field(alias="asr")
    calendar: bool = Field(alias="calendar")
    email: bool = Field(alias="email")
    health: bool = Field(alias="health")
    hooks: bool = Field(alias="hooks")
    llm: bool = Field(alias="llm")
    locate: bool = Field(alias="locate")
    note: bool = Field(alias="note")
    sd: bool = Field(alias="sd")
    serve: bool = Field(alias="serve")
    time: bool = Field(alias="time")
    tts: bool = Field(alias="tts")
    weather: bool = Field(alias="weather")


class APIConfig(BaseModel):
    BIND: str
    PORT: int
    URL: str
    PUBLIC: List[str]
    TRUSTED_SUBNETS: List[str]
    MODULES: ModulesConfig
    BaseTZ: Optional[str] = 'UTC'
    KEYS: List[str]

    @classmethod
    def load_from_yaml(cls, config_path: Path, secrets_path: Path):
        # Load main configuration
        with open(config_path, 'r') as file:
            config_data = yaml.safe_load(file)

        print(f"Loaded main config: {config_data}")  # Debug print

        # Load secrets
        try:
            with open(secrets_path, 'r') as file:
                secrets_data = yaml.safe_load(file)
                print(f"Loaded secrets: {secrets_data}")  # Debug print
        except FileNotFoundError:
            print(f"Secrets file not found: {secrets_path}")
            secrets_data = {}
        except yaml.YAMLError as e:
            print(f"Error parsing secrets YAML: {e}")
            secrets_data = {}

        # Handle KEYS placeholder
        if isinstance(config_data.get('KEYS'), list) and len(config_data['KEYS']) == 1:
            placeholder = config_data['KEYS'][0]
            if placeholder.startswith('{{') and placeholder.endswith('}}'):
                key = placeholder[2:-2].strip()  # Remove {{ }} and whitespace
                parts = key.split('.')
                if len(parts) == 2 and parts[0] == 'SECRET':
                    secret_key = parts[1]
                    if secret_key in secrets_data:
                        config_data['KEYS'] = secrets_data[secret_key]
                        print(f"Replaced KEYS with secret: {config_data['KEYS']}")  # Debug print
                    else:
                        print(f"Secret key '{secret_key}' not found in secrets file")
            else:
                print(f"Invalid secret placeholder format: {placeholder}")

        # Convert 'on'/'off' to boolean for MODULES if they are strings
        for key, value in config_data['MODULES'].items():
            if isinstance(value, str):
                config_data['MODULES'][key] = value.lower() == 'on'
            elif isinstance(value, bool):
                config_data['MODULES'][key] = value
            else:
                raise ValueError(f"Invalid value for module {key}: {value}. Must be 'on', 'off', True, or False.")

        return cls(**config_data)
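
For illustration only (not part of this commit): with the api.yaml-example and secrets.yaml-example files added later in this commit installed as config/api.yaml and config/secrets.yaml (paths are assumptions), the KEYS placeholder handling above resolves like so:

# Minimal sketch. load_from_yaml() swaps the one-element placeholder list
# ['{{ SECRET.GLOBAL_API_KEYS }}'] for the GLOBAL_API_KEYS list from secrets.yaml.
from pathlib import Path
from sijapi.classes import APIConfig

api = APIConfig.load_from_yaml(Path("config/api.yaml"), Path("config/secrets.yaml"))
print(api.KEYS)  # ['sk-YOUR-FIRST-API-KEY', 'sk-YOUR-SECOND-API-KEY']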

class Configuration(BaseModel):
    @classmethod
    def load_config(cls: Type[T], yaml_path: Union[str, Path]) -> Union[T, List[T]]:
        yaml_path = Path(yaml_path)
        with yaml_path.open('r') as file:
            config_data = yaml.safe_load(file)

        # Load environment variables
        load_dotenv()

        # Resolve placeholders
        config_data = cls.resolve_placeholders(config_data)

        if isinstance(config_data, list):
            return [cls.create_dynamic_model(**cfg) for cfg in config_data]
        elif isinstance(config_data, dict):
            return cls.create_dynamic_model(**config_data)
        else:
            raise ValueError(f"Unsupported YAML structure in {yaml_path}")

    @classmethod
    def resolve_placeholders(cls, data):
        if isinstance(data, dict):
            return {k: cls.resolve_placeholders(v) for k, v in data.items()}
        elif isinstance(data, list):
            return [cls.resolve_placeholders(v) for v in data]
        elif isinstance(data, str):
            return cls.resolve_string_placeholders(data)
        else:
            return data

    @classmethod
    def resolve_string_placeholders(cls, value):
        pattern = r'\{\{\s*([^}]+)\s*\}\}'
        matches = re.findall(pattern, value)

        for match in matches:
            parts = match.split('.')
            if len(parts) == 2:
                category, key = parts
                if category == 'DIR':
                    replacement = str(Path(os.getenv(key, '')))
                elif category == 'SECRET':
                    replacement = os.getenv(key, '')
            else:
                replacement = os.getenv(match, '')

            value = value.replace('{{' + match + '}}', replacement)

        return value
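
For illustration only (not part of this commit), a sketch of how resolve_string_placeholders() behaves. The environment variables are hypothetical, and the spaceless form {{DIR.DATA}} is used because the replace() step rebuilds the placeholder as '{{' + match + '}}' with no surrounding whitespace:

import os
from sijapi.classes import Configuration

os.environ["DATA"] = "/tmp/data"      # hypothetical {{DIR.*}} target
os.environ["API_TOKEN"] = "hunter2"   # hypothetical {{SECRET.*}} target
print(Configuration.resolve_string_placeholders("{{DIR.DATA}}/cache"))    # /tmp/data/cache
print(Configuration.resolve_string_placeholders("{{SECRET.API_TOKEN}}"))  # hunter2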

    @classmethod
    def create_dynamic_model(cls, **data):
        for key, value in data.items():
            if isinstance(value, dict):
                data[key] = cls.create_dynamic_model(**value)

        DynamicModel = create_model(
            f'Dynamic{cls.__name__}',
            __base__=cls,
            **{k: (type(v), v) for k, v in data.items()}
        )
        return DynamicModel(**data)

    class Config:
        extra = "allow"
        arbitrary_types_allowed = True
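
For illustration only (not part of this commit), a sketch of loading an arbitrary YAML file through the new Configuration.load_config(); the file name and keys are hypothetical:

# Minimal sketch. Nested mappings are converted into dynamic sub-models by
# create_dynamic_model(), so nested values read as attributes rather than dict keys.
from sijapi.classes import Configuration

cfg = Configuration.load_config("config/email.yaml")  # hypothetical file
print(cfg.imap.host)  # hypothetical keys: imap: {host: ...} in the YAML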

class Location(BaseModel):
    latitude: float

sijapi/config/api.yaml-example (new file, 31 lines)
@@ -0,0 +1,31 @@
BIND: 0.0.0.0
PORT: 4444
URL: https://api.yourdomain.com
PUBLIC:
  - /id
  - /ip
  - /health
  - /img/
  - /cl/dockets
  - /cl/search
  - /cd/alert
TRUSTED_SUBNETS:
  - 127.0.0.1/32    # don't change this
  - 192.168.50.0/24 # optionally set to your local subnet, or omit
  - 100.11.11.0/24  # optionally set to your tailscale subnet, or omit
MODULES:
  asr: on
  calendar: on
  email: on
  health: on
  hooks: on
  llm: on
  locate: on
  note: on
  sd: on
  serve: on
  time: on
  tts: on
  weather: on
TZ: 'America/Los_Angeles' # this is just for the initial config, and is dynamically updated based on location
KEYS: ['{{ SECRET.GLOBAL_API_KEYS }}'] # sourced from .env
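
A side note on the example above (not part of the commit): PyYAML implements YAML 1.1, in which bare on/off already parse as booleans; this is why APIConfig.load_from_yaml() accepts both strings and bools for MODULES values:

import yaml

print(yaml.safe_load("asr: on"))    # {'asr': True}  -- bare 'on' is a boolean
print(yaml.safe_load("asr: 'on'")) # {'asr': 'on'}  -- quoted form stays a string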
@@ -2,35 +2,33 @@ scenes:
  - scene: default
    triggers:
      - ""
    API_PPrompt: "Highly-detailed image of "
    API_SPrompt: ", masterpiece, subtle, nuanced, best quality, ultra detailed, ultra high resolution, 8k, documentary, american transcendental, cinematic, filmic, moody, dynamic lighting, realistic, wallpaper, landscape photography, professional, earthporn, eliot porter, frans lanting, daniel kordan, landscape photography, ultra detailed, earth tones, moody"
    API_NPrompt: "3d, bad art, illustrated, deformed, blurry, duplicate, video game, render, anime, cartoon, fake, tiling, out of frame, bad art, bad anatomy, 3d render, nsfw, worst quality, low quality, text, watermark, Thomas Kinkade, sentimental, kitsch, kitschy, twee, commercial, holiday card, comic, cartoon"
    API_PrePrompt: "Highly-detailed image of "
    API_StylePrompt: ", masterpiece, subtle, nuanced, best quality, ultra detailed, ultra high resolution, 8k, documentary, american transcendental, cinematic, filmic, moody, dynamic lighting, realistic, wallpaper, landscape photography, professional, earthporn, eliot porter, frans lanting, daniel kordan, landscape photography, ultra detailed, earth tones, moody"
    API_NegativePrompt: "3d, bad art, illustrated, deformed, blurry, duplicate, video game, render, anime, cartoon, fake, tiling, out of frame, bad art, bad anatomy, 3d render, nsfw, worst quality, low quality, text, watermark, Thomas Kinkade, sentimental, kitsch, kitschy, twee, commercial, holiday card, comic, cartoon"
    llm_sys_msg: "You are a helpful AI who assists in refining prompts that will be used to generate highly realistic images. Upon receiving a prompt, you refine it by simplifying and distilling it to its essence, retaining the most visually evocative and distinct elements from what was provided. You may infer some visual details that were not provided in the prompt, so long as they are consistent with the prompt. Always use the most visually descriptive terms possible, and avoid any vague or abstract concepts. Do not include any words or descriptions based on other senses or emotions. Strive to show rather than tell. Space is limited, so be efficient with your words."
    llm_pre_prompt: "Using the most visually descriptive sentence fragments, phrases, and words, distill this scene description to its essence, staying true to what it describes: "
    workflows:
      - workflow: default.json
        size: 1024x768

  - scene: wallpaper
    triggers:
      - wallpaper
    API_PPrompt: "Stunning widescreen image of "
    API_SPrompt: ", masterpiece, subtle, nuanced, best quality, ultra detailed, ultra high resolution, 8k, documentary, american transcendental, cinematic, filmic, moody, dynamic lighting, realistic, wallpaper, landscape photography, professional, earthporn, eliot porter, frans lanting, daniel kordan, landscape photography, ultra detailed, earth tones, moody"
    API_NPrompt: "3d, bad art, illustrated, deformed, blurry, duplicate, video game, render, anime, cartoon, fake, tiling, out of frame, bad art, bad anatomy, 3d render, nsfw, worst quality, low quality, text, watermark, Thomas Kinkade, sentimental, kitsch, kitschy, twee, commercial, holiday card, comic, cartoon"
    API_PrePrompt: "Stunning widescreen image of "
    API_StylePrompt: ", masterpiece, subtle, nuanced, best quality, ultra detailed, ultra high resolution, 8k, documentary, american transcendental, cinematic, filmic, moody, dynamic lighting, realistic, wallpaper, landscape photography, professional, earthporn, eliot porter, frans lanting, daniel kordan, landscape photography, ultra detailed, earth tones, moody"
    API_NegativePrompt: "3d, bad art, illustrated, deformed, blurry, duplicate, video game, render, anime, cartoon, fake, tiling, out of frame, bad art, bad anatomy, 3d render, nsfw, worst quality, low quality, text, watermark, Thomas Kinkade, sentimental, kitsch, kitschy, twee, commercial, holiday card, comic, cartoon"
    llm_sys_msg: "You are a helpful AI who assists in generating prompts that will be used to generate highly realistic images. Always use the most visually descriptive terms possible, and avoid any vague or abstract concepts. Do not include any words or descriptions based on other senses or emotions. Strive to show rather than tell. Space is limited, so be efficient with your words."
    llm_pre_prompt: "Using a series of words or sentence fragments separated by commas, describe a professional landscape photograph of a striking scene of nature. You can select any place on Earth that a young model from the Pacific Northwest is likely to travel to. Focus on describing the content and composition of the image. Only use words and phrases that are visually descriptive. This model is especially fond of wild and rugged places, mountains. She favors dark muted earth tones, dramatic lighting, and interesting juxtapositions between foreground and background, or center of frame and outer frame areas. Avoid cliche situations; instead strive for nuance and originality in composition and environment."
    workflows:
      - workflow: wallpaper.json
        size: 1024x640

  - scene: portrait
    triggers:
      - portrait
      - profile
      - headshot
    API_PPrompt: "Highly-detailed portrait photo of "
    API_SPrompt: "; attractive, cute, (((masterpiece))); ((beautiful lighting)), subdued, fine detail, extremely sharp, 8k, insane detail, dynamic lighting, cinematic, best quality, ultra detailed."
    API_NPrompt: "canvas frame, 3d, bad art, illustrated, deformed, blurry, duplicate, bad anatomy, worst quality, low quality, watermark, FastNegativeV2, easynegative, epiCNegative, easynegative, verybadimagenegative_v1.3, nsfw, nude"
    API_PrePrompt: "Highly-detailed portrait photo of "
    API_StylePrompt: "; attractive, cute, (((masterpiece))); ((beautiful lighting)), subdued, fine detail, extremely sharp, 8k, insane detail, dynamic lighting, cinematic, best quality, ultra detailed."
    API_NegativePrompt: "canvas frame, 3d, bad art, illustrated, deformed, blurry, duplicate, bad anatomy, worst quality, low quality, watermark, FastNegativeV2, easynegative, epiCNegative, easynegative, verybadimagenegative_v1.3, nsfw, nude"
    llm_sys_msg: "You are a helpful AI who assists in refining prompts that will be used to generate highly realistic portrait photos. Upon receiving a prompt, you refine it by simplifying and distilling it to its essence, retaining the most visually evocative and distinct elements from what was provided, focusing in particular on the pictured individual's eyes, pose, and other distinctive features. You may infer some visual details that were not provided in the prompt, so long as they are consistent with the rest of the prompt. Always use the most visually descriptive terms possible, and avoid any vague or abstract concepts. Do not include any words or descriptions based on other senses or emotions. Strive to show rather than tell. Space is limited, so be efficient with your words. Remember that the final product will be a still image, and action verbs are not as helpful as simple descriptions of position, appearance, background, etc."
    llm_pre_prompt: "Using the most visually descriptive sentence fragments, phrases, and words, distill this portrait photo to its essence: "
    workflows:

sijapi/config/secrets.yaml-example (new file, 3 lines)
@@ -0,0 +1,3 @@
GLOBAL_API_KEYS:
  - sk-YOUR-FIRST-API-KEY
  - sk-YOUR-SECOND-API-KEY

@@ -10,7 +10,7 @@
  },
  "6": {
    "inputs": {
      "text": "API_PPrompt",
      "text": "API_PrePrompt",
      "clip": [
        "4",
        1

@@ -23,7 +23,7 @@
  },
  "7": {
    "inputs": {
      "text": "API_NPrompt",
      "text": "API_NegativePrompt",
      "clip": [
        "4",
        1

@@ -186,7 +186,7 @@
  },
  "21": {
    "inputs": {
      "text": "API_SPrompt",
      "text": "API_StylePrompt",
      "clip": [
        "4",
        1

@@ -10,7 +10,7 @@
  },
  "6": {
    "inputs": {
      "text": "API_PPrompt",
      "text": "API_PrePrompt",
      "clip": [
        "4",
        1

@@ -23,7 +23,7 @@
  },
  "7": {
    "inputs": {
      "text": "API_NPrompt",
      "text": "API_NegativePrompt",
      "clip": [
        "4",
        1

@@ -186,7 +186,7 @@
  },
  "21": {
    "inputs": {
      "text": "API_SPrompt",
      "text": "API_StylePrompt",
      "clip": [
        "4",
        1

@@ -118,7 +118,7 @@
  },
  "17": {
    "inputs": {
      "text": "API_PPrompt"
      "text": "API_PrePrompt"
    },
    "class_type": "JWStringMultiline",
    "_meta": {

@@ -127,7 +127,7 @@
  },
  "18": {
    "inputs": {
      "text": "API_NPrompt"
      "text": "API_NegativePrompt"
    },
    "class_type": "JWStringMultiline",
    "_meta": {

@@ -444,7 +444,7 @@
  },
  "96": {
    "inputs": {
      "text": "API_SPrompt"
      "text": "API_StylePrompt"
    },
    "class_type": "JWStringMultiline",
    "_meta": {

@@ -10,7 +10,7 @@
  },
  "6": {
    "inputs": {
      "text": "API_PPrompt",
      "text": "API_PrePrompt",
      "clip": [
        "4",
        1

@@ -23,7 +23,7 @@
  },
  "7": {
    "inputs": {
      "text": "API_NPrompt",
      "text": "API_NegativePrompt",
      "clip": [
        "4",
        1

@@ -186,7 +186,7 @@
  },
  "21": {
    "inputs": {
      "text": "API_SPrompt",
      "text": "API_StylePrompt",
      "clip": [
        "4",
        1

@@ -10,7 +10,7 @@
  },
  "6": {
    "inputs": {
      "text": "API_PPrompt",
      "text": "API_PrePrompt",
      "clip": [
        "4",
        1

@@ -23,7 +23,7 @@
  },
  "7": {
    "inputs": {
      "text": "API_NPrompt",
      "text": "API_NegativePrompt",
      "clip": [
        "4",
        1

@@ -202,7 +202,7 @@
  },
  "21": {
    "inputs": {
      "text": "API_SPrompt",
      "text": "API_StylePrompt",
      "clip": [
        "4",
        1

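For reference (not part of the commit), the key renames applied uniformly across the ComfyUI workflow JSON files above follow a single mapping:

# Summary of the rename applied across the workflow files in this commit.
RENAMES = {
    "API_PPrompt": "API_PrePrompt",
    "API_SPrompt": "API_StylePrompt",
    "API_NPrompt": "API_NegativePrompt",
}
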
sijapi/routers/dist.py (new file, 60 lines)
@@ -0,0 +1,60 @@
'''
WORK IN PROGRESS: This module will handle tasks related to multi-server environments.
'''
from fastapi import APIRouter, HTTPException
import asyncio
import logging
from sijapi.utilities import run_ssh_command
from sijapi import REBOOT_SCRIPT_PATH, HOST_CONFIG, API_CONFIG

dist = APIRouter()
logger = logging.getLogger(__name__)

@dist.get("/update-restart-others")
async def update_and_restart_others():
    results = []
    for server in API_CONFIG.servers:
        if HOST_CONFIG.id != server.id:
            try:
                output, error = await run_ssh_command(server, f"bash {server.scripts.update_and_restart}")
                results.append({"server": server.id, "status": "success", "output": output, "error": error})
            except Exception as e:
                results.append({"server": server.id, "status": "failed", "error": str(e)})
    return {"message": "Update and restart process initiated for other servers.", "results": results}

@dist.get("/update-restart-self")
async def update_and_restart_self(safe: bool = True):
    if safe and not await ensure_redundancy():
        raise HTTPException(status_code=400, detail="Cannot safely restart: no redundancy available")
    try:
        process = await asyncio.create_subprocess_exec(
            "bash", API_CONFIG.servers[HOST_CONFIG.id].scripts.update_and_restart,
            stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        logger.info(f"Update and restart initiated for self. Stdout: {stdout.decode()}. Stderr: {stderr.decode()}")
        return {"message": "Update and restart process initiated for this server."}
    except Exception as e:
        logger.error(f"Failed to initiate update and restart for self: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Failed to initiate update and restart: {str(e)}")

@dist.get("/update-and-restart-all")
async def update_and_restart_all():
    others_result = await update_and_restart_others()
    self_result = await update_and_restart_self(safe=False)
    return {"others": others_result, "self": self_result}

async def ensure_redundancy():
    import aiohttp
    redundancy = False
    async with aiohttp.ClientSession() as session:
        for server in API_CONFIG.servers:
            if server.id != HOST_CONFIG.id:
                try:
                    async with session.get(f"{server.protocol}://{server.ip}:{server.port}/health") as response:
                        if response.status // 100 == 2:
                            redundancy = True
                            break
                except aiohttp.ClientError:
                    logger.warning(f"Failed to check health of server {server.id}")
    return redundancy
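
For illustration only (not part of this commit): dist is not listed in the MODULES map of api.yaml-example, so until an entry is added (an assumption), the router could be mounted manually on a FastAPI app:

# Minimal sketch. In the commit itself, routers are loaded dynamically by name
# via load_router() in __main__, so adding a 'dist: on' entry to MODULES would
# be the more consistent route; this shows the manual equivalent.
from fastapi import FastAPI
from sijapi.routers.dist import dist

api = FastAPI()
api.include_router(dist)  # exposes /update-restart-others, /update-restart-self, /update-and-restart-all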
@@ -483,15 +483,15 @@ def update_prompt(workflow: dict, post: dict, positive: str, found_key=[None], p
            for index, item in enumerate(value):
                update_prompt(item, post, positive, found_key, current_path + [str(index)])

        if value == "API_PPrompt":
        if value == "API_PrePrompt":
            workflow[key] = post.get(value, "") + positive
            L.DEBUG(f"Updated API_PPrompt to: {workflow[key]}")
        elif value == "API_SPrompt":
            L.DEBUG(f"Updated API_PrePrompt to: {workflow[key]}")
        elif value == "API_StylePrompt":
            workflow[key] = post.get(value, "")
            L.DEBUG(f"Updated API_SPrompt to: {workflow[key]}")
        elif value == "API_NPrompt":
            L.DEBUG(f"Updated API_StylePrompt to: {workflow[key]}")
        elif value == "API_NegativePrompt":
            workflow[key] = post.get(value, "")
            L.DEBUG(f"Updated API_NPrompt to: {workflow[key]}")
            L.DEBUG(f"Updated API_NegativePrompt to: {workflow[key]}")
        elif key == "seed" or key == "noise_seed":
            workflow[key] = random.randint(1000000000000, 9999999999999)
            L.DEBUG(f"Updated seed to: {workflow[key]}")

@@ -507,7 +507,7 @@ def update_prompt(workflow: dict, post: dict, positive: str, found_key=[None], p

    return found_key[0]

def update_prompt_custom(workflow: dict, API_PPrompt: str, API_SPrompt: str, API_NPrompt: str, found_key=[None], path=None):
def update_prompt_custom(workflow: dict, API_PrePrompt: str, API_StylePrompt: str, API_NegativePrompt: str, found_key=[None], path=None):
    if path is None:
        path = []


@@ -519,21 +519,21 @@ def update_prompt_custom(workflow: dict, API_PPrompt: str, API_SPrompt: str, API
        if isinstance(value, dict):
            if value.get('class_type') == 'SaveImage' and value.get('inputs', {}).get('filename_prefix') == 'API_':
                found_key[0] = key
            update_prompt(value, API_PPrompt, API_SPrompt, API_NPrompt, found_key, current_path)
            update_prompt(value, API_PrePrompt, API_StylePrompt, API_NegativePrompt, found_key, current_path)
        elif isinstance(value, list):
            # Recursive call with updated path for each item in a list
            for index, item in enumerate(value):
                update_prompt(item, API_PPrompt, API_SPrompt, API_NPrompt, found_key, current_path + [str(index)])
                update_prompt(item, API_PrePrompt, API_StylePrompt, API_NegativePrompt, found_key, current_path + [str(index)])

        if value == "API_PPrompt":
            workflow[key] = API_PPrompt
            L.DEBUG(f"Updated API_PPrompt to: {workflow[key]}")
        elif value == "API_SPrompt":
            workflow[key] = API_SPrompt
            L.DEBUG(f"Updated API_SPrompt to: {workflow[key]}")
        elif value == "API_NPrompt":
            workflow[key] = API_NPrompt
            L.DEBUG(f"Updated API_NPrompt to: {workflow[key]}")
        if value == "API_PrePrompt":
            workflow[key] = API_PrePrompt
            L.DEBUG(f"Updated API_PrePrompt to: {workflow[key]}")
        elif value == "API_StylePrompt":
            workflow[key] = API_StylePrompt
            L.DEBUG(f"Updated API_StylePrompt to: {workflow[key]}")
        elif value == "API_NegativePrompt":
            workflow[key] = API_NegativePrompt
            L.DEBUG(f"Updated API_NegativePrompt to: {workflow[key]}")
        elif key == "seed" or key == "noise_seed":
            workflow[key] = random.randint(1000000000000, 9999999999999)
            L.DEBUG(f"Updated seed to: {workflow[key]}")

@@ -682,9 +682,9 @@ def handle_custom_image(custom_post: str):
    else:
        workflow_name = args.workflow if args.workflow else "selfie"
        post = {
            "API_PPrompt": "",
            "API_SPrompt": "; (((masterpiece))); (beautiful lighting:1), subdued, fine detail, extremely sharp, 8k, insane detail, dynamic lighting, cinematic, best quality, ultra detailed.",
            "API_NPrompt": "canvas frame, 3d, ((bad art)), illustrated, deformed, blurry, duplicate, bad art, bad anatomy, worst quality, low quality, watermark, FastNegativeV2, (easynegative:0.5), epiCNegative, easynegative, verybadimagenegative_v1.3",
            "API_PrePrompt": "",
            "API_StylePrompt": "; (((masterpiece))); (beautiful lighting:1), subdued, fine detail, extremely sharp, 8k, insane detail, dynamic lighting, cinematic, best quality, ultra detailed.",
            "API_NegativePrompt": "canvas frame, 3d, ((bad art)), illustrated, deformed, blurry, duplicate, bad art, bad anatomy, worst quality, low quality, watermark, FastNegativeV2, (easynegative:0.5), epiCNegative, easynegative, verybadimagenegative_v1.3",
            "Vision_Prompt": "Write an upbeat Instagram description with emojis to accompany this selfie!",
            "frequency": 2,
            "ghost_tags": [

@@ -32,12 +32,43 @@ from pathlib import Path
from fastapi import APIRouter, Query, HTTPException
from sijapi import L, OBSIDIAN_VAULT_DIR, OBSIDIAN_RESOURCES_DIR, ARCHIVE_DIR, BASE_URL, OBSIDIAN_BANNER_SCENE, DEFAULT_11L_VOICE, DEFAULT_VOICE, GEO
from sijapi.routers import cal, loc, tts, llm, time, sd, weather, asr
from sijapi.utilities import assemble_journal_path, assemble_archive_path, convert_to_12_hour_format, sanitize_filename, convert_degrees_to_cardinal, HOURLY_COLUMNS_MAPPING
from sijapi.utilities import assemble_journal_path, assemble_archive_path, convert_to_12_hour_format, sanitize_filename, convert_degrees_to_cardinal, check_file_name, HOURLY_COLUMNS_MAPPING
from sijapi.classes import Location

note = APIRouter()

def list_and_correct_impermissible_files(root_dir, rename: bool = False):
    """List and correct all files with impermissible names."""
    impermissible_files = []
    for dirpath, _, filenames in os.walk(root_dir):
        for filename in filenames:
            if check_file_name(filename):
                file_path = Path(dirpath) / filename
                impermissible_files.append(file_path)
                L.DEBUG(f"Impermissible file found: {file_path}")

                # Sanitize the file name
                new_filename = sanitize_filename(filename)
                new_file_path = Path(dirpath) / new_filename

                # Ensure the new file name does not already exist
                if new_file_path.exists():
                    counter = 1
                    base_name, ext = os.path.splitext(new_filename)
                    while new_file_path.exists():
                        new_filename = f"{base_name}_{counter}{ext}"
                        new_file_path = Path(dirpath) / new_filename
                        counter += 1

                # Rename the file
                if rename:
                    os.rename(file_path, new_file_path)
                    L.DEBUG(f"Renamed: {file_path} -> {new_file_path}")

    return impermissible_files

journal = OBSIDIAN_VAULT_DIR / "journal"
list_and_correct_impermissible_files(journal, rename=True)

### Daily Note Builder ###

@@ -90,9 +90,9 @@ async def workflow(prompt: str, scene: str = None, size: str = None, style: str
    workflow_data = json.loads(workflow_path.read_text())

    post = {
        "API_PPrompt": scene_data['API_PPrompt'] + image_concept + ', '.join(f"; (({trigger}))" for trigger in scene_data['triggers']),
        "API_SPrompt": scene_data['API_SPrompt'],
        "API_NPrompt": scene_data['API_NPrompt'],
        "API_PrePrompt": scene_data['API_PrePrompt'] + image_concept + ', '.join(f"; (({trigger}))" for trigger in scene_data['triggers']),
        "API_StylePrompt": scene_data['API_StylePrompt'],
        "API_NegativePrompt": scene_data['API_NegativePrompt'],
        "width": width,
        "height": height
    }

@@ -406,7 +406,7 @@ async def load_workflow(workflow_path: str, workflow:str):

async def update_prompt_and_get_key(workflow: dict, post: dict, positive: str):
    '''
Recurses through the workflow searching for and substituting the dynamic values for API_PPrompt, API_SPrompt, API_NPrompt, width, height, and seed (random integer).
Recurses through the workflow searching for and substituting the dynamic values for API_PrePrompt, API_StylePrompt, API_NegativePrompt, width, height, and seed (random integer).
Even more important, it finds and returns the key to the filepath where the file is saved, which we need to decipher status when generation is complete.
'''
    found_key = [None]

@@ -427,9 +427,9 @@ Even more important, it finds and returns the key to the filepath where the file
            for index, item in enumerate(value):
                update_recursive(item, current_path + [str(index)])

        if value == "API_PPrompt":
        if value == "API_PrePrompt":
            workflow[key] = post.get(value, "") + positive
        elif value in ["API_SPrompt", "API_NPrompt"]:
        elif value in ["API_StylePrompt", "API_NegativePrompt"]:
            workflow[key] = post.get(value, "")
        elif key in ["seed", "noise_seed"]:
            workflow[key] = random.randint(1000000000000, 9999999999999)

@@ -6,6 +6,7 @@ import io
from io import BytesIO
import base64
import math
import paramiko
from dateutil import parser
from pathlib import Path
import filetype

@@ -21,7 +22,6 @@ import pandas as pd
from scipy.spatial import cKDTree
from dateutil.parser import parse as dateutil_parse
from docx import Document
import asyncpg
from sshtunnel import SSHTunnelForwarder
from fastapi import Depends, HTTPException, Request, UploadFile
from fastapi.security.api_key import APIKeyHeader

@@ -192,37 +192,6 @@ def check_file_name(file_name, max_length=255):
    return needs_sanitization


def list_and_correct_impermissible_files(root_dir, rename: bool = False):
    """List and correct all files with impermissible names."""
    impermissible_files = []
    for dirpath, _, filenames in os.walk(root_dir):
        for filename in filenames:
            if check_file_name(filename):
                file_path = Path(dirpath) / filename
                impermissible_files.append(file_path)
                L.DEBUG(f"Impermissible file found: {file_path}")

                # Sanitize the file name
                new_filename = sanitize_filename(filename)
                new_file_path = Path(dirpath) / new_filename

                # Ensure the new file name does not already exist
                if new_file_path.exists():
                    counter = 1
                    base_name, ext = os.path.splitext(new_filename)
                    while new_file_path.exists():
                        new_filename = f"{base_name}_{counter}{ext}"
                        new_file_path = Path(dirpath) / new_filename
                        counter += 1

                # Rename the file
                if rename:
                    os.rename(file_path, new_file_path)
                    L.DEBUG(f"Renamed: {file_path} -> {new_file_path}")

    return impermissible_files


def bool_convert(value: str = Form(None)):
    return value.lower() in ["true", "1", "t", "y", "yes"]

@@ -473,3 +442,16 @@ def load_geonames_data(path: str):

    return data

async def run_ssh_command(server, command):
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(server.ssh.host, username=server.ssh.user, password=server.ssh.password)
        stdin, stdout, stderr = ssh.exec_command(command)
        output = stdout.read().decode()
        error = stderr.read().decode()
        ssh.close()
        return output, error
    except Exception as e:
        L.ERR(f"SSH command failed for server {server.id}: {str(e)}")
        raise
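
For illustration only (not part of this commit), a usage sketch for run_ssh_command; the server object mirrors what dist.py passes in (server.ssh.host / .user / .password and server.id are taken from that usage):

# Minimal sketch. Note that although run_ssh_command is declared async, the
# paramiko calls inside it are blocking, so the event loop does not yield
# while the remote command runs.
import asyncio
from sijapi.utilities import run_ssh_command

async def demo(server):
    output, error = await run_ssh_command(server, "uptime")
    print(output or error)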