Auto-update: Mon Jul 1 20:48:53 PDT 2024

This commit is contained in:
sanj 2024-07-01 20:48:53 -07:00
parent 79d139312a
commit acb5e0ccaa
16 changed files with 140 additions and 110 deletions

View file

@@ -1,58 +1 @@
import requests
import os
import json

filename = 'location_log.json'
server = '!{!{ ENTER A PUBLIC URL TO YOUR SIJAPI INSTANCE }!}!'
api_key = '!{!{ ENTER YOUR GLOBAL_API_KEY HERE }!}!'

def upload_location_data(data):
    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json'
    }
    try:
        response = requests.post(f'{server}/locate', json=data, headers=headers)
        if response.status_code == 200:
            print('Location and weather updated successfully.')
            os.remove(filename)
        else:
            print(f'Failed to post data. Status code: {response.status_code}')
            print(response.text)
    except requests.RequestException as e:
        print(f'Error posting data: {e}')

if not os.path.exists(filename):
    print('No data to upload.')
else:
    try:
        with open(filename, 'r') as f:
            data = json.load(f)
        # Ensure all datetime fields are correctly named and add default context if missing
        for location in data:
            if 'date' in location:
                location['datetime'] = location.pop('date')
            # Ensure context dictionary exists with all required keys
            if 'context' not in location:
                location['context'] = {
                    'action': 'manual',
                    'device_type': 'Pythonista',
                    'device_model': None,
                    'device_name': None,
                    'device_os': None
                }
            else:
                context = location['context']
                context.setdefault('action', 'manual')
                context.setdefault('device_type', 'Pythonista')
                context.setdefault('device_model', None)
                context.setdefault('device_name', None)
                context.setdefault('device_os', None)
        upload_location_data(data)
    except FileNotFoundError:
        print(f'File {filename} not found.')
    except json.JSONDecodeError:
        print(f'Error decoding JSON from {filename}.')
    except Exception as e:
        print(f'Unexpected error: {e}')
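
For reference, a sketch of the kind of record this script expects to find in location_log.json. Only the datetime and context keys are actually touched by the cleanup loop; the coordinate fields are assumptions about what the /locate endpoint consumes, not something the script verifies:

example_log = [
    {
        "datetime": "2024-07-01T20:48:53-07:00",
        "latitude": 45.5152,    # assumed field; the script never inspects it
        "longitude": -122.6784, # assumed field
        "context": {
            "action": "manual",
            "device_type": "Pythonista",
            "device_model": None,
            "device_name": None,
            "device_os": None
        }
    }
]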

View file

@@ -1,3 +1,4 @@
+# __init__.py
 import os
 import json
 import yaml

View file

@@ -1,4 +1,5 @@
 #!/Users/sij/miniforge3/envs/api/bin/python
+#__main__.py
 from fastapi import FastAPI, Request, HTTPException, Response
 from fastapi.responses import JSONResponse
 from fastapi.middleware.cors import CORSMiddleware

View file

@@ -1,3 +1,4 @@
+# classes.py
 import asyncio
 import json
 import math

sijapi/cli.py Normal file
View file

@@ -0,0 +1,58 @@
# cli.py
import click
import asyncio
import functools
from datetime import datetime as dt_datetime, timedelta

# Import your async functions and dependencies
from your_main_app import build_daily_note, loc

def async_command(f):
    # Run an async click callback to completion inside asyncio.run(),
    # preserving the function's name and docstring for click's help output.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return asyncio.run(f(*args, **kwargs))
    return wrapper

@click.group()
def cli():
    """CLI for your application."""
    pass

@cli.command()
@click.argument('dt_start')
@click.argument('dt_end')
@async_command
async def bulk_note_range(dt_start: str, dt_end: str):
    """
    Build daily notes for a date range.

    DT_START and DT_END should be in YYYY-MM-DD format.
    """
    try:
        start_date = dt_datetime.strptime(dt_start, "%Y-%m-%d")
        end_date = dt_datetime.strptime(dt_end, "%Y-%m-%d")
    except ValueError:
        click.echo("Error: Dates must be in YYYY-MM-DD format.")
        return
    if start_date > end_date:
        click.echo("Error: Start date must be before or equal to end date.")
        return

    results = []
    current_date = start_date
    while current_date <= end_date:
        formatted_date = await loc.dt(current_date)
        result = await build_daily_note(formatted_date)
        results.append(result)
        current_date += timedelta(days=1)

    click.echo("Generated notes for the following dates:")
    for url in results:
        click.echo(url)

if __name__ == '__main__':
    cli()
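
Assuming the file is saved as cli.py and your_main_app is importable, invocation would look like this (click derives the command name bulk-note-range from the function name bulk_note_range):

    python cli.py bulk-note-range 2024-06-01 2024-06-07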

View file

@@ -1,22 +1,34 @@
 from aura_sr import AuraSR
 from PIL import Image
+import torch
+import os
 
-aura_sr = AuraSR.from_pretrained("fal-ai/AuraSR")
+# Set environment variables for MPS
+os.environ['PYTORCH_MPS_HIGH_WATERMARK_RATIO'] = '0.0'
+os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
+
+# Initialize device as CPU by default
+device = torch.device('cpu')
+
+# Check if MPS is available
+if torch.backends.mps.is_available():
+    if not torch.backends.mps.is_built():
+        print("MPS not available because the current PyTorch install was not built with MPS enabled.")
+    else:
+        device = torch.device('mps:0')
+
+aura_sr = AuraSR.from_pretrained("fal-ai/AuraSR").to(device)
 
 def load_image_from_path(file_path):
+    # Open image file
     return Image.open(file_path)
 
 def upscale_and_save(original_path):
+    # Load the image from the path
     original_image = load_image_from_path(original_path)
+    # Upscale the image using the pretrained model
     upscaled_image = aura_sr.upscale_4x(original_image)
+    # Save the upscaled image back to the original path, overwriting the original
     upscaled_image.save(original_path)
 
-# Now to use the function, provide the image path
+# Insert your image path
 upscale_and_save("/Users/sij/workshop/sijapi/sijapi/testbed/API__00482_ 2.png")

View file

@@ -1,3 +1,4 @@
+# logs.py
 import os
 import sys
 from loguru import logger

View file

@@ -1,6 +1,7 @@
 '''
 Uses whisper_cpp to create an OpenAI-compatible Whisper web service.
 '''
+# routers/asr.py
 import os
 import sys
 import uuid

View file

@@ -66,7 +66,7 @@ IG_VISION_LLM = os.getenv("IG_VISION_LLM")
 IG_PROMPT_LLM = os.getenv("IG_PROMPT_LLM")
 IG_IMG_GEN = os.getenv("IG_IMG_GEN", "ComfyUI")
 IG_OUTPUT_PLATFORMS = os.getenv("IG_OUTPUT_PLATFORMS", "ig,ghost,obsidian").split(',')
-SD_WORKFLOWS_DIR = os.path.join(COMFYUI_DIR, 'workflows')
+IMG_WORKFLOWS_DIR = os.path.join(COMFYUI_DIR, 'workflows')
 COMFYUI_OUTPUT_DIR = COMFYUI_DIR / 'output'
 IG_PROFILES_DIR = os.path.join(BASE_DIR, 'profiles')
 IG_PROFILE_DIR = os.path.join(IG_PROFILES_DIR, PROFILE)
@@ -793,7 +793,7 @@ def load_json(json_payload, workflow):
     if json_payload:
         return json.loads(json_payload)
     elif workflow:
-        workflow_path = os.path.join(SD_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
+        workflow_path = os.path.join(IMG_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
         with open(workflow_path, 'r') as file:
             return json.load(file)
     else:
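
The suffix handling on the workflow argument is easy to misread, so here is the same normalization as a standalone sketch (IMG_WORKFLOWS_DIR is assumed to be configured elsewhere):

import os

IMG_WORKFLOWS_DIR = '/path/to/comfyui/workflows'  # assumption for illustration

def workflow_file(workflow: str) -> str:
    # Append '.json' only when the caller passed a bare workflow name.
    filename = workflow if workflow.endswith('.json') else f'{workflow}.json'
    return os.path.join(IMG_WORKFLOWS_DIR, filename)

workflow_file('txt2img')       # -> .../workflows/txt2img.json
workflow_file('txt2img.json')  # -> .../workflows/txt2img.json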

View file

@@ -2,7 +2,7 @@
 Image generation module using StableDiffusion and similar models by way of ComfyUI.
 DEPENDS ON:
   LLM module
-  COMFYUI_URL, COMFYUI_DIR, COMFYUI_OUTPUT_DIR, TS_SUBNET, TS_ADDRESS, DATA_DIR, SD_CONFIG_DIR, SD_IMAGE_DIR, SD_WORKFLOWS_DIR, LOCAL_HOSTS, API.URL, PHOTOPRISM_USER*, PHOTOPRISM_URL*, PHOTOPRISM_PASS*
+  COMFYUI_URL, COMFYUI_DIR, COMFYUI_OUTPUT_DIR, TS_SUBNET, TS_ADDRESS, DATA_DIR, IMG_CONFIG_DIR, IMG_DIR, IMG_WORKFLOWS_DIR, LOCAL_HOSTS, API.URL, PHOTOPRISM_USER*, PHOTOPRISM_URL*, PHOTOPRISM_PASS*
 *unimplemented.
 '''
@@ -30,7 +30,7 @@ import shutil
 # from photoprism.Photo import Photo
 # from webdav3.client import Client
 from sijapi.routers.llm import query_ollama
-from sijapi import API, L, COMFYUI_URL, COMFYUI_OUTPUT_DIR, SD_CONFIG_PATH, SD_IMAGE_DIR, SD_WORKFLOWS_DIR
+from sijapi import API, L, COMFYUI_URL, COMFYUI_OUTPUT_DIR, IMG_CONFIG_PATH, IMG_DIR, IMG_WORKFLOWS_DIR
 
 img = APIRouter()
@@ -86,7 +86,7 @@ async def workflow(prompt: str, scene: str = None, size: str = None, earlyout: s
     width, height = map(int, size.split('x'))
     L.DEBUG(f"Parsed width: {width}; parsed height: {height}")
 
-    workflow_path = Path(SD_WORKFLOWS_DIR) / scene_workflow['workflow']
+    workflow_path = Path(IMG_WORKFLOWS_DIR) / scene_workflow['workflow']
     workflow_data = json.loads(workflow_path.read_text())
 
     post = {
@@ -104,7 +104,7 @@ async def workflow(prompt: str, scene: str = None, size: str = None, earlyout: s
     print(f"Prompt ID: {prompt_id}")
     max_size = max(width, height) if downscale_to_fit else None
-    destination_path = Path(destination_path).with_suffix(".jpg") if destination_path else SD_IMAGE_DIR / f"{prompt_id}.jpg"
+    destination_path = Path(destination_path).with_suffix(".jpg") if destination_path else IMG_DIR / f"{prompt_id}.jpg"
 
     if earlyout:
         asyncio.create_task(generate_and_save_image(prompt_id, saved_file_key, max_size, destination_path))
@@ -132,7 +132,7 @@ async def generate_and_save_image(prompt_id, saved_file_key, max_size, destinati
 def get_web_path(file_path: Path) -> str:
-    uri = file_path.relative_to(SD_IMAGE_DIR)
+    uri = file_path.relative_to(IMG_DIR)
     web_path = f"{API.URL}/img/{uri}"
     return web_path
@@ -174,8 +174,8 @@ async def get_image(status_data, key):
 async def save_as_jpg(image_data, prompt_id, max_size = None, quality = 100, destination_path: Path = None):
-    destination_path_png = (SD_IMAGE_DIR / prompt_id).with_suffix(".png")
-    destination_path_jpg = destination_path.with_suffix(".jpg") if destination_path else (SD_IMAGE_DIR / prompt_id).with_suffix(".jpg")
+    destination_path_png = (IMG_DIR / prompt_id).with_suffix(".png")
+    destination_path_jpg = destination_path.with_suffix(".jpg") if destination_path else (IMG_DIR / prompt_id).with_suffix(".jpg")
     try:
         destination_path_png.parent.mkdir(parents=True, exist_ok=True)
@@ -224,16 +224,16 @@ def set_presets(workflow_data, preset_values):
 def get_return_path(destination_path):
-    sd_dir = Path(SD_IMAGE_DIR)
+    sd_dir = Path(IMG_DIR)
     if destination_path.parent.samefile(sd_dir):
         return destination_path.name
     else:
         return str(destination_path)
 
 def get_scene(scene):
-    with open(SD_CONFIG_PATH, 'r') as SD_CONFIG_file:
-        SD_CONFIG = yaml.safe_load(SD_CONFIG_file)
-    for scene_data in SD_CONFIG['scenes']:
+    with open(IMG_CONFIG_PATH, 'r') as IMG_CONFIG_file:
+        IMG_CONFIG = yaml.safe_load(IMG_CONFIG_file)
+    for scene_data in IMG_CONFIG['scenes']:
         if scene_data['scene'] == scene:
             L.DEBUG(f"Found scene for \"{scene}\".")
             return scene_data
@@ -246,9 +246,9 @@ def get_matching_scene(prompt):
     prompt_lower = prompt.lower()
     max_count = 0
     scene_data = None
-    with open(SD_CONFIG_PATH, 'r') as SD_CONFIG_file:
-        SD_CONFIG = yaml.safe_load(SD_CONFIG_file)
-    for sc in SD_CONFIG['scenes']:
+    with open(IMG_CONFIG_PATH, 'r') as IMG_CONFIG_file:
+        IMG_CONFIG = yaml.safe_load(IMG_CONFIG_file)
+    for sc in IMG_CONFIG['scenes']:
         count = sum(1 for trigger in sc['triggers'] if trigger in prompt_lower)
         if count > max_count:
             max_count = count
@@ -259,7 +259,7 @@ def get_matching_scene(prompt):
         return scene_data
     else:
         L.DEBUG(f"No matching scenes found, falling back to default scene.")
-        return SD_CONFIG['scenes'][0]
+        return IMG_CONFIG['scenes'][0]
@@ -400,7 +400,7 @@ async def get_generation_options():
 async def load_workflow(workflow_path: str, workflow:str):
-    workflow_path = workflow_path if workflow_path else os.path.join(SD_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
+    workflow_path = workflow_path if workflow_path else os.path.join(IMG_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
     with open(workflow_path, 'r') as file:
         return json.load(file)
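
To make the renamed path helpers concrete: files under IMG_DIR come back as bare names (which get_web_path then serves under /img/), while anything saved elsewhere keeps its full path. A toy version using a plain path comparison instead of samefile, so it runs without the files existing:

from pathlib import Path

IMG_DIR = Path('/data/img')  # assumption: the configured image directory

def get_return_path(destination_path: Path) -> str:
    # Strip the directory only for files that live directly in IMG_DIR.
    if destination_path.parent == IMG_DIR:
        return destination_path.name
    return str(destination_path)

get_return_path(Path('/data/img/abc123.jpg'))  # -> 'abc123.jpg'
get_return_path(Path('/tmp/elsewhere.jpg'))    # -> '/tmp/elsewhere.jpg'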

View file

@@ -1,6 +1,7 @@
 '''
 Interfaces with Ollama and creates an OpenAI-compatible relay API.
 '''
+# routers/llm.py
 from fastapi import APIRouter, HTTPException, Request, Response, BackgroundTasks, File, Form, UploadFile
 from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
 from datetime import datetime as dt_datetime

View file

@@ -1,3 +1,4 @@
+# routers/news.py
 import os
 import uuid
 import asyncio
@@ -90,7 +91,7 @@ async def download_and_save_article(article, site_name, earliest_date, bg_tasks:
     try:
         banner_url = article.top_image
         if banner_url:
-            banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR))
+            banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR / f"{dt_datetime.now().strftime('%Y%m%d%H%M%S')}.jpg"))
         if banner_image:
             banner_markdown = f"![[{OBSIDIAN_RESOURCES_DIR}/{banner_image}]]"
     except Exception as e:
@@ -120,7 +121,7 @@ tags:
         bg_tasks=bg_tasks,
         text=tts_text,
         voice=voice,
-        model="eleven_turbo_v2",
+        model="xtts2",
         podcast=True,
         title=audio_filename,
         output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR
@@ -176,7 +177,7 @@ async def process_news_site(site, bg_tasks: BackgroundTasks):
             earliest_date,
             bg_tasks,
             tts_mode=site.tts if hasattr(site, 'tts') else "off",
-            voice=site.tts if hasattr(site, 'tts') else DEFAULT_11L_VOICE
+            voice=site.voice if hasattr(site, 'voice') else DEFAULT_11L_VOICE
         ))
         tasks.append(task)
@@ -237,12 +238,11 @@ async def archive_post(
 async def clip_get(
     bg_tasks: BackgroundTasks,
     url: str,
-    title: Optional[str] = Query(None),
-    encoding: str = Query('utf-8'),
     tts: str = Query('summary'),
     voice: str = Query(DEFAULT_VOICE)
 ):
-    markdown_filename = await process_article(bg_tasks, url, title, encoding, tts=tts, voice=voice)
+    parsed_content = await parse_article(url)
+    markdown_filename = await process_article(bg_tasks, parsed_content, tts, voice)
     return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
 
 @news.post("/note/add")

View file

@@ -1,6 +1,7 @@
 '''
 Manages an Obsidian vault, in particular daily notes, using information and functionality drawn from the other routers, primarily calendar, email, ig, llm, rag, img, serve, time, tts, and weather.
 '''
+# routers/note.py
 from fastapi import APIRouter, BackgroundTasks, File, UploadFile, Form, HTTPException, Response, Query, Path as FastAPIPath
 from fastapi.responses import JSONResponse, PlainTextResponse
 import os, re

View file

@@ -30,7 +30,7 @@ from selenium.webdriver.support import expected_conditions as EC
 from sijapi import (
     L, LOGS_DIR, TS_ID, CASETABLE_PATH, COURTLISTENER_DOCKETS_URL, COURTLISTENER_API_KEY,
     COURTLISTENER_BASE_URL, COURTLISTENER_DOCKETS_DIR, COURTLISTENER_SEARCH_DIR, ALERTS_DIR,
-    MAC_UN, MAC_PW, MAC_ID, TS_TAILNET, DATA_DIR, SD_IMAGE_DIR, PUBLIC_KEY, OBSIDIAN_VAULT_DIR
+    MAC_UN, MAC_PW, MAC_ID, TS_TAILNET, DATA_DIR, IMG_DIR, PUBLIC_KEY, OBSIDIAN_VAULT_DIR
 )
 from sijapi.utilities import bool_convert, sanitize_filename, assemble_journal_path
 from sijapi.routers import loc, note
@@ -44,7 +44,7 @@ async def get_pgp():
 @serve.get("/img/{image_name}")
 def serve_image(image_name: str):
-    image_path = os.path.join(SD_IMAGE_DIR, image_name)
+    image_path = os.path.join(IMG_DIR, image_name)
     if os.path.exists(image_path):
         return FileResponse(image_path)
     else:

View file

@@ -14,7 +14,7 @@ from typing import Optional, Union, List
 from pydub import AudioSegment
 from TTS.api import TTS
 from pathlib import Path
-from datetime import datetime
+from datetime import datetime as dt_datetime
 from time import time
 import torch
 import traceback
@@ -66,21 +66,26 @@ async def list_11l_voices():
-def select_voice(voice_name: str) -> str:
+async def select_voice(voice_name: str) -> str:
     try:
-        voice_file = VOICE_DIR / f"{voice_name}.wav"
-        L.DEBUG(f"select_voice received query to use voice: {voice_name}. Looking for {voice_file} inside {VOICE_DIR}.")
-        if voice_file.is_file():
-            return str(voice_file)
-        else:
-            raise HTTPException(status_code=404, detail="Voice file not found")
+        # Case-insensitive comparison
+        voice_name_lower = voice_name.lower()
+        L.DEBUG(f"Looking for {voice_name_lower}")
+        for item in VOICE_DIR.iterdir():
+            L.DEBUG(f"Checking {item.name.lower()}")
+            if item.name.lower() == f"{voice_name_lower}.wav":
+                L.DEBUG(f"select_voice received query to use voice: {voice_name}. Found {item} inside {VOICE_DIR}.")
+                return str(item)
+        L.ERR(f"Voice file not found")
+        raise HTTPException(status_code=404, detail="Voice file not found")
     except Exception as e:
         L.ERR(f"Voice file not found: {str(e)}")
         return None
 
 @tts.post("/tts")
 @tts.post("/tts/speak")
 @tts.post("/v1/audio/speech")
@@ -132,13 +137,14 @@ async def generate_speech(
     try:
         model = model if model else await get_model(voice, voice_file)
+        title = title if title else "TTS audio"
+        output_path = output_dir / f"{dt_datetime.now().strftime('%Y%m%d%H%M%S')} {title}.wav"
         if model == "eleven_turbo_v2":
             L.INFO("Using ElevenLabs.")
             audio_file_path = await elevenlabs_tts(model, text, voice, title, output_dir)
         else: # if model == "xtts":
             L.INFO("Using XTTS2")
-            audio_file_path = await local_tts(text, speed, voice, voice_file, podcast, bg_tasks, title, output_dir)
+            audio_file_path = await local_tts(text, speed, voice, voice_file, podcast, bg_tasks, title, output_path)
         #else:
         #    raise HTTPException(status_code=400, detail="Invalid model specified")
@@ -158,7 +164,7 @@ async def generate_speech(
 async def get_model(voice: str = None, voice_file: UploadFile = None):
-    if voice_file or (voice and select_voice(voice)):
+    if voice_file or (voice and await select_voice(voice)):
         return "xtts"
     elif voice and await determine_voice_id(voice):
@@ -220,7 +226,7 @@ async def elevenlabs_tts(model: str, input_text: str, voice: str, title: str = N
     async with httpx.AsyncClient(timeout=httpx.Timeout(300.0)) as client: # 5 minutes timeout
         response = await client.post(url, json=payload, headers=headers)
         output_dir = output_dir if output_dir else TTS_OUTPUT_DIR
-        title = title if title else datetime.now().strftime("%Y%m%d%H%M%S")
+        title = title if title else dt_datetime.now().strftime("%Y%m%d%H%M%S")
         filename = f"{sanitize_filename(title)}.mp3"
         file_path = Path(output_dir) / filename
         if response.status_code == 200:
@@ -245,7 +251,9 @@ async def get_text_content(text: Optional[str], file: Optional[UploadFile]) -> s
 async def get_voice_file_path(voice: str = None, voice_file: UploadFile = None) -> str:
     if voice:
-        return select_voice(voice)
+        L.DEBUG(f"Looking for voice: {voice}")
+        selected_voice = await select_voice(voice)
+        return selected_voice
     elif voice_file and isinstance(voice_file, UploadFile):
         VOICE_DIR.mkdir(exist_ok=True)
@@ -272,8 +280,9 @@ async def get_voice_file_path(voice: str = None, voice_file: UploadFile = None)
         return str(new_file)
     else:
-        L.DEBUG(f"{datetime.now().strftime('%Y%m%d%H%M%S')}: No voice specified or file provided, using default voice: {DEFAULT_VOICE}")
-        return select_voice(DEFAULT_VOICE)
+        L.DEBUG(f"{dt_datetime.now().strftime('%Y%m%d%H%M%S')}: No voice specified or file provided, using default voice: {DEFAULT_VOICE}")
+        selected_voice = await select_voice(DEFAULT_VOICE)
+        return selected_voice
@@ -290,7 +299,7 @@ async def local_tts(
     if output_path:
         file_path = Path(output_path)
     else:
-        datetime_str = datetime.now().strftime("%Y%m%d%H%M%S")
+        datetime_str = dt_datetime.now().strftime("%Y%m%d%H%M%S")
         title = sanitize_filename(title) if title else "Audio"
         filename = f"{datetime_str}_{title}.wav"
         file_path = TTS_OUTPUT_DIR / filename
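
Several of these hunks converge on one timestamped filename convention for generated audio; a sketch of the pattern, with TTS_OUTPUT_DIR as a placeholder for the configured output directory:

from datetime import datetime as dt_datetime
from pathlib import Path

TTS_OUTPUT_DIR = Path('tts_output')  # assumption for illustration

def timestamped_wav(title: str = 'TTS audio') -> Path:
    # '<YYYYmmddHHMMSS> <title>.wav', matching output_path in generate_speech
    return TTS_OUTPUT_DIR / f"{dt_datetime.now().strftime('%Y%m%d%H%M%S')} {title}.wav"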

View file

@@ -1,3 +1,4 @@
+# utilities.py
 import re
 import os
 from fastapi import Form