From acb5e0ccaab4c8c799d373033259a5820889da85 Mon Sep 17 00:00:00 2001
From: sanj <67624670+iodrift@users.noreply.github.com>
Date: Mon, 1 Jul 2024 20:48:53 -0700
Subject: [PATCH] Auto-update: Mon Jul 1 20:48:53 PDT 2024

---
 Extras/Pythonista/uploadGPS.py | 59 +---------------------------------
 sijapi/__init__.py             |  1 +
 sijapi/__main__.py             |  1 +
 sijapi/classes.py              |  1 +
 sijapi/cli.py                  | 58 +++++++++++++++++++++++++++++++++
 sijapi/helpers/upscaler.py     | 30 +++++++++++------
 sijapi/logs.py                 |  1 +
 sijapi/routers/asr.py          |  1 +
 sijapi/routers/ig.py           |  4 +--
 sijapi/routers/img.py          | 32 +++++++++---------
 sijapi/routers/llm.py          |  1 +
 sijapi/routers/news.py         | 12 +++----
 sijapi/routers/note.py         |  1 +
 sijapi/routers/serve.py        |  4 +--
 sijapi/routers/tts.py          | 43 +++++++++++++++----------
 sijapi/utilities.py            |  1 +
 16 files changed, 140 insertions(+), 110 deletions(-)
 create mode 100644 sijapi/cli.py

diff --git a/Extras/Pythonista/uploadGPS.py b/Extras/Pythonista/uploadGPS.py
index 140adbf..e52f495 100644
--- a/Extras/Pythonista/uploadGPS.py
+++ b/Extras/Pythonista/uploadGPS.py
@@ -1,58 +1 @@
-import requests
-import os
-import json
-
-filename = 'location_log.json'
-server = '!{!{ ENTER A PUBLIC URL TO YOUR SIJAPI INSTANCE }!}!'
-api_key = '!{!{ ENTER YOUR GLOBAL_API_KEY HERE }!}!'
-
-def upload_location_data(data):
-    headers = {
-        'Authorization': f'Bearer {api_key}',
-        'Content-Type': 'application/json'
-    }
-
-    try:
-        response = requests.post(f'{server}/locate', json=data, headers=headers)
-        if response.status_code == 200:
-            print('Location and weather updated successfully.')
-            os.remove(filename)
-        else:
-            print(f'Failed to post data. Status code: {response.status_code}')
-            print(response.text)
-    except requests.RequestException as e:
-        print(f'Error posting data: {e}')
-
-if not os.path.exists(filename):
-    print('No data to upload.')
-else:
-    try:
-        with open(filename, 'r') as f:
-            data = json.load(f)
-        # Ensure all datetime fields are correctly named and add default context if missing
-        for location in data:
-            if 'date' in location:
-                location['datetime'] = location.pop('date')
-            # Ensure context dictionary exists with all required keys
-            if 'context' not in location:
-                location['context'] = {
-                    'action': 'manual',
-                    'device_type': 'Pythonista',
-                    'device_model': None,
-                    'device_name': None,
-                    'device_os': None
-                }
-            else:
-                context = location['context']
-                context.setdefault('action', 'manual')
-                context.setdefault('device_type', 'Pythonista')
-                context.setdefault('device_model', None)
-                context.setdefault('device_name', None)
-                context.setdefault('device_os', None)
-        upload_location_data(data)
-    except FileNotFoundError:
-        print(f'File {filename} not found.')
-    except json.JSONDecodeError:
-        print(f'Error decoding JSON from {filename}.')
-    except Exception as e:
-        print(f'Unexpected error: {e}')
+how
\ No newline at end of file
diff --git a/sijapi/__init__.py b/sijapi/__init__.py
index eeae5a9..0aeb2e8 100644
--- a/sijapi/__init__.py
+++ b/sijapi/__init__.py
@@ -1,3 +1,4 @@
+# __init__.py
 import os
 import json
 import yaml
diff --git a/sijapi/__main__.py b/sijapi/__main__.py
index c3ee5f5..c5ef1bb 100755
--- a/sijapi/__main__.py
+++ b/sijapi/__main__.py
@@ -1,4 +1,5 @@
 #!/Users/sij/miniforge3/envs/api/bin/python
+#__main__.py
 from fastapi import FastAPI, Request, HTTPException, Response
 from fastapi.responses import JSONResponse
 from fastapi.middleware.cors import CORSMiddleware
diff --git a/sijapi/classes.py b/sijapi/classes.py
index 481cf05..1dd34c3 100644
--- a/sijapi/classes.py
+++ b/sijapi/classes.py
@@ -1,3 +1,4 @@
+# classes.py
 import asyncio
 import json
 import math
diff --git a/sijapi/cli.py b/sijapi/cli.py
new file mode 100644
index 0000000..0492540
--- /dev/null
+++ b/sijapi/cli.py
@@ -0,0 +1,58 @@
+# cli.py
+
+import click
+import asyncio
+from datetime import datetime as dt_datetime, timedelta
+
+# Import your async functions and dependencies
+from your_main_app import build_daily_note_range_endpoint, loc
+
+def async_command(f):
+    @click.command()
+    @click.pass_context
+    def wrapper(ctx, *args, **kwargs):
+        async def run():
+            return await f(*args, **kwargs)
+        return asyncio.run(run())
+    return wrapper
+
+@click.group()
+def cli():
+    """CLI for your application."""
+    pass
+
+@cli.command()
+@click.argument('dt_start')
+@click.argument('dt_end')
+@async_command
+async def bulk_note_range(dt_start: str, dt_end: str):
+    """
+    Build daily notes for a date range.
+
+    DT_START and DT_END should be in YYYY-MM-DD format.
+    """
+    try:
+        start_date = dt_datetime.strptime(dt_start, "%Y-%m-%d")
+        end_date = dt_datetime.strptime(dt_end, "%Y-%m-%d")
+    except ValueError:
+        click.echo("Error: Dates must be in YYYY-MM-DD format.")
+        return
+
+    if start_date > end_date:
+        click.echo("Error: Start date must be before or equal to end date.")
+        return
+
+    results = []
+    current_date = start_date
+    while current_date <= end_date:
+        formatted_date = await loc.dt(current_date)
+        result = await build_daily_note(formatted_date)
+        results.append(result)
+        current_date += timedelta(days=1)
+
+    click.echo("Generated notes for the following dates:")
+    for url in results:
+        click.echo(url)
+
+if __name__ == '__main__':
+    cli()
\ No newline at end of file
diff --git a/sijapi/helpers/upscaler.py b/sijapi/helpers/upscaler.py
index f6cb537..4e7ea5d 100644
--- a/sijapi/helpers/upscaler.py
+++ b/sijapi/helpers/upscaler.py
@@ -1,22 +1,34 @@
 from aura_sr import AuraSR
 from PIL import Image
+import torch
+import os
 
-aura_sr = AuraSR.from_pretrained("fal-ai/AuraSR")
+# Set environment variables for MPS
+os.environ['PYTORCH_MPS_HIGH_WATERMARK_RATIO'] = '0.0'
+os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
+
+# Initialize device as CPU for default
+device = torch.device('cpu')
+
+# Check if MPS is available
+if torch.backends.mps.is_available():
+    if not torch.backends.mps.is_built():
+        print("MPS not available because the current PyTorch install was not built with MPS enabled.")
+    else:
+        device = torch.device('mps:0')
+
+# Overwrite the default CUDA device with MPS
+torch.cuda.default_stream = device
+
+aura_sr = AuraSR.from_pretrained("fal-ai/AuraSR").to(device)
 
 def load_image_from_path(file_path):
-    # Open image file
     return Image.open(file_path)
 
 def upscale_and_save(original_path):
-    # load the image from the path
     original_image = load_image_from_path(original_path)
-
-    # upscale the image using the pretrained model
     upscaled_image = aura_sr.upscale_4x(original_image)
-
-    # save the upscaled image back to the original path
-    # Overwrite the original image
     upscaled_image.save(original_path)
 
-# Now to use the function, provide the image path
+# Insert your image path
 upscale_and_save("/Users/sij/workshop/sijapi/sijapi/testbed/API__00482_ 2.png")
diff --git a/sijapi/logs.py b/sijapi/logs.py
index 2142a51..4c97f1a 100644
--- a/sijapi/logs.py
+++ b/sijapi/logs.py
@@ -1,3 +1,4 @@
+# logs.py
 import os
 import sys
 from loguru import logger
diff --git a/sijapi/routers/asr.py b/sijapi/routers/asr.py
index adf2caf..399ab15 100644
--- a/sijapi/routers/asr.py
+++ b/sijapi/routers/asr.py
@@ -1,6 +1,7 @@
 '''
 Uses whisper_cpp to create an OpenAI-compatible Whisper web service.
 '''
+# routers/asr.py
 import os
 import sys
 import uuid
diff --git a/sijapi/routers/ig.py b/sijapi/routers/ig.py
index 8dd1e48..6ab6775 100644
--- a/sijapi/routers/ig.py
+++ b/sijapi/routers/ig.py
@@ -66,7 +66,7 @@ IG_VISION_LLM = os.getenv("IG_VISION_LLM")
 IG_PROMPT_LLM = os.getenv("IG_PROMPT_LLM")
 IG_IMG_GEN = os.getenv("IG_IMG_GEN", "ComfyUI")
 IG_OUTPUT_PLATFORMS = os.getenv("IG_OUTPUT_PLATFORMS", "ig,ghost,obsidian").split(',')
-SD_WORKFLOWS_DIR = os.path.join(COMFYUI_DIR, 'workflows')
+IMG_WORKFLOWS_DIR = os.path.join(COMFYUI_DIR, 'workflows')
 COMFYUI_OUTPUT_DIR = COMFYUI_DIR / 'output'
 IG_PROFILES_DIR = os.path.join(BASE_DIR, 'profiles')
 IG_PROFILE_DIR = os.path.join(IG_PROFILES_DIR, PROFILE)
@@ -793,7 +793,7 @@ def load_json(json_payload, workflow):
     if json_payload:
         return json.loads(json_payload)
     elif workflow:
-        workflow_path = os.path.join(SD_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
+        workflow_path = os.path.join(IMG_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
         with open(workflow_path, 'r') as file:
             return json.load(file)
     else:
diff --git a/sijapi/routers/img.py b/sijapi/routers/img.py
index e19ce14..7748a44 100644
--- a/sijapi/routers/img.py
+++ b/sijapi/routers/img.py
@@ -2,7 +2,7 @@
 Image generation module using StableDiffusion and similar models by way of ComfyUI.
 DEPENDS ON:
   LLM module
-  COMFYUI_URL, COMFYUI_DIR, COMFYUI_OUTPUT_DIR, TS_SUBNET, TS_ADDRESS, DATA_DIR, SD_CONFIG_DIR, SD_IMAGE_DIR, SD_WORKFLOWS_DIR, LOCAL_HOSTS, API.URL, PHOTOPRISM_USER*, PHOTOPRISM_URL*, PHOTOPRISM_PASS*
+  COMFYUI_URL, COMFYUI_DIR, COMFYUI_OUTPUT_DIR, TS_SUBNET, TS_ADDRESS, DATA_DIR, IMG_CONFIG_DIR, IMG_DIR, IMG_WORKFLOWS_DIR, LOCAL_HOSTS, API.URL, PHOTOPRISM_USER*, PHOTOPRISM_URL*, PHOTOPRISM_PASS*
 *unimplemented.
 '''
 
@@ -30,7 +30,7 @@ import shutil
 # from photoprism.Photo import Photo
 # from webdav3.client import Client
 from sijapi.routers.llm import query_ollama
-from sijapi import API, L, COMFYUI_URL, COMFYUI_OUTPUT_DIR, SD_CONFIG_PATH, SD_IMAGE_DIR, SD_WORKFLOWS_DIR
+from sijapi import API, L, COMFYUI_URL, COMFYUI_OUTPUT_DIR, IMG_CONFIG_PATH, IMG_DIR, IMG_WORKFLOWS_DIR
 
 img = APIRouter()
 
@@ -86,7 +86,7 @@ async def workflow(prompt: str, scene: str = None, size: str = None, earlyout: s
         width, height = map(int, size.split('x'))
         L.DEBUG(f"Parsed width: {width}; parsed height: {height}")
 
-    workflow_path = Path(SD_WORKFLOWS_DIR) / scene_workflow['workflow']
+    workflow_path = Path(IMG_WORKFLOWS_DIR) / scene_workflow['workflow']
     workflow_data = json.loads(workflow_path.read_text())
 
     post = {
@@ -104,7 +104,7 @@ async def workflow(prompt: str, scene: str = None, size: str = None, earlyout: s
     print(f"Prompt ID: {prompt_id}")
 
     max_size = max(width, height) if downscale_to_fit else None
-    destination_path = Path(destination_path).with_suffix(".jpg") if destination_path else SD_IMAGE_DIR / f"{prompt_id}.jpg"
+    destination_path = Path(destination_path).with_suffix(".jpg") if destination_path else IMG_DIR / f"{prompt_id}.jpg"
 
     if earlyout:
         asyncio.create_task(generate_and_save_image(prompt_id, saved_file_key, max_size, destination_path))
@@ -132,7 +132,7 @@ async def generate_and_save_image(prompt_id, saved_file_key, max_size, destinati
 
 
 def get_web_path(file_path: Path) -> str:
-    uri = file_path.relative_to(SD_IMAGE_DIR)
+    uri = file_path.relative_to(IMG_DIR)
     web_path = f"{API.URL}/img/{uri}"
     return web_path
 
@@ -174,8 +174,8 @@ async def get_image(status_data, key):
 
 
 async def save_as_jpg(image_data, prompt_id, max_size = None, quality = 100, destination_path: Path = None):
-    destination_path_png = (SD_IMAGE_DIR / prompt_id).with_suffix(".png")
-    destination_path_jpg = destination_path.with_suffix(".jpg") if destination_path else (SD_IMAGE_DIR / prompt_id).with_suffix(".jpg")
+    destination_path_png = (IMG_DIR / prompt_id).with_suffix(".png")
+    destination_path_jpg = destination_path.with_suffix(".jpg") if destination_path else (IMG_DIR / prompt_id).with_suffix(".jpg")
 
     try:
         destination_path_png.parent.mkdir(parents=True, exist_ok=True)
@@ -224,16 +224,16 @@ def set_presets(workflow_data, preset_values):
 
 
 def get_return_path(destination_path):
-    sd_dir = Path(SD_IMAGE_DIR)
+    sd_dir = Path(IMG_DIR)
     if destination_path.parent.samefile(sd_dir):
         return destination_path.name
     else:
         return str(destination_path)
 
 def get_scene(scene):
-    with open(SD_CONFIG_PATH, 'r') as SD_CONFIG_file:
-        SD_CONFIG = yaml.safe_load(SD_CONFIG_file)
-    for scene_data in SD_CONFIG['scenes']:
+    with open(IMG_CONFIG_PATH, 'r') as IMG_CONFIG_file:
+        IMG_CONFIG = yaml.safe_load(IMG_CONFIG_file)
+    for scene_data in IMG_CONFIG['scenes']:
         if scene_data['scene'] == scene:
             L.DEBUG(f"Found scene for \"{scene}\".")
             return scene_data
@@ -246,9 +246,9 @@ def get_matching_scene(prompt):
     prompt_lower = prompt.lower()
     max_count = 0
     scene_data = None
-    with open(SD_CONFIG_PATH, 'r') as SD_CONFIG_file:
-        SD_CONFIG = yaml.safe_load(SD_CONFIG_file)
-    for sc in SD_CONFIG['scenes']:
+    with open(IMG_CONFIG_PATH, 'r') as IMG_CONFIG_file:
+        IMG_CONFIG = yaml.safe_load(IMG_CONFIG_file)
+    for sc in IMG_CONFIG['scenes']:
         count = sum(1 for trigger in sc['triggers'] if trigger in prompt_lower)
         if count > max_count:
             max_count = count
@@ -259,7 +259,7 @@ def get_matching_scene(prompt):
         return scene_data
     else:
         L.DEBUG(f"No matching scenes found, falling back to default scene.")
-        return SD_CONFIG['scenes'][0]
+        return IMG_CONFIG['scenes'][0]
 
 
 
@@ -400,7 +400,7 @@ async def get_generation_options():
 
 
 async def load_workflow(workflow_path: str, workflow:str):
-    workflow_path = workflow_path if workflow_path else os.path.join(SD_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
+    workflow_path = workflow_path if workflow_path else os.path.join(IMG_WORKFLOWS_DIR, f"{workflow}.json" if not workflow.endswith('.json') else workflow)
     with open(workflow_path, 'r') as file:
         return json.load(file)
 
diff --git a/sijapi/routers/llm.py b/sijapi/routers/llm.py
index d2be642..4a19e3b 100644
--- a/sijapi/routers/llm.py
+++ b/sijapi/routers/llm.py
@@ -1,6 +1,7 @@
 '''
 Interfaces with Ollama and creates an OpenAI-compatible relay API.
 '''
+# routers/llm.py
 from fastapi import APIRouter, HTTPException, Request, Response, BackgroundTasks, File, Form, UploadFile
 from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
 from datetime import datetime as dt_datetime
diff --git a/sijapi/routers/news.py b/sijapi/routers/news.py
index 9d7c5d9..32097ca 100644
--- a/sijapi/routers/news.py
+++ b/sijapi/routers/news.py
@@ -1,3 +1,4 @@
+# routers/news.py
 import os
 import uuid
 import asyncio
@@ -90,7 +91,7 @@ async def download_and_save_article(article, site_name, earliest_date, bg_tasks:
     try:
         banner_url = article.top_image
         if banner_url:
-            banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR))
+            banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR / f"{dt_datetime.now().strftime('%Y%m%d%H%M%S')}.jpg"))
             if banner_image:
                 banner_markdown = f"![[{OBSIDIAN_RESOURCES_DIR}/{banner_image}]]"
     except Exception as e:
@@ -120,7 +121,7 @@ tags:
                 bg_tasks=bg_tasks,
                 text=tts_text,
                 voice=voice,
-                model="eleven_turbo_v2",
+                model="xtts2",
                 podcast=True,
                 title=audio_filename,
                 output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR
@@ -176,7 +177,7 @@ async def process_news_site(site, bg_tasks: BackgroundTasks):
                 earliest_date,
                 bg_tasks,
                 tts_mode=site.tts if hasattr(site, 'tts') else "off",
-                voice=site.tts if hasattr(site, 'tts') else DEFAULT_11L_VOICE
+                voice=site.voice if hasattr(site, 'voice') else DEFAULT_11L_VOICE
             ))
             tasks.append(task)
 
@@ -237,12 +238,11 @@ async def archive_post(
 async def clip_get(
     bg_tasks: BackgroundTasks,
     url: str,
-    title: Optional[str] = Query(None),
-    encoding: str = Query('utf-8'),
     tts: str = Query('summary'),
     voice: str = Query(DEFAULT_VOICE)
 ):
-    markdown_filename = await process_article(bg_tasks, url, title, encoding, tts=tts, voice=voice)
+    parsed_content = await parse_article(url)
+    markdown_filename = await process_article(bg_tasks, parsed_content, tts, voice)
     return {"message": "Clip saved successfully", "markdown_filename": markdown_filename}
 
 @news.post("/note/add")
diff --git a/sijapi/routers/note.py b/sijapi/routers/note.py
index 38ae5ad..1fd2f00 100644
--- a/sijapi/routers/note.py
+++ b/sijapi/routers/note.py
@@ -1,6 +1,7 @@
 '''
 Manages an Obsidian vault, in particular daily notes, using information and functionality drawn from the other routers, primarily calendar, email, ig, llm, rag, img, serve, time, tts, and weather.
 '''
+# routers/note.py
 from fastapi import APIRouter, BackgroundTasks, File, UploadFile, Form, HTTPException, Response, Query, Path as FastAPIPath
 from fastapi.responses import JSONResponse, PlainTextResponse
 import os, re
diff --git a/sijapi/routers/serve.py b/sijapi/routers/serve.py
index 67a617d..17a4416 100644
--- a/sijapi/routers/serve.py
+++ b/sijapi/routers/serve.py
@@ -30,7 +30,7 @@ from selenium.webdriver.support import expected_conditions as EC
 from sijapi import (
     L, LOGS_DIR, TS_ID, CASETABLE_PATH, COURTLISTENER_DOCKETS_URL, COURTLISTENER_API_KEY,
     COURTLISTENER_BASE_URL, COURTLISTENER_DOCKETS_DIR, COURTLISTENER_SEARCH_DIR, ALERTS_DIR,
-    MAC_UN, MAC_PW, MAC_ID, TS_TAILNET, DATA_DIR, SD_IMAGE_DIR, PUBLIC_KEY, OBSIDIAN_VAULT_DIR
+    MAC_UN, MAC_PW, MAC_ID, TS_TAILNET, DATA_DIR, IMG_DIR, PUBLIC_KEY, OBSIDIAN_VAULT_DIR
 )
 from sijapi.utilities import bool_convert, sanitize_filename, assemble_journal_path
 from sijapi.routers import loc, note
@@ -44,7 +44,7 @@ async def get_pgp():
 
 @serve.get("/img/{image_name}")
 def serve_image(image_name: str):
-    image_path = os.path.join(SD_IMAGE_DIR, image_name)
+    image_path = os.path.join(IMG_DIR, image_name)
     if os.path.exists(image_path):
         return FileResponse(image_path)
     else:
diff --git a/sijapi/routers/tts.py b/sijapi/routers/tts.py
index b5b8dc0..2972607 100644
--- a/sijapi/routers/tts.py
+++ b/sijapi/routers/tts.py
@@ -14,7 +14,7 @@ from typing import Optional, Union, List
 from pydub import AudioSegment
 from TTS.api import TTS
 from pathlib import Path
-from datetime import datetime
+from datetime import datetime as dt_datetime
 from time import time
 import torch
 import traceback
@@ -66,21 +66,26 @@ async def list_11l_voices():
 
 
 
-
-def select_voice(voice_name: str) -> str:
+async def select_voice(voice_name: str) -> str:
     try:
-        voice_file = VOICE_DIR / f"{voice_name}.wav"
-        L.DEBUG(f"select_voice received query to use voice: {voice_name}. Looking for {voice_file} inside {VOICE_DIR}.")
+        # Case Insensitive comparison
+        voice_name_lower = voice_name.lower()
+        L.DEBUG(f"Looking for {voice_name_lower}")
+        for item in VOICE_DIR.iterdir():
+            L.DEBUG(f"Checking {item.name.lower()}")
+            if item.name.lower() == f"{voice_name_lower}.wav":
+                L.DEBUG(f"select_voice received query to use voice: {voice_name}. Found {item} inside {VOICE_DIR}.")
+                return str(item)
+
+        L.ERR(f"Voice file not found")
+        raise HTTPException(status_code=404, detail="Voice file not found")
 
-        if voice_file.is_file():
-            return str(voice_file)
-        else:
-            raise HTTPException(status_code=404, detail="Voice file not found")
     except Exception as e:
         L.ERR(f"Voice file not found: {str(e)}")
         return None
 
 
+
 @tts.post("/tts")
 @tts.post("/tts/speak")
 @tts.post("/v1/audio/speech")
@@ -132,13 +137,14 @@ async def generate_speech(
 
     try:
         model = model if model else await get_model(voice, voice_file)
-
+        title = title if title else "TTS audio"
+        output_path = output_dir / f"{dt_datetime.now().strftime('%Y%m%d%H%M%S')} {title}.wav"
         if model == "eleven_turbo_v2":
             L.INFO("Using ElevenLabs.")
             audio_file_path = await elevenlabs_tts(model, text, voice, title, output_dir)
         else: # if model == "xtts":
             L.INFO("Using XTTS2")
-            audio_file_path = await local_tts(text, speed, voice, voice_file, podcast, bg_tasks, title, output_dir)
+            audio_file_path = await local_tts(text, speed, voice, voice_file, podcast, bg_tasks, title, output_path)
         #else:
         #    raise HTTPException(status_code=400, detail="Invalid model specified")
 
@@ -158,7 +164,7 @@ async def generate_speech(
 
 
 async def get_model(voice: str = None, voice_file: UploadFile = None):
-    if voice_file or (voice and select_voice(voice)):
+    if voice_file or (voice and await select_voice(voice)):
         return "xtts"
 
     elif voice and await determine_voice_id(voice):
@@ -220,7 +226,7 @@ async def elevenlabs_tts(model: str, input_text: str, voice: str, title: str = N
     async with httpx.AsyncClient(timeout=httpx.Timeout(300.0)) as client: # 5 minutes timeout
         response = await client.post(url, json=payload, headers=headers)
         output_dir = output_dir if output_dir else TTS_OUTPUT_DIR
-        title = title if title else datetime.now().strftime("%Y%m%d%H%M%S")
+        title = title if title else dt_datetime.now().strftime("%Y%m%d%H%M%S")
         filename = f"{sanitize_filename(title)}.mp3"
         file_path = Path(output_dir) / filename
         if response.status_code == 200:
@@ -245,7 +251,9 @@ async def get_text_content(text: Optional[str], file: Optional[UploadFile]) -> s
 
 async def get_voice_file_path(voice: str = None, voice_file: UploadFile = None) -> str:
     if voice:
-        return select_voice(voice)
+        L.DEBUG(f"Looking for voice: {voice}")
+        selected_voice = await select_voice(voice)
+        return selected_voice
 
     elif voice_file and isinstance(voice_file, UploadFile):
         VOICE_DIR.mkdir(exist_ok=True)
@@ -272,8 +280,9 @@ async def get_voice_file_path(voice: str = None, voice_file: UploadFile = None)
         return str(new_file)
 
     else:
-        L.DEBUG(f"{datetime.now().strftime('%Y%m%d%H%M%S')}: No voice specified or file provided, using default voice: {DEFAULT_VOICE}")
-        return select_voice(DEFAULT_VOICE)
+        L.DEBUG(f"{dt_datetime.now().strftime('%Y%m%d%H%M%S')}: No voice specified or file provided, using default voice: {DEFAULT_VOICE}")
+        selected_voice = await select_voice(DEFAULT_VOICE)
+        return selected_voice
 
 
 
@@ -290,7 +299,7 @@ async def local_tts(
     if output_path:
         file_path = Path(output_path)
     else:
-        datetime_str = datetime.now().strftime("%Y%m%d%H%M%S")
+        datetime_str = dt_datetime.now().strftime("%Y%m%d%H%M%S")
         title = sanitize_filename(title) if title else "Audio"
         filename = f"{datetime_str}_{title}.wav"
         file_path = TTS_OUTPUT_DIR / filename
diff --git a/sijapi/utilities.py b/sijapi/utilities.py
index e9f6395..6ee7fa4 100644
--- a/sijapi/utilities.py
+++ b/sijapi/utilities.py
@@ -1,3 +1,4 @@
+# utilities.py
 import re
 import os
 from fastapi import Form
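
The async_command helper introduced in sijapi/cli.py above reflects a common pattern for exposing asyncio coroutines as Click commands: a synchronous wrapper drives the coroutine with asyncio.run so Click's machinery never has to deal with an awaitable. Below is a minimal, self-contained sketch of that pattern for reference only; the group and command names here (cli, run-range, run_range) are illustrative placeholders, not anything defined in the repository.

# Illustrative sketch only -- not part of the patch above.
import asyncio
import functools
from datetime import datetime, timedelta

import click


def async_command(f):
    """Wrap an async callback so Click can invoke it synchronously."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # Each CLI invocation runs the coroutine to completion on its own event loop.
        return asyncio.run(f(*args, **kwargs))
    return wrapper


@click.group()
def cli():
    """Example command group (placeholder)."""


@cli.command(name="run-range")
@click.argument("dt_start")
@click.argument("dt_end")
@async_command
async def run_range(dt_start: str, dt_end: str):
    """Echo each day between DT_START and DT_END (YYYY-MM-DD)."""
    try:
        start = datetime.strptime(dt_start, "%Y-%m-%d")
        end = datetime.strptime(dt_end, "%Y-%m-%d")
    except ValueError:
        raise click.BadParameter("Dates must be in YYYY-MM-DD format.")
    current = start
    while current <= end:
        click.echo(current.strftime("%Y-%m-%d"))
        current += timedelta(days=1)


if __name__ == "__main__":
    cli()

Invoked as, for example, python cli.py run-range 2024-07-01 2024-07-07. In this sketch only the group's @cli.command() constructs the Click command object; the helper stays a plain decorator, so the @click.argument declarations attach cleanly to the wrapped async callback.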