Auto-update: Fri Aug 2 00:14:02 PDT 2024

sanj 2024-08-02 00:14:02 -07:00
parent f7d5b06518
commit 49dd8746bf
7 changed files with 46 additions and 311 deletions

View file

@@ -81,6 +81,7 @@ async def transcribe_endpoint(
# If we've reached this point, the transcription has taken too long
return JSONResponse(content={"status": "timeout", "message": "Transcription is taking longer than expected. Please check back later."}, status_code=202)
async def transcribe_audio(file_path, params: TranscribeParams):
debug(f"Transcribing audio file from {file_path}...")
file_path = await convert_to_wav(file_path)
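The 202 response above establishes a poll-and-retry contract: the transcription keeps running server-side and the client is expected to check back later. A minimal client-side sketch of that pattern using aiohttp; the status route and response shape are assumptions for illustration, not routes confirmed by this diff:

import asyncio
from aiohttp import ClientSession

async def poll_transcription(base_url: str, job_id: str, interval: float = 5.0, max_tries: int = 60):
    # Poll a hypothetical status route until the server stops reporting "timeout".
    async with ClientSession() as session:
        for _ in range(max_tries):
            async with session.get(f"{base_url}/transcription/{job_id}") as resp:
                data = await resp.json()
                if data.get("status") != "timeout":
                    return data  # finished (or failed) -- stop polling
            await asyncio.sleep(interval)  # still processing; wait and retry
        raise TimeoutError(f"Transcription {job_id} did not finish after {max_tries} polls")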

View file

@@ -6,29 +6,20 @@ DEPENDS ON:
*unimplemented.
'''
from fastapi import APIRouter, Request, Response, Query
from starlette.datastructures import Address
from fastapi import APIRouter, Request, Query
from fastapi.responses import JSONResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from aiohttp import ClientSession, ClientTimeout
from aiohttp import ClientSession
import aiofiles
from PIL import Image
from pathlib import Path
import uuid
import json
import yaml
import ipaddress
import socket
import subprocess
import os, re, io
import random
from io import BytesIO
import base64
import os
import asyncio
import shutil
# from photoprism.Session import Session
# from photoprism.Photo import Photo
# from webdav3.client import Client
from sijapi.routers.llm import query_ollama
from sijapi import API, L, COMFYUI_URL, COMFYUI_OUTPUT_DIR, IMG_CONFIG_PATH, IMG_DIR, IMG_WORKFLOWS_DIR
@@ -245,6 +236,7 @@ def get_scene(scene):
return scene_data
return None
# This returns the scene with the most trigger words present in the provided prompt;
# if none match, it returns the first scene in the array,
# so the first scene should be considered the default.
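A sketch of the matching rule this comment describes, assuming each scene carries a list of trigger words; the field names are illustrative, not the actual IMG_CONFIG_PATH schema:

def get_matching_scene_sketch(prompt: str, scenes: list) -> dict:
    # Count trigger-word hits per scene; zero hits falls back to scenes[0], the default.
    prompt_lower = prompt.lower()
    best, best_hits = scenes[0], 0
    for scene in scenes:
        hits = sum(1 for word in scene.get("triggers", []) if word.lower() in prompt_lower)
        if hits > best_hits:
            best, best_hits = scene, hits
    return best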
@@ -269,11 +261,6 @@ def get_matching_scene(prompt):
import asyncio
import socket
import subprocess
from typing import Optional
async def ensure_comfy(retries: int = 4, timeout: float = 6.0):
"""
Ensures that ComfyUI is running, starting it if necessary.
@@ -311,35 +298,6 @@ async def ensure_comfy(retries: int = 4, timeout: float = 6.0):
crit(f"Failed to ensure ComfyUI is running after {retries} attempts with {timeout} second intervals.")
raise RuntimeError(f"Failed to ensure ComfyUI is running after {retries} attempts with {timeout} second intervals.")
# async def upload_and_get_shareable_link(image_path):
# try:
# Set up the PhotoPrism session
# pp_session = Session(PHOTOPRISM_USER, PHOTOPRISM_PASS, PHOTOPRISM_URL, use_https=True)
# pp_session.create()
# Start import
# photo = Photo(pp_session)
# photo.start_import(path=os.path.dirname(image_path))
# Give PhotoPrism some time to process the upload
# await asyncio.sleep(5)
# Search for the uploaded photo
# photo_name = os.path.basename(image_path)
# search_results = photo.search(query=f"name:{photo_name}", count=1)
# if search_results['photos']:
# photo_uuid = search_results['photos'][0]['uuid']
# shareable_link = f"https://{PHOTOPRISM_URL}/p/{photo_uuid}"
# return shareable_link
# else:
# err("Could not find the uploaded photo details.")
# return None
# except Exception as e:
# err(f"Error in upload_and_get_shareable_link: {e}")
# return None
@img.get("/image/{prompt_id}")
async def get_image_status(prompt_id: str):
@@ -455,7 +413,6 @@ Even more important, it finds and returns the key to the filepath where the file
return found_key[0]
async def queue_prompt(workflow_data):
await ensure_comfy()
async with ClientSession() as session:
@@ -465,4 +422,4 @@ async def queue_prompt(workflow_data):
data = await response.json()
return data.get('prompt_id')
else:
raise Exception(f"Failed to queue prompt. Status code: {response.status}")
raise Exception(f"Failed to queue prompt. Status code: {response.status}")

View file

@@ -23,7 +23,6 @@ from sijapi import L, News, OBSIDIAN_VAULT_DIR, OBSIDIAN_RESOURCES_DIR, DEFAULT_
from sijapi.utilities import sanitize_filename, assemble_journal_path, assemble_archive_path
from sijapi.routers import gis, llm, tts, note
news = APIRouter()
logger = L.get_module_logger("news")
def debug(text: str): logger.debug(text)
@@ -407,188 +406,3 @@ async def save_file(file: UploadFile, folder: Path) -> Path:
with open(file_path, 'wb') as f:
shutil.copyfileobj(file.file, f)
return file_path
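save_file writes the upload with the synchronous shutil.copyfileobj, which blocks the event loop on large files. A non-blocking alternative sketch using aiofiles (already a dependency elsewhere in this repo); the filename scheme is illustrative, since the path construction is elided above:

import aiofiles
from pathlib import Path
from fastapi import UploadFile

async def save_file_nonblocking(file: UploadFile, folder: Path) -> Path:
    file_path = folder / file.filename  # naming scheme assumed
    async with aiofiles.open(file_path, 'wb') as f:
        while chunk := await file.read(64 * 1024):  # stream in 64 KiB chunks
            await f.write(chunk)
    return file_path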
deprecated = '''
async def process_article2(
bg_tasks: BackgroundTasks,
parsed_content: Article,
tts_mode: str = "summary",
voice: str = DEFAULT_11L_VOICE
):
timestamp = dt_datetime.now().strftime('%b %d, %Y at %H:%M')
readable_title = sanitize_filename(parsed_content.title or timestamp)
markdown_filename, relative_path = assemble_journal_path(dt_datetime.now(), subdir="Articles", filename=readable_title, extension=".md")
try:
summary = await llm.summarize_text(parsed_content.clean_doc, "Summarize the provided text. Respond with the summary and nothing else. Do not otherwise acknowledge the request. Just provide the requested summary.")
summary = summary.replace('\n', ' ') # Remove line breaks
if tts_mode == "full" or tts_mode == "content":
tts_text = parsed_content.clean_doc
elif tts_mode == "summary" or tts_mode == "excerpt":
tts_text = summary
else:
tts_text = None
banner_markdown = ''
try:
banner_url = parsed_content.top_image
if banner_url != '':
banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR))
if banner_image:
banner_markdown = f"![[{OBSIDIAN_RESOURCES_DIR}/{banner_image}]]"
except Exception as e:
err(f"No image found in article")
authors = ', '.join('[[{}]]'.format(author) for author in parsed_content.authors)
published_date = parsed_content.publish_date
frontmatter = f"""---
title: {readable_title}
authors: {authors}
published: {published_date}
added: {timestamp}
banner: "{banner_markdown}"
tags:
"""
frontmatter += '\n'.join(f" - {tag}" for tag in parsed_content.tags)
frontmatter += '\n---\n'
body = f"# {readable_title}\n\n"
if tts_text:
audio_filename = f"{published_date} {readable_title}"
try:
audio_path = await tts.generate_speech(bg_tasks=bg_tasks, text=tts_text, voice=voice, model="eleven_turbo_v2", podcast=True, title=audio_filename,
output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR)
audio_ext = Path(audio_path).suffix
obsidian_link = f"![[{OBSIDIAN_RESOURCES_DIR}/{audio_filename}{audio_ext}]]"
body += f"{obsidian_link}\n\n"
except Exception as e:
err(f"Failed to generate TTS for np3k. {e}")
try:
body += f"by {authors} in {parsed_content.canonical_link}" # update with method for getting the newspaper name
body += f"> [!summary]+\n"
body += f"> {summary}\n\n"
body += parsed_content["content"]
markdown_content = frontmatter + body
except Exception as e:
err(f"Failed to combine elements of article markdown.")
try:
with open(markdown_filename, 'w') as md_file:
md_file.write(markdown_content)
info(f"Successfully saved to {markdown_filename}")
await note.add_to_daily_note(relative_path)
return markdown_filename
except Exception as e:
err(f"Failed to write markdown file")
raise HTTPException(status_code=500, detail=str(e))
except Exception as e:
err(f"Failed to clip: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
async def process_article(
bg_tasks: BackgroundTasks,
url: str,
title: Optional[str] = None,
encoding: str = 'utf-8',
source: Optional[str] = None,
tts_mode: str = "summary",
voice: str = DEFAULT_11L_VOICE
):
timestamp = dt_datetime.now().strftime('%b %d, %Y at %H:%M')
parsed_content = await parse_article(url, source)
if parsed_content is None:
return {"error": "Failed to retrieve content"}
readable_title = sanitize_filename(title or parsed_content.get("title") or timestamp)
markdown_filename, relative_path = assemble_journal_path(dt_datetime.now(), subdir="Articles", filename=readable_title, extension=".md")
try:
summary = await llm.summarize_text(parsed_content["content"], "Summarize the provided text. Respond with the summary and nothing else. Do not otherwise acknowledge the request. Just provide the requested summary.")
summary = summary.replace('\n', ' ') # Remove line breaks
if tts_mode == "full" or tts_mode == "content":
tts_text = parsed_content["content"]
elif tts_mode == "summary" or tts_mode == "excerpt":
tts_text = summary
else:
tts_text = None
banner_markdown = ''
try:
banner_url = parsed_content.get('image', '')
if banner_url != '':
banner_image = download_file(banner_url, Path(OBSIDIAN_VAULT_DIR / OBSIDIAN_RESOURCES_DIR))
if banner_image:
banner_markdown = f"![[{OBSIDIAN_RESOURCES_DIR}/{banner_image}]]"
except Exception as e:
err(f"No image found in article")
authors = ', '.join('[[{}]]'.format(author) for author in parsed_content.get('authors', ['Unknown']))
frontmatter = f"""---
title: {readable_title}
authors: {', '.join('[[{}]]'.format(author) for author in parsed_content.get('authors', ['Unknown']))}
published: {parsed_content.get('date_published', 'Unknown')}
added: {timestamp}
excerpt: {parsed_content.get('excerpt', '')}
banner: "{banner_markdown}"
tags:
"""
frontmatter += '\n'.join(f" - {tag}" for tag in parsed_content.get('tags', []))
frontmatter += '\n---\n'
body = f"# {readable_title}\n\n"
if tts_text:
datetime_str = dt_datetime.now().strftime("%Y%m%d%H%M%S")
audio_filename = f"{datetime_str} {readable_title}"
try:
audio_path = await tts.generate_speech(bg_tasks=bg_tasks, text=tts_text, voice=voice, model="eleven_turbo_v2", podcast=True, title=audio_filename,
output_dir=Path(OBSIDIAN_VAULT_DIR) / OBSIDIAN_RESOURCES_DIR)
audio_ext = Path(audio_path).suffix
obsidian_link = f"![[{OBSIDIAN_RESOURCES_DIR}/{audio_filename}{audio_ext}]]"
body += f"{obsidian_link}\n\n"
except Exception as e:
err(f"Failed to generate TTS for np3k. {e}")
try:
body += f"by {authors} in [{parsed_content.get('domain', urlparse(url).netloc.replace('www.', ''))}]({url}).\n\n"
body += f"> [!summary]+\n"
body += f"> {summary}\n\n"
body += parsed_content["content"]
markdown_content = frontmatter + body
except Exception as e:
err(f"Failed to combine elements of article markdown.")
try:
with open(markdown_filename, 'w', encoding=encoding) as md_file:
md_file.write(markdown_content)
info(f"Successfully saved to {markdown_filename}")
await note.add_to_daily_note(relative_path)
return markdown_filename
except Exception as e:
err(f"Failed to write markdown file")
raise HTTPException(status_code=500, detail=str(e))
except Exception as e:
err(f"Failed to clip {url}: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
'''

View file

@@ -383,7 +383,6 @@ created: "{dt_datetime.now().strftime("%Y-%m-%d %H:%M:%S")}"
async def build_daily_timeslips(date):
'''
@@ -956,17 +955,6 @@ async def format_events_as_markdown(event_data: Dict[str, Union[str, List[Dict[s
if event['description']:
description = event['description']
# # This was intended to clean up the descriptions of Zoom and Teams events but is presently broken; should be an easy fix.
# if 'Zoom Meeting' in description:
# description_parts = description.split('---')
# if len(description_parts) > 2:
# description = description_parts[1].strip()
# if 'Microsoft Teams' in description:
# description_parts = description.split('---')
# if len(description_parts) > 2:
# event_data['description'] = description_parts[1].strip()
# description = remove_characters(description)
if len(description) > 150:
description = await llm.summarize_text(description, length_override=150)
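The comments deleted above described an intended cleanup of Zoom and Teams boilerplate that never worked. For the record, a sketch of what that fix might look like, assuming the useful text sits between the first two '---' separators those comments mention:

def strip_meeting_boilerplate(description: str) -> str:
    # Keep only the middle segment of '---'-delimited Zoom/Teams descriptions.
    if 'Zoom Meeting' in description or 'Microsoft Teams' in description:
        parts = description.split('---')
        if len(parts) > 2:
            return parts[1].strip()
    return description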

View file

@@ -572,10 +572,4 @@ def flag_emoji(country_code: str):
@time.head("/time/")
async def read_root():
return {}
@time.get("/time/")
async def root():
return {"message": "Ring, ring, ring, ring, ring, ring, ring. \n\n. Banana phone."}
return {}

View file

@@ -167,8 +167,6 @@ async def generate_speech(
raise HTTPException(status_code=500, detail=f"Failed to generate speech: {str(e)}")
async def get_model(voice: str = None, voice_file: UploadFile = None):
if voice_file or (voice and await select_voice(voice)):
return "xtts"

View file

@@ -2,6 +2,7 @@
Uses the VisualCrossing API and Postgres/PostGIS to source local weather forecasts and history.
'''
import asyncio
import traceback
from fastapi import APIRouter, HTTPException, Query
from fastapi import HTTPException
from fastapi.responses import JSONResponse
@@ -166,7 +167,7 @@ async def store_weather_to_db(date_time: dt_datetime, weather_data: dict):
try:
daily_weather_query = '''
INSERT INTO DailyWeather (
INSERT INTO dailyweather (
sunrise, sunriseepoch, sunset, sunsetepoch, description,
tempmax, tempmin, uvindex, winddir, windspeed, icon, last_updated,
datetime, datetimeepoch, temp, feelslikemax, feelslikemin, feelslike,
@@ -178,7 +179,7 @@ async def store_weather_to_db(date_time: dt_datetime, weather_data: dict):
RETURNING id
'''
daily_weather_id = await API.execute_write_query(daily_weather_query, *daily_weather_params, table_name="DailyWeather")
daily_weather_id = await API.execute_write_query(daily_weather_query, *daily_weather_params, table_name="dailyweather")
if 'hours' in day_data:
debug(f"Processing hours now...")
@@ -219,13 +220,13 @@ async def store_weather_to_db(date_time: dt_datetime, weather_data: dict):
try:
hourly_weather_query = '''
INSERT INTO HourlyWeather (daily_weather_id, datetime, datetimeepoch, temp, feelslike, humidity, dew, precip, precipprob,
INSERT INTO hourlyweather (daily_weather_id, datetime, datetimeepoch, temp, feelslike, humidity, dew, precip, precipprob,
preciptype, snow, snowdepth, windgust, windspeed, winddir, pressure, cloudcover, visibility, solarradiation, solarenergy,
uvindex, severerisk, conditions, icon, stations, source)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26)
RETURNING id
'''
hourly_weather_id = await API.execute_write_query(hourly_weather_query, *hourly_weather_params, table_name="HourlyWeather")
hourly_weather_id = await API.execute_write_query(hourly_weather_query, *hourly_weather_params, table_name="hourlyweather")
debug(f"Done processing hourly_weather_id {hourly_weather_id}")
except Exception as e:
err(f"EXCEPTION: {e}")
@@ -241,60 +242,42 @@ async def store_weather_to_db(date_time: dt_datetime, weather_data: dict):
async def get_weather_from_db(date_time: dt_datetime, latitude: float, longitude: float):
warn(f"Using {date_time.strftime('%Y-%m-%d %H:%M:%S')} as our datetime in get_weather_from_db.")
async with API.get_connection() as conn:
query_date = date_time.date()
try:
# Query to get daily weather data
query = '''
SELECT DW.* FROM DailyWeather DW
WHERE DW.datetime::date = $1
AND ST_DWithin(DW.location::geography, ST_MakePoint($2,$3)::geography, 8046.72)
ORDER BY ST_Distance(DW.location, ST_MakePoint($4, $5)::geography) ASC
LIMIT 1
'''
query_date = date_time.date()
try:
# Query to get daily weather data
daily_query = '''
SELECT DW.* FROM dailyweather DW
WHERE DW.datetime::date = $1
AND ST_DWithin(DW.location::geography, ST_MakePoint($2,$3)::geography, 8046.72)
ORDER BY ST_Distance(DW.location, ST_MakePoint($4, $5)::geography) ASC
LIMIT 1
'''
daily_weather_record = await conn.fetchrow(query, query_date, longitude, latitude, longitude, latitude)
if daily_weather_record is None:
debug(f"No daily weather data retrieved from database.")
return None
# Convert asyncpg.Record to a mutable dictionary
daily_weather_data = dict(daily_weather_record)
# debug(f"Daily weather data prior to tz corrections: {daily_weather_data}")
# Now we can modify the dictionary
# tz = await GEO.tz_at(latitude, longitude)
# daily_weather_data['datetime'] = await gis.dt(daily_weather_data.get('datetime'), tz)
# daily_weather_data['sunrise'] = await gis.dt(daily_weather_data.get('sunrise'), tz)
# daily_weather_data['sunset'] = await gis.dt(daily_weather_data.get('sunset'), tz)
# debug(f"Daily weather data after tz corrections: {daily_weather_data}")
# Query to get hourly weather data
query = '''
SELECT HW.* FROM HourlyWeather HW
WHERE HW.daily_weather_id = $1
'''
hourly_weather_records = await conn.fetch(query, daily_weather_data['id'])
hourly_weather_data = []
for record in hourly_weather_records:
hour_data = dict(record)
# hour_data['datetime'] = await gis.dt(hour_data.get('datetime'), tz)
hourly_weather_data.append(hour_data)
# debug(f"Hourly weather data after tz corrections: {hourly_weather_data}")
day = {
'DailyWeather': daily_weather_data,
'HourlyWeather': hourly_weather_data,
}
# debug(f"day: {day}")
return day
except Exception as e:
err(f"Unexpected error occurred: {e}")
daily_weather_records = await API.execute_read_query(daily_query, query_date, longitude, latitude, longitude, latitude, table_name='dailyweather')
if not daily_weather_records:
debug(f"No daily weather data retrieved from database.")
return None
daily_weather_data = daily_weather_records[0] # Get the first (and only) record
# Query to get hourly weather data
hourly_query = '''
SELECT HW.* FROM hourlyweather HW
WHERE HW.daily_weather_id = $1
'''
hourly_weather_records = await API.execute_read_query(hourly_query, daily_weather_data['id'], table_name='hourlyweather')
day = {
'DailyWeather': daily_weather_data,
'HourlyWeather': hourly_weather_records,
}
return day
except Exception as e:
err(f"Unexpected error occurred in get_weather_from_db: {e}")
err(f"Traceback: {traceback.format_exc()}")
return None
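The daily query above is a proximity lookup: with geography casts, ST_DWithin measures in meters, so 8046.72 is a five-mile radius, and the ORDER BY ST_Distance picks the closest stored row within it. A usage sketch for the refactored reader; the coordinates are illustrative:

from datetime import datetime

async def example_weather_lookup():
    # Nearest stored weather within ~5 miles of Portland, OR for noon on Aug 1.
    day = await get_weather_from_db(datetime(2024, 8, 1, 12, 0), 45.5152, -122.6784)
    if day is None:
        return None
    daily = day['DailyWeather']
    hours = day['HourlyWeather']
    return daily['id'], len(hours)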