Auto-update: Mon Aug 12 10:35:07 PDT 2024

sanj 2024-08-12 10:35:07 -07:00
parent 14319edaa5
commit 013a9858c9
41 changed files with 0 additions and 48411 deletions

@@ -1,237 +0,0 @@
#!/usr/bin/env python3
import requests
import json
import time
import os
import subprocess
import sys
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from datetime import datetime
# Environment variables for database connection
DB_NAME = os.getenv('DB_NAME', 'sij')
DB_USER = os.getenv('DB_USER', 'sij')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'Synchr0!')
DB_HOST = os.getenv('DB_HOST', 'localhost')
DB_PORT = os.getenv('DB_PORT', '5432')
def get_feature_count(url):
params = {
'where': '1=1',
'returnCountOnly': 'true',
'f': 'json'
}
retries = Retry(total=10, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])
with requests.Session() as session:
session.mount("https://", HTTPAdapter(max_retries=retries))
response = session.get(url, params=params, timeout=30)
response.raise_for_status()
data = response.json()
return data.get('count', 0)
def fetch_features(url, offset, num, max_retries=5):
params = {
'where': '1=1',
'outFields': '*',
'geometryPrecision': 6,
'outSR': 4326,
'f': 'json',
'resultOffset': offset,
'resultRecordCount': num
}
for attempt in range(max_retries):
try:
retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
with requests.Session() as session:
session.mount("https://", HTTPAdapter(max_retries=retries))
response = session.get(url, params=params, timeout=30)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Error fetching features (attempt {attempt + 1}/{max_retries}): {e}")
if attempt == max_retries - 1:
raise
time.sleep(5 * (attempt + 1)) # Linear backoff: wait longer after each failed attempt
def download_layer(layer_num, layer_name):
base_dir = os.path.expanduser('~/data')
os.makedirs(base_dir, exist_ok=True)
file_path = os.path.join(base_dir, f'PLSS_{layer_name}.geojson')
temp_file_path = os.path.join(base_dir, f'PLSS_{layer_name}_temp.json')
url = f"https://gis.blm.gov/arcgis/rest/services/Cadastral/BLM_Natl_PLSS_CadNSDI/MapServer/{layer_num}/query"
total_count = get_feature_count(url)
print(f"Total {layer_name} features: {total_count}")
batch_size = 1000
chunk_size = 10000 # Write to file every 10,000 features
offset = 0
all_features = []
# Check if temporary file exists and load its content
if os.path.exists(temp_file_path):
try:
with open(temp_file_path, 'r') as f:
all_features = json.load(f)
offset = len(all_features)
print(f"Resuming download from offset {offset}")
except json.JSONDecodeError:
print("Error reading temporary file. Starting download from the beginning.")
offset = 0
all_features = []
try:
while offset < total_count:
print(f"Fetching {layer_name} features {offset} to {offset + batch_size}...")
data = fetch_features(url, offset, batch_size)
new_features = data.get('features', [])
if not new_features:
break
all_features.extend(new_features)
offset += len(new_features)
# Progress indicator
progress = offset / total_count
bar_length = 30
filled_length = int(bar_length * progress)
bar = '=' * filled_length + '-' * (bar_length - filled_length)
print(f'\rProgress: [{bar}] {progress:.1%} ({offset}/{total_count} features)', end='', flush=True)
# Save progress to temporary file every chunk_size features
if len(all_features) % chunk_size == 0:
with open(temp_file_path, 'w') as f:
json.dump(all_features, f)
time.sleep(1)
print(f"\nTotal {layer_name} features fetched: {len(all_features)}")
# Write final GeoJSON file
with open(file_path, 'w') as f:
f.write('{"type": "FeatureCollection", "features": [\n')
for i, feature in enumerate(all_features):
geojson_feature = {
"type": "Feature",
"properties": feature['attributes'],
"geometry": feature['geometry']
}
json.dump(geojson_feature, f)
if i < len(all_features) - 1:
f.write(',\n')
f.write('\n]}')
print(f"GeoJSON file saved as '{file_path}'")
# Remove temporary file
if os.path.exists(temp_file_path):
os.remove(temp_file_path)
return file_path
except Exception as e:
print(f"\nError during download: {e}")
print(f"Partial data saved in {temp_file_path}")
return None
def check_postgres_connection():
try:
subprocess.run(['psql', '-h', DB_HOST, '-p', DB_PORT, '-U', DB_USER, '-d', DB_NAME, '-c', 'SELECT 1;'],
check=True, capture_output=True, text=True)
return True
except subprocess.CalledProcessError:
return False
def check_postgis_extension():
try:
result = subprocess.run(['psql', '-h', DB_HOST, '-p', DB_PORT, '-U', DB_USER, '-d', DB_NAME,
'-c', "SELECT 1 FROM pg_extension WHERE extname = 'postgis';"],
check=True, capture_output=True, text=True)
return '1' in result.stdout
except subprocess.CalledProcessError:
return False
def create_postgis_extension():
try:
subprocess.run(['psql', '-h', DB_HOST, '-p', DB_PORT, '-U', DB_USER, '-d', DB_NAME,
'-c', "CREATE EXTENSION IF NOT EXISTS postgis;"],
check=True, capture_output=True, text=True)
print("PostGIS extension created successfully.")
except subprocess.CalledProcessError as e:
print(f"Error creating PostGIS extension: {e}")
sys.exit(1)
def import_to_postgis(file_path, table_name):
if not check_postgres_connection():
print("Error: Unable to connect to PostgreSQL. Please check your connection settings.")
sys.exit(1)
if not check_postgis_extension():
print("PostGIS extension not found. Attempting to create it...")
create_postgis_extension()
ogr2ogr_command = [
'ogr2ogr',
'-f', 'PostgreSQL',
f'PG:dbname={DB_NAME} user={DB_USER} password={DB_PASSWORD} host={DB_HOST} port={DB_PORT}',
file_path,
'-nln', table_name,
'-overwrite'
]
try:
subprocess.run(ogr2ogr_command, check=True, capture_output=True, text=True)
print(f"Data successfully imported into PostGIS table: {table_name}")
except subprocess.CalledProcessError as e:
print(f"Error importing data into PostGIS: {e}")
print(f"Command that failed: {e.cmd}")
print(f"Error output: {e.stderr}")
def check_ogr2ogr():
try:
subprocess.run(['ogr2ogr', '--version'], check=True, capture_output=True, text=True)
return True
except subprocess.CalledProcessError:
return False
except FileNotFoundError:
return False
def main():
if not check_ogr2ogr():
print("Error: ogr2ogr not found. Please install GDAL/OGR tools.")
print("On Debian: sudo apt-get install gdal-bin")
print("On macOS with Homebrew: brew install gdal")
sys.exit(1)
try:
township_file = os.path.expanduser('~/data/PLSS_Townships.geojson')
if not os.path.exists(township_file):
township_file = download_layer(1, "Townships")
if township_file:
import_to_postgis(township_file, "public.plss_townships")
else:
print("Failed to download Townships data. Skipping import.")
section_file = os.path.expanduser('~/data/PLSS_Sections.geojson')
if not os.path.exists(section_file):
section_file = download_layer(2, "Sections")
if section_file:
import_to_postgis(section_file, "public.plss_sections")
else:
print("Failed to download Sections data. Skipping import.")
except requests.exceptions.RequestException as e:
print(f"Error fetching data: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
if __name__ == "__main__":
main()
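As a quick sanity check before the PostGIS import, the generated file can be loaded and counted with a few lines of Python (a minimal sketch; the path matches the default that download_layer writes above):

import json
import os

path = os.path.expanduser('~/data/PLSS_Townships.geojson')
with open(path) as f:
    collection = json.load(f)
# A valid output reports "FeatureCollection" and the feature total printed during download
print(collection['type'], len(collection['features']))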

@@ -1,133 +0,0 @@
# CaPLSS_downloader_and_importer.py
import requests
import json
import time
import os
import subprocess
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def get_feature_count(url):
params = {
'where': '1=1',
'returnCountOnly': 'true',
'f': 'json'
}
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session = requests.Session()
session.mount("https://", adapter)
response = session.get(url, params=params, timeout=15) # Add timeout parameter
response.raise_for_status()
data = response.json()
return data.get('count', 0)
def fetch_features(url, offset, num):
params = {
'where': '1=1',
'outFields': '*',
'geometryPrecision': 6,
'outSR': 4326,
'f': 'json',
'resultOffset': offset,
'resultRecordCount': num
}
response = requests.get(url, params=params)
response.raise_for_status()
return response.json()
def download_layer(layer_num, layer_name):
url = f"https://gis.blm.gov/arcgis/rest/services/Cadastral/BLM_Natl_PLSS_CadNSDI/MapServer/{layer_num}/query"
total_count = get_feature_count(url)
print(f"Total {layer_name} features: {total_count}")
batch_size = 1000
offset = 0
all_features = []
while offset < total_count:
print(f"Fetching {layer_name} features {offset} to {offset + batch_size}...")
data = fetch_features(url, offset, batch_size)
new_features = data.get('features', [])
if not new_features:
break
all_features.extend(new_features)
offset += len(new_features)
print(f"Progress: {len(all_features)}/{total_count} features")
time.sleep(1) # Be nice to the server
print(f"Total {layer_name} features fetched: {len(all_features)}")
# Convert to GeoJSON
geojson_features = [
{
"type": "Feature",
"properties": feature['attributes'],
"geometry": feature['geometry']
} for feature in all_features
]
full_geojson = {
"type": "FeatureCollection",
"features": geojson_features
}
# Define a base directory that exists on both macOS and Debian
base_dir = os.path.expanduser('~/data')
os.makedirs(base_dir, exist_ok=True) # Create the directory if it doesn't exist
# Use os.path.join to construct the file path
file_path = os.path.join(base_dir, f'PLSS_{layer_name}.geojson')
# Save to file
with open(file_path, 'w') as f:
json.dump(full_geojson, f)
print(f"GeoJSON file saved as '{file_path}'")
return file_path
def import_to_postgis(file_path, table_name):
db_name = 'sij'
db_user = 'sij'
db_password = 'Synchr0!'
ogr2ogr_command = [
'ogr2ogr',
'-f', 'PostgreSQL',
f'PG:dbname={db_name} user={db_user} password={db_password}',
file_path,
'-nln', table_name,
'-overwrite'
]
subprocess.run(ogr2ogr_command, check=True)
print(f"Data successfully imported into PostGIS table: {table_name}")
def main():
try:
# Download and import Townships (Layer 1)
township_file = download_layer(1, "Townships")
import_to_postgis(township_file, "public.plss_townships")
# Download and import Sections (Layer 2)
section_file = download_layer(2, "Sections")
import_to_postgis(section_file, "public.plss_sections")
except requests.exceptions.RequestException as e:
print(f"Error fetching data: {e}")
except subprocess.CalledProcessError as e:
print(f"Error importing data into PostGIS: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
if __name__ == "__main__":
main()
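Note that fetch_features here calls requests.get directly, without the retry adapter or timeout that get_feature_count sets up. A small shared helper along these lines (a sketch, not part of the original file) would give both requests the same retry behavior:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(total=10, backoff_factor=0.5):
    # Session that retries transient server errors before giving up
    retries = Retry(total=total, backoff_factor=backoff_factor,
                    status_forcelist=[500, 502, 503, 504])
    session = requests.Session()
    session.mount("https://", HTTPAdapter(max_retries=retries))
    return session

Both get_feature_count and fetch_features could then call make_session().get(url, params=params, timeout=30).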

@@ -1,73 +0,0 @@
import requests
import PyPDF2
import io
import re
def scrape_data_from_pdf(url):
response = requests.get(url)
pdf_file = io.BytesIO(response.content)
pdf_reader = PyPDF2.PdfReader(pdf_file)
all_text = ""
for page in pdf_reader.pages:
all_text += page.extract_text() + "\n"
return all_text
def parse_data(raw_data):
lines = raw_data.split('\n')
data = []
current_entry = None
for line in lines:
line = line.strip()
if re.match(r'\d+-\d+-\d+-\w+', line):
if current_entry:
data.append(current_entry)
current_entry = {'Harvest Document': line, 'Raw Data': []}
elif current_entry:
current_entry['Raw Data'].append(line)
if current_entry:
data.append(current_entry)
return data
def filter_data(data):
return [entry for entry in data if any(owner.lower() in ' '.join(entry['Raw Data']).lower() for owner in ["Sierra Pacific", "SPI", "Land & Timber"])]
def extract_location(raw_data):
location = []
for line in raw_data:
if 'MDBM:' in line or 'HBM:' in line:
location.append(line)
return ' '.join(location)
def extract_plss_coordinates(text):
pattern = r'(\w+): T(\d+)([NSEW]) R(\d+)([NSEW]) S(\d+)'
return re.findall(pattern, text)
# Main execution
url = "https://caltreesplans.resources.ca.gov/Caltrees/Report/ShowReport.aspx?module=TH_Document&reportID=492&reportType=LINK_REPORT_LIST"
raw_data = scrape_data_from_pdf(url)
parsed_data = parse_data(raw_data)
print(f"Total timber plans parsed: {len(parsed_data)}")
filtered_data = filter_data(parsed_data)
print(f"Found {len(filtered_data)} matching entries.")
for plan in filtered_data:
print("\nHarvest Document:", plan['Harvest Document'])
location = extract_location(plan['Raw Data'])
print("Location:", location)
plss_coordinates = extract_plss_coordinates(location)
print("PLSS Coordinates:")
for coord in plss_coordinates:
meridian, township, township_dir, range_, range_dir, section = coord
print(f" {meridian}: T{township}{township_dir} R{range_}{range_dir} S{section}")
print("-" * 50)

@@ -1 +0,0 @@
{"offset": 50000}

@@ -1,23 +0,0 @@
#!/Users/sij/miniforge3/envs/sijapi/bin/python
import sys
import asyncio
from fastapi import BackgroundTasks
from sijapi.routers.news import process_and_save_article
async def main():
if len(sys.argv) != 2:
print("Usage: python script.py <article_url>")
sys.exit(1)
url = sys.argv[1]
bg_tasks = BackgroundTasks()
try:
result = await process_and_save_article(bg_tasks, url)
print(result)
except Exception as e:
print(f"Error processing article: {str(e)}")
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())
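One caveat worth noting: a BackgroundTasks instance created by hand like this is not tied to a FastAPI request, so any tasks that process_and_save_article queues on it will not run automatically; if that matters here, the script would need to await bg_tasks() itself after the call returns.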

@@ -1,71 +0,0 @@
import asyncio
import asyncpg
import yaml
from pathlib import Path
async def load_config():
config_path = Path(__file__).parent.parent / 'config' / 'db.yaml'
with open(config_path, 'r') as file:
return yaml.safe_load(file)
async def add_foreign_key_constraint(conn):
# Ensure short_code is not null in both tables
await conn.execute("""
ALTER TABLE short_urls
ALTER COLUMN short_code SET NOT NULL;
""")
await conn.execute("""
ALTER TABLE click_logs
ALTER COLUMN short_code SET NOT NULL;
""")
# Add unique constraint to short_urls.short_code if it doesn't exist
await conn.execute("""
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_constraint
WHERE conname = 'short_urls_short_code_key'
) THEN
ALTER TABLE short_urls
ADD CONSTRAINT short_urls_short_code_key UNIQUE (short_code);
END IF;
END $$;
""")
# Add foreign key constraint
await conn.execute("""
ALTER TABLE click_logs
ADD CONSTRAINT fk_click_logs_short_urls
FOREIGN KEY (short_code)
REFERENCES short_urls(short_code)
ON DELETE CASCADE;
""")
print("Foreign key constraint added successfully.")
async def main():
config = await load_config()
source_server = config['POOL'][0] # sij-mbp16
conn_params = {
'database': source_server['db_name'],
'user': source_server['db_user'],
'password': source_server['db_pass'],
'host': source_server['ts_ip'],
'port': source_server['db_port']
}
conn = await asyncpg.connect(**conn_params)
try:
await add_foreign_key_constraint(conn)
except Exception as e:
print(f"An error occurred: {str(e)}")
finally:
await conn.close()
if __name__ == "__main__":
asyncio.run(main())
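A quick way to confirm the constraint landed is to query pg_constraint afterwards. A sketch using the same conn_params dictionary that main() builds above:

import asyncio
import asyncpg

async def list_click_log_foreign_keys(conn_params: dict):
    # Print every foreign-key constraint currently defined on click_logs
    conn = await asyncpg.connect(**conn_params)
    try:
        rows = await conn.fetch(
            "SELECT conname, pg_get_constraintdef(oid) AS definition "
            "FROM pg_constraint "
            "WHERE conrelid = 'click_logs'::regclass AND contype = 'f';"
        )
        for row in rows:
            print(row['conname'], row['definition'])
    finally:
        await conn.close()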

@@ -1,25 +0,0 @@
version: '3.8'
services:
  db:
    image: postgis/postgis:16-3.4
    container_name: sij_postgres
    environment:
      POSTGRES_DB: sij
      POSTGRES_USER: sij
      POSTGRES_PASSWORD: Synchr0!
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./init-db.sh:/docker-entrypoint-initdb.d/init-db.sh
    ports:
      - "5432:5432"
    networks:
      - sij_network
networks:
  sij_network:
    driver: bridge
volumes:
  postgres_data:
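With this file saved next to init-db.sh, docker compose up -d brings up the PostGIS container; the init script only runs the first time the postgres_data volume is initialized, not on subsequent restarts.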

@@ -1,11 +0,0 @@
#!/bin/bash
set -e
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS postgis_topology;
EOSQL
# Modify pg_hba.conf to allow connections from Tailscale network
echo "host all all 100.64.64.0/24 trust" >> /var/lib/postgresql/data/pg_hba.conf

@@ -1,66 +0,0 @@
import asyncio
import asyncpg
import psycopg2
import sys
async def try_async_connect(host, port, user, password, database):
try:
conn = await asyncpg.connect(
host=host,
port=port,
user=user,
password=password,
database=database
)
version = await conn.fetchval('SELECT version()')
print(f"Async connection successful to {host}:{port}")
print(f"PostgreSQL version: {version}")
await conn.close()
return True
except Exception as e:
print(f"Async connection failed to {host}:{port}")
print(f"Error: {str(e)}")
return False
def try_sync_connect(host, port, user, password, database):
try:
conn = psycopg2.connect(
host=host,
port=port,
user=user,
password=password,
database=database
)
cur = conn.cursor()
cur.execute('SELECT version()')
version = cur.fetchone()[0]
print(f"Sync connection successful to {host}:{port}")
print(f"PostgreSQL version: {version}")
conn.close()
return True
except Exception as e:
print(f"Sync connection failed to {host}:{port}")
print(f"Error: {str(e)}")
return False
async def main():
# Database connection parameters
port = 5432
user = 'sij'
password = 'Synchr0!'
database = 'sij'
hosts = ['100.64.64.20', '127.0.0.1', 'localhost']
print("Attempting asynchronous connections:")
for host in hosts:
await try_async_connect(host, port, user, password, database)
print()
print("Attempting synchronous connections:")
for host in hosts:
try_sync_connect(host, port, user, password, database)
print()
if __name__ == "__main__":
asyncio.run(main())

@@ -1,89 +0,0 @@
import psycopg2
from psycopg2 import sql
def connect_to_db():
return psycopg2.connect(
dbname='sij',
user='sij',
password='Synchr0!',
host='localhost' # Adjust if your database is not on localhost
)
def get_table_info(conn):
with conn.cursor() as cur:
# Get all tables in the public schema
cur.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public'
""")
tables = cur.fetchall()
table_info = {}
for (table_name,) in tables:
table_info[table_name] = {
'primary_keys': get_primary_keys(cur, table_name),
'foreign_keys': get_foreign_keys(cur, table_name)
}
return table_info
def get_primary_keys(cur, table_name):
cur.execute("""
SELECT a.attname
FROM pg_index i
JOIN pg_attribute a ON a.attrelid = i.indrelid
AND a.attnum = ANY(i.indkey)
WHERE i.indrelid = %s::regclass
AND i.indisprimary
""", (table_name,))
return [row[0] for row in cur.fetchall()]
def get_foreign_keys(cur, table_name):
cur.execute("""
SELECT
tc.constraint_name,
kcu.column_name,
ccu.table_name AS foreign_table_name,
ccu.column_name AS foreign_column_name
FROM
information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.constraint_column_usage AS ccu
ON ccu.constraint_name = tc.constraint_name
AND ccu.table_schema = tc.table_schema
WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name=%s
""", (table_name,))
return cur.fetchall()
def main():
try:
with connect_to_db() as conn:
table_info = get_table_info(conn)
for table_name, info in table_info.items():
print(f"\n## Table: {table_name}")
print("\nPrimary Keys:")
if info['primary_keys']:
for pk in info['primary_keys']:
print(f"- {pk}")
else:
print("- No primary keys found")
print("\nForeign Keys:")
if info['foreign_keys']:
for fk in info['foreign_keys']:
print(f"- {fk[1]} -> {fk[2]}.{fk[3]} (Constraint: {fk[0]})")
else:
print("- No foreign keys found")
except psycopg2.Error as e:
print(f"Database error: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
if __name__ == "__main__":
main()

@@ -1,83 +0,0 @@
import yaml
import subprocess
import os
import sys
def load_config():
with open('../config/sys.yaml', 'r') as file:
return yaml.safe_load(file)
def run_command(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
return process.returncode, stdout.decode(), stderr.decode()
def pg_dump(host, port, db_name, user, password, tables):
dump_command = f"PGPASSWORD={password} pg_dump -h {host} -p {port} -U {user} -d {db_name} -t {' -t '.join(tables)} -c --no-owner"
return run_command(dump_command)
def pg_restore(host, port, db_name, user, password, dump_data):
restore_command = f"PGPASSWORD={password} psql -h {host} -p {port} -U {user} -d {db_name}"
process = subprocess.Popen(restore_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate(input=dump_data.encode())
return process.returncode, stdout.decode(), stderr.decode()
def check_postgres_version(host, port, user, password):
version_command = f"PGPASSWORD={password} psql -h {host} -p {port} -U {user} -c 'SELECT version();'"
returncode, stdout, stderr = run_command(version_command)
if returncode == 0:
return stdout.strip()
else:
return f"Error checking version: {stderr}"
def replicate_databases():
config = load_config()
pool = config['POOL']
tables_to_replicate = ['click_logs', 'dailyweather', 'hourlyweather', 'locations', 'short_urls']
source_db = pool[0]
target_dbs = pool[1:]
# Check source database version
source_version = check_postgres_version(source_db['ts_ip'], source_db['db_port'], source_db['db_user'], source_db['db_pass'])
print(f"Source database version: {source_version}")
for target_db in target_dbs:
print(f"\nReplicating to {target_db['ts_id']}...")
# Check target database version
target_version = check_postgres_version(target_db['ts_ip'], target_db['db_port'], target_db['db_user'], target_db['db_pass'])
print(f"Target database version: {target_version}")
# Perform dump
returncode, dump_data, stderr = pg_dump(
source_db['ts_ip'],
source_db['db_port'],
source_db['db_name'],
source_db['db_user'],
source_db['db_pass'],
tables_to_replicate
)
if returncode != 0:
print(f"Error during dump: {stderr}")
continue
# Perform restore
returncode, stdout, stderr = pg_restore(
target_db['ts_ip'],
target_db['db_port'],
target_db['db_name'],
target_db['db_user'],
target_db['db_pass'],
dump_data
)
if returncode != 0:
print(f"Error during restore: {stderr}")
else:
print(f"Replication to {target_db['ts_id']} completed successfully.")
if __name__ == "__main__":
replicate_databases()
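Because pg_dump and psql are invoked here with shell=True and the password inlined via PGPASSWORD=..., the credentials are visible in the process list while the commands run; the script after this one passes list arguments and sets PGPASSWORD through the environment instead, which avoids that exposure.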

@@ -1,76 +0,0 @@
#!/usr/bin/env python3
import os
import yaml
import subprocess
def load_config():
script_dir = os.path.dirname(os.path.abspath(__file__))
sys_config_path = os.path.join(script_dir, '..', 'config', 'sys.yaml')
gis_config_path = os.path.join(script_dir, '..', 'config', 'gis.yaml')
with open(sys_config_path, 'r') as f:
sys_config = yaml.safe_load(f)
with open(gis_config_path, 'r') as f:
gis_config = yaml.safe_load(f)
return sys_config, gis_config
def replicate_table(source, targets, table_name):
print(f"Replicating {table_name}")
# Dump the table from the source
dump_command = [
'pg_dump',
'-h', source['ts_ip'],
'-p', str(source['db_port']),
'-U', source['db_user'],
'-d', source['db_name'],
'-t', table_name,
'--no-owner',
'--no-acl'
]
env = os.environ.copy()
env['PGPASSWORD'] = source['db_pass']
with open(f"{table_name}.sql", 'w') as f:
subprocess.run(dump_command, env=env, stdout=f, check=True)
# Restore the table to each target
for target in targets:
print(f"Replicating to {target['ts_id']}")
restore_command = [
'psql',
'-h', target['ts_ip'],
'-p', str(target['db_port']),
'-U', target['db_user'],
'-d', target['db_name'],
'-c', f"DROP TABLE IF EXISTS {table_name} CASCADE;",
'-f', f"{table_name}.sql"
]
env = os.environ.copy()
env['PGPASSWORD'] = target['db_pass']
subprocess.run(restore_command, env=env, check=True)
# Clean up the dump file
os.remove(f"{table_name}.sql")
def main():
sys_config, gis_config = load_config()
source_server = sys_config['POOL'][0]
target_servers = sys_config['POOL'][1:]
tables = [layer['table_name'] for layer in gis_config['layers']]
for table in tables:
replicate_table(source_server, target_servers, table)
print("Replication complete!")
if __name__ == "__main__":
main()

@@ -1,46 +0,0 @@
import asyncio
import asyncpg
# Database connection information
DB_INFO = {
'host': '100.64.64.20',
'port': 5432,
'database': 'sij',
'user': 'sij',
'password': 'Synchr0!'
}
async def update_click_logs():
# Connect to the database
conn = await asyncpg.connect(**DB_INFO)
try:
# Drop existing 'id' and 'new_id' columns if they exist
await conn.execute("""
ALTER TABLE click_logs
DROP COLUMN IF EXISTS id,
DROP COLUMN IF EXISTS new_id;
""")
print("Dropped existing id and new_id columns (if they existed)")
# Add new UUID column as primary key
await conn.execute("""
ALTER TABLE click_logs
ADD COLUMN id UUID PRIMARY KEY DEFAULT gen_random_uuid();
""")
print("Added new UUID column as primary key")
# Get the number of rows in the table
row_count = await conn.fetchval("SELECT COUNT(*) FROM click_logs")
print(f"Number of rows in click_logs: {row_count}")
except Exception as e:
print(f"An error occurred: {str(e)}")
import traceback
traceback.print_exc()
finally:
# Close the database connection
await conn.close()
# Run the update
asyncio.run(update_click_logs())
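Since gen_random_uuid() is volatile, the ADD COLUMN here rewrites click_logs and assigns a distinct UUID to every existing row rather than sharing one default value; on a large table that rewrite can take a while and holds an exclusive lock for its duration.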

@@ -1,104 +0,0 @@
{
"history_item_ids": [
"ncRYNd0Xef4LiUE74VjP",
"13pQLDAPYGIATwW1ySL5",
"dhsQNAYTWpcwo1X6rixf",
"V7wUip1NJuWAUw26sePF",
"mOYMa5lcI7wRHddIQTSa",
"mP97iOpA4oG7pwUBthq4",
"WTU5nsX6qZCYxLyoT5hq",
"15DPGnBgjr74KT3TMbK4",
"aCyBS1zoaweVjUoPf2TF",
"J8SUMQqZPtoy3Cgdhi3J",
"qKHaaJHfqh2je60Wmadb",
"2PaugQJ8c4rY44JGlaO5",
"TwzxcmYjo6XNebbMabcd",
"xdEK7rYq9UofOlkr565b",
"wik4jYd97aGMLgttTjC9",
"7oXn2yH7gdyhi6sEoWKd",
"jv8aZFiVe8gPMrAOBcNT",
"B2BctCDkCtLDxEMMBu9z",
"4KFO77NHDruNQvXIykwp",
"d033NizZaNZPc45fvxCO",
"yBKxOxfzsjpZYOFzoIM7",
"oEihKwMLWgvvoTLGx4yF",
"Q3guBm4hGml0KPAWKl7t",
"jaojY1gSafQmqshR48oT",
"yqGDMfcceaoceFEEurqa",
"oLdnyUp7plGrUMRVQ8Cf",
"FZAGCGosYEGMf8GCRFaA",
"TrWnXRdGkiH0K9kgwFiS",
"th16OEbg3u0XHslT9A33",
"856BAsn6dnzF7HeqGPfK",
"KjLoAfDXVBqR9s39T25j",
"uHQQJMMOfOxPAhEYQXLl",
"HO8WCIhkkI7AxwkU5MC6",
"9nxdesHWTRLCOd6YgWe9",
"tmx5tlIQ7hdSTgJt16P2",
"M9JN0YcBuCF6LhnqKN66",
"M9xkP4ecn0LIi7mQOfU6",
"CNtJgh52Ykh9ZqEppZeH",
"lgobcoiqmtWfbXkhEwbE",
"nr9jxnsE4DnwmTwCaHqC",
"Rnzo03tcyBqGPdmHemCb",
"X3YVGp7yf9GLgZ7WOuSU",
"wL3bkqxR9xqeFTvkJpSI",
"wNx3XDgFLTjVbMyGrIAO",
"rb0jj1ywBetmdvve5qIL",
"WdNnqvNswXeh6JFoaRSS",
"WT2ViyerKpodYmHDHhCw",
"OvhIRehXNwx7xMJHuTd7",
"EQb1iZtsADxJ0GxLJzEK",
"WXVfBJYoYGB7S61VyETD",
"q0q3Di1YJKF07dOhoa7E",
"a2XBIUPa68UiiKlzwFnG",
"YBuD7KsUpz8jxc5ItZcF",
"KdoucRVCVQGRVQ8Di9Ih",
"CkmDny98GEdfGuj2kaAx",
"R0R2p8luRZL7wwPtDilw",
"awvztgQnuaquK0dTpIuH",
"3ZPN0nJo8UQZYhFhoIOK",
"RJJeTkjYIgdv1ZoXXAax",
"ppxUNzWHAQafsM6OvEUE",
"f2VBm7yE7qmnjdS9CbYz",
"SZIMwz2T5ZAhTxTDBFol",
"YjC91PRgnQbAcdPhnWqU",
"fDTV7n8f6QK5yCwLkBwg",
"KbPpWUuiLPADj9H3OlvG",
"DIuqVoAg7lLxpvFBip84",
"pEwFAKMLGWUMHqfljJSq",
"9wwl7UbsgeKqrk8kNZin",
"2uLvjJgcZDiY9dqB8JlP",
"U5f1qZQM08t2YzJqEmxK",
"gnwn7QIhrCXRAGNddZ1H",
"g5nGEIHirFzKstdrGI1h",
"CQWH5dGSeS38VC4X4yg7",
"C5YGjhJPrTkVOpxIOHdj",
"YLbtnf1pSb9Ra7wgFHiF",
"qNLgNSvMr4VSoisKS9qj",
"Bq2ALvQVsj9L2wMpUvYO",
"gi0yTXLZLMhUKeKcalWc",
"3JQN9UbCsqj9ggi5sCkq",
"oPflJoA9kqBzjlmWY6zL",
"0kUZFgtZdqgdUBXFsXs9",
"aFTi7XdjR8W52ThmFpgc",
"pgIfjcy2UvKggfqJ1aNx",
"r0VguLaqnxTL9jza9H4y",
"444ehr4RtqgU1xjhhTLo",
"pEuzoznVDaQRBhIA9VTy",
"T9hdW9eJkEqDmOsSUoeY",
"wJjHbGzoWiKKOIGmf82T",
"kij4uMmkUlsSDu2zSH1k",
"oWt5rns196JsKIYPyrBS",
"SJ1m9mSOGOLIhkMgA8kq",
"kAaqe0ATrYtkifmZLOE5",
"O2Pvz7CP5rfyNvzFSDmy",
"w1rb8qN5nohVUovC0XAx",
"njFs4I4F7rtd9I6fEn6x",
"miFrp9GBm3MsHO03Z4eY",
"5DJywiPsfeVP9hFdqRhd",
"mUephoXhk5QdWrOfr9Xr",
"tDDiW3Yp0BptZ2wBv21A",
"YpX06liXWHquUVYFlKYa"
]
}

@@ -1,225 +0,0 @@
#!/usr/bin/env python3
import requests
import json
import time
import os
import subprocess
import sys
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from datetime import datetime
# Environment variables for database connection
DB_NAME = os.getenv('DB_NAME', 'sij')
DB_USER = os.getenv('DB_USER', 'sij')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'Synchr0!')
DB_HOST = os.getenv('DB_HOST', 'localhost')
DB_PORT = os.getenv('DB_PORT', '5432')
def get_feature_count(url):
params = {
'where': '1=1',
'returnCountOnly': 'true',
'f': 'json'
}
retries = Retry(total=10, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])
with requests.Session() as session:
session.mount("https://", HTTPAdapter(max_retries=retries))
response = session.get(url, params=params, timeout=30)
response.raise_for_status()
data = response.json()
return data.get('count', 0)
def fetch_features(url, offset, num, max_retries=5):
params = {
'where': '1=1',
'outFields': '*',
'geometryPrecision': 6,
'outSR': 4326,
'f': 'json',
'resultOffset': offset,
'resultRecordCount': num
}
for attempt in range(max_retries):
try:
retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
with requests.Session() as session:
session.mount("https://", HTTPAdapter(max_retries=retries))
response = session.get(url, params=params, timeout=30)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Error fetching features (attempt {attempt + 1}/{max_retries}): {e}")
if attempt == max_retries - 1:
raise
time.sleep(5 * (attempt + 1)) # Linear backoff: wait longer after each failed attempt
def download_layer(layer_num, layer_name):
base_dir = os.path.expanduser('~/data')
os.makedirs(base_dir, exist_ok=True) # ensure the output directory exists before the temp file is written
file_path = os.path.join(base_dir, f'PLSS_{layer_name}.geojson')
temp_file_path = os.path.join(base_dir, f'PLSS_{layer_name}_temp.json')
url = f"https://gis.blm.gov/arcgis/rest/services/Cadastral/BLM_Natl_PLSS_CadNSDI/MapServer/{layer_num}/query"
total_count = get_feature_count(url)
print(f"Total {layer_name} features: {total_count}")
batch_size = 1000
offset = 0
all_features = []
# Check if temporary file exists and load its content
if os.path.exists(temp_file_path):
with open(temp_file_path, 'r') as f:
all_features = json.load(f)
offset = len(all_features)
print(f"Resuming download from offset {offset}")
try:
while offset < total_count:
print(f"Fetching {layer_name} features {offset} to {offset + batch_size}...")
data = fetch_features(url, offset, batch_size)
new_features = data.get('features', [])
if not new_features:
break
all_features.extend(new_features)
offset += len(new_features)
# Progress indicator
progress = len(all_features) / total_count
bar_length = 30
filled_length = int(bar_length * progress)
bar = '=' * filled_length + '-' * (bar_length - filled_length)
print(f'\rProgress: [{bar}] {progress:.1%} ({len(all_features)}/{total_count} features)', end='', flush=True)
# Save progress to temporary file
with open(temp_file_path, 'w') as f:
json.dump(all_features, f)
time.sleep(1)
print(f"\nTotal {layer_name} features fetched: {len(all_features)}")
geojson_features = [
{
"type": "Feature",
"properties": feature['attributes'],
"geometry": feature['geometry']
} for feature in all_features
]
full_geojson = {
"type": "FeatureCollection",
"features": geojson_features
}
os.makedirs(base_dir, exist_ok=True)
with open(file_path, 'w') as f:
json.dump(full_geojson, f)
print(f"GeoJSON file saved as '{file_path}'")
# Remove temporary file
if os.path.exists(temp_file_path):
os.remove(temp_file_path)
return file_path
except Exception as e:
print(f"\nError during download: {e}")
print(f"Partial data saved in {temp_file_path}")
return None
def check_postgres_connection():
try:
subprocess.run(['psql', '-h', DB_HOST, '-p', DB_PORT, '-U', DB_USER, '-d', DB_NAME, '-c', 'SELECT 1;'],
check=True, capture_output=True, text=True)
return True
except subprocess.CalledProcessError:
return False
def check_postgis_extension():
try:
result = subprocess.run(['psql', '-h', DB_HOST, '-p', DB_PORT, '-U', DB_USER, '-d', DB_NAME,
'-c', "SELECT 1 FROM pg_extension WHERE extname = 'postgis';"],
check=True, capture_output=True, text=True)
return '1' in result.stdout
except subprocess.CalledProcessError:
return False
def create_postgis_extension():
try:
subprocess.run(['psql', '-h', DB_HOST, '-p', DB_PORT, '-U', DB_USER, '-d', DB_NAME,
'-c', "CREATE EXTENSION IF NOT EXISTS postgis;"],
check=True, capture_output=True, text=True)
print("PostGIS extension created successfully.")
except subprocess.CalledProcessError as e:
print(f"Error creating PostGIS extension: {e}")
sys.exit(1)
def import_to_postgis(file_path, table_name):
if not check_postgres_connection():
print("Error: Unable to connect to PostgreSQL. Please check your connection settings.")
sys.exit(1)
if not check_postgis_extension():
print("PostGIS extension not found. Attempting to create it...")
create_postgis_extension()
ogr2ogr_command = [
'ogr2ogr',
'-f', 'PostgreSQL',
f'PG:dbname={DB_NAME} user={DB_USER} password={DB_PASSWORD} host={DB_HOST} port={DB_PORT}',
file_path,
'-nln', table_name,
'-overwrite'
]
try:
subprocess.run(ogr2ogr_command, check=True, capture_output=True, text=True)
print(f"Data successfully imported into PostGIS table: {table_name}")
except subprocess.CalledProcessError as e:
print(f"Error importing data into PostGIS: {e}")
print(f"Command that failed: {e.cmd}")
print(f"Error output: {e.stderr}")
def check_ogr2ogr():
try:
subprocess.run(['ogr2ogr', '--version'], check=True, capture_output=True, text=True)
return True
except subprocess.CalledProcessError:
return False
except FileNotFoundError:
return False
def main():
if not check_ogr2ogr():
print("Error: ogr2ogr not found. Please install GDAL/OGR tools.")
print("On Debian: sudo apt-get install gdal-bin")
print("On macOS with Homebrew: brew install gdal")
sys.exit(1)
try:
township_file = os.path.expanduser('~/data/PLSS_Townships.geojson')
if not os.path.exists(township_file):
township_file = download_layer(1, "Townships")
if township_file:
import_to_postgis(township_file, "public.plss_townships")
section_file = os.path.expanduser('~/data/PLSS_Sections.geojson')
if not os.path.exists(section_file):
section_file = download_layer(2, "Sections")
if section_file:
import_to_postgis(section_file, "public.plss_sections")
except requests.exceptions.RequestException as e:
print(f"Error fetching data: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
if __name__ == "__main__":
main()

@@ -1,133 +0,0 @@
# CaPLSS_downloader_and_importer.py
import requests
import json
import time
import os
import subprocess
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def get_feature_count(url):
params = {
'where': '1=1',
'returnCountOnly': 'true',
'f': 'json'
}
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session = requests.Session()
session.mount("https://", adapter)
response = session.get(url, params=params, timeout=15) # Add timeout parameter
response.raise_for_status()
data = response.json()
return data.get('count', 0)
def fetch_features(url, offset, num):
params = {
'where': '1=1',
'outFields': '*',
'geometryPrecision': 6,
'outSR': 4326,
'f': 'json',
'resultOffset': offset,
'resultRecordCount': num
}
response = requests.get(url, params=params)
response.raise_for_status()
return response.json()
def download_layer(layer_num, layer_name):
url = f"https://gis.blm.gov/arcgis/rest/services/Cadastral/BLM_Natl_PLSS_CadNSDI/MapServer/{layer_num}/query"
total_count = get_feature_count(url)
print(f"Total {layer_name} features: {total_count}")
batch_size = 1000
offset = 0
all_features = []
while offset < total_count:
print(f"Fetching {layer_name} features {offset} to {offset + batch_size}...")
data = fetch_features(url, offset, batch_size)
new_features = data.get('features', [])
if not new_features:
break
all_features.extend(new_features)
offset += len(new_features)
print(f"Progress: {len(all_features)}/{total_count} features")
time.sleep(1) # Be nice to the server
print(f"Total {layer_name} features fetched: {len(all_features)}")
# Convert to GeoJSON
geojson_features = [
{
"type": "Feature",
"properties": feature['attributes'],
"geometry": feature['geometry']
} for feature in all_features
]
full_geojson = {
"type": "FeatureCollection",
"features": geojson_features
}
# Define a base directory that exists on both macOS and Debian
base_dir = os.path.expanduser('~/data')
os.makedirs(base_dir, exist_ok=True) # Create the directory if it doesn't exist
# Use os.path.join to construct the file path
file_path = os.path.join(base_dir, f'PLSS_{layer_name}.geojson')
# Save to file
with open(file_path, 'w') as f:
json.dump(full_geojson, f)
print(f"GeoJSON file saved as '{file_path}'")
return file_path
def import_to_postgis(file_path, table_name):
db_name = 'sij'
db_user = 'sij'
db_password = 'Synchr0!'
ogr2ogr_command = [
'ogr2ogr',
'-f', 'PostgreSQL',
f'PG:dbname={db_name} user={db_user} password={db_password}',
file_path,
'-nln', table_name,
'-overwrite'
]
subprocess.run(ogr2ogr_command, check=True)
print(f"Data successfully imported into PostGIS table: {table_name}")
def main():
try:
# Download and import Townships (Layer 1)
township_file = download_layer(1, "Townships")
import_to_postgis(township_file, "public.plss_townships")
# Download and import Sections (Layer 2)
section_file = download_layer(2, "Sections")
import_to_postgis(section_file, "public.plss_sections")
except requests.exceptions.RequestException as e:
print(f"Error fetching data: {e}")
except subprocess.CalledProcessError as e:
print(f"Error importing data into PostGIS: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
if __name__ == "__main__":
main()

@@ -1,73 +0,0 @@
import requests
import PyPDF2
import io
import re
def scrape_data_from_pdf(url):
response = requests.get(url)
pdf_file = io.BytesIO(response.content)
pdf_reader = PyPDF2.PdfReader(pdf_file)
all_text = ""
for page in pdf_reader.pages:
all_text += page.extract_text() + "\n"
return all_text
def parse_data(raw_data):
lines = raw_data.split('\n')
data = []
current_entry = None
for line in lines:
line = line.strip()
if re.match(r'\d+-\d+-\d+-\w+', line):
if current_entry:
data.append(current_entry)
current_entry = {'Harvest Document': line, 'Raw Data': []}
elif current_entry:
current_entry['Raw Data'].append(line)
if current_entry:
data.append(current_entry)
return data
def filter_data(data):
return [entry for entry in data if any(owner.lower() in ' '.join(entry['Raw Data']).lower() for owner in ["Sierra Pacific", "SPI", "Land & Timber"])]
def extract_location(raw_data):
location = []
for line in raw_data:
if 'MDBM:' in line or 'HBM:' in line:
location.append(line)
return ' '.join(location)
def extract_plss_coordinates(text):
pattern = r'(\w+): T(\d+)([NSEW]) R(\d+)([NSEW]) S(\d+)'
return re.findall(pattern, text)
# Main execution
url = "https://caltreesplans.resources.ca.gov/Caltrees/Report/ShowReport.aspx?module=TH_Document&reportID=492&reportType=LINK_REPORT_LIST"
raw_data = scrape_data_from_pdf(url)
parsed_data = parse_data(raw_data)
print(f"Total timber plans parsed: {len(parsed_data)}")
filtered_data = filter_data(parsed_data)
print(f"Found {len(filtered_data)} matching entries.")
for plan in filtered_data:
print("\nHarvest Document:", plan['Harvest Document'])
location = extract_location(plan['Raw Data'])
print("Location:", location)
plss_coordinates = extract_plss_coordinates(location)
print("PLSS Coordinates:")
for coord in plss_coordinates:
meridian, township, township_dir, range_, range_dir, section = coord
print(f" {meridian}: T{township}{township_dir} R{range_}{range_dir} S{section}")
print("-" * 50)

@@ -1,23 +0,0 @@
#!/Users/sij/miniforge3/envs/sijapi/bin/python
import sys
import asyncio
from fastapi import BackgroundTasks
from sijapi.routers.news import process_and_save_article
async def main():
if len(sys.argv) != 2:
print("Usage: python script.py <article_url>")
sys.exit(1)
url = sys.argv[1]
bg_tasks = BackgroundTasks()
try:
result = await process_and_save_article(bg_tasks, url)
print(result)
except Exception as e:
print(f"Error processing article: {str(e)}")
sys.exit(1)
if __name__ == "__main__":
asyncio.run(main())

@@ -1,46 +0,0 @@
import asyncio
import asyncpg
# Database connection information
DB_INFO = {
'host': '100.64.64.20',
'port': 5432,
'database': 'sij',
'user': 'sij',
'password': 'Synchr0!'
}
async def update_click_logs():
# Connect to the database
conn = await asyncpg.connect(**DB_INFO)
try:
# Drop existing 'id' and 'new_id' columns if they exist
await conn.execute("""
ALTER TABLE click_logs
DROP COLUMN IF EXISTS id,
DROP COLUMN IF EXISTS new_id;
""")
print("Dropped existing id and new_id columns (if they existed)")
# Add new UUID column as primary key
await conn.execute("""
ALTER TABLE click_logs
ADD COLUMN id UUID PRIMARY KEY DEFAULT gen_random_uuid();
""")
print("Added new UUID column as primary key")
# Get the number of rows in the table
row_count = await conn.fetchval("SELECT COUNT(*) FROM click_logs")
print(f"Number of rows in click_logs: {row_count}")
except Exception as e:
print(f"An error occurred: {str(e)}")
import traceback
traceback.print_exc()
finally:
# Close the database connection
await conn.close()
# Run the update
asyncio.run(update_click_logs())

@@ -1,25 +0,0 @@
version: '3.8'
services:
  db:
    image: postgis/postgis:16-3.4
    container_name: sij_postgres
    environment:
      POSTGRES_DB: sij
      POSTGRES_USER: sij
      POSTGRES_PASSWORD: Synchr0!
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./init-db.sh:/docker-entrypoint-initdb.d/init-db.sh
    ports:
      - "5432:5432"
    networks:
      - sij_network
networks:
  sij_network:
    driver: bridge
volumes:
  postgres_data:

@@ -1,11 +0,0 @@
#!/bin/bash
set -e
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS postgis_topology;
EOSQL
# Modify pg_hba.conf to allow connections from Tailscale network
echo "host all all 100.64.64.0/24 trust" >> /var/lib/postgresql/data/pg_hba.conf

@@ -1,66 +0,0 @@
import asyncio
import asyncpg
import psycopg2
import sys
async def try_async_connect(host, port, user, password, database):
try:
conn = await asyncpg.connect(
host=host,
port=port,
user=user,
password=password,
database=database
)
version = await conn.fetchval('SELECT version()')
print(f"Async connection successful to {host}:{port}")
print(f"PostgreSQL version: {version}")
await conn.close()
return True
except Exception as e:
print(f"Async connection failed to {host}:{port}")
print(f"Error: {str(e)}")
return False
def try_sync_connect(host, port, user, password, database):
try:
conn = psycopg2.connect(
host=host,
port=port,
user=user,
password=password,
database=database
)
cur = conn.cursor()
cur.execute('SELECT version()')
version = cur.fetchone()[0]
print(f"Sync connection successful to {host}:{port}")
print(f"PostgreSQL version: {version}")
conn.close()
return True
except Exception as e:
print(f"Sync connection failed to {host}:{port}")
print(f"Error: {str(e)}")
return False
async def main():
# Database connection parameters
port = 5432
user = 'sij'
password = 'Synchr0!'
database = 'sij'
hosts = ['100.64.64.20', '127.0.0.1', 'localhost']
print("Attempting asynchronous connections:")
for host in hosts:
await try_async_connect(host, port, user, password, database)
print()
print("Attempting synchronous connections:")
for host in hosts:
try_sync_connect(host, port, user, password, database)
print()
if __name__ == "__main__":
asyncio.run(main())

@@ -1,89 +0,0 @@
import psycopg2
from psycopg2 import sql
def connect_to_db():
return psycopg2.connect(
dbname='sij',
user='sij',
password='Synchr0!',
host='localhost' # Adjust if your database is not on localhost
)
def get_table_info(conn):
with conn.cursor() as cur:
# Get all tables in the public schema
cur.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public'
""")
tables = cur.fetchall()
table_info = {}
for (table_name,) in tables:
table_info[table_name] = {
'primary_keys': get_primary_keys(cur, table_name),
'foreign_keys': get_foreign_keys(cur, table_name)
}
return table_info
def get_primary_keys(cur, table_name):
cur.execute("""
SELECT a.attname
FROM pg_index i
JOIN pg_attribute a ON a.attrelid = i.indrelid
AND a.attnum = ANY(i.indkey)
WHERE i.indrelid = %s::regclass
AND i.indisprimary
""", (table_name,))
return [row[0] for row in cur.fetchall()]
def get_foreign_keys(cur, table_name):
cur.execute("""
SELECT
tc.constraint_name,
kcu.column_name,
ccu.table_name AS foreign_table_name,
ccu.column_name AS foreign_column_name
FROM
information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON tc.constraint_name = kcu.constraint_name
AND tc.table_schema = kcu.table_schema
JOIN information_schema.constraint_column_usage AS ccu
ON ccu.constraint_name = tc.constraint_name
AND ccu.table_schema = tc.table_schema
WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name=%s
""", (table_name,))
return cur.fetchall()
def main():
try:
with connect_to_db() as conn:
table_info = get_table_info(conn)
for table_name, info in table_info.items():
print(f"\n## Table: {table_name}")
print("\nPrimary Keys:")
if info['primary_keys']:
for pk in info['primary_keys']:
print(f"- {pk}")
else:
print("- No primary keys found")
print("\nForeign Keys:")
if info['foreign_keys']:
for fk in info['foreign_keys']:
print(f"- {fk[1]} -> {fk[2]}.{fk[3]} (Constraint: {fk[0]})")
else:
print("- No foreign keys found")
except psycopg2.Error as e:
print(f"Database error: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
if __name__ == "__main__":
main()

@@ -1,46 +0,0 @@
import asyncio
import asyncpg
# Database connection information
DB_INFO = {
'host': '100.64.64.20',
'port': 5432,
'database': 'sij',
'user': 'sij',
'password': 'Synchr0!'
}
async def update_click_logs():
# Connect to the database
conn = await asyncpg.connect(**DB_INFO)
try:
# Drop existing 'id' and 'new_id' columns if they exist
await conn.execute("""
ALTER TABLE click_logs
DROP COLUMN IF EXISTS id,
DROP COLUMN IF EXISTS new_id;
""")
print("Dropped existing id and new_id columns (if they existed)")
# Add new UUID column as primary key
await conn.execute("""
ALTER TABLE click_logs
ADD COLUMN id UUID PRIMARY KEY DEFAULT gen_random_uuid();
""")
print("Added new UUID column as primary key")
# Get the number of rows in the table
row_count = await conn.fetchval("SELECT COUNT(*) FROM click_logs")
print(f"Number of rows in click_logs: {row_count}")
except Exception as e:
print(f"An error occurred: {str(e)}")
import traceback
traceback.print_exc()
finally:
# Close the database connection
await conn.close()
# Run the update
asyncio.run(update_click_logs())

@@ -1,104 +0,0 @@
{
"history_item_ids": [
"ncRYNd0Xef4LiUE74VjP",
"13pQLDAPYGIATwW1ySL5",
"dhsQNAYTWpcwo1X6rixf",
"V7wUip1NJuWAUw26sePF",
"mOYMa5lcI7wRHddIQTSa",
"mP97iOpA4oG7pwUBthq4",
"WTU5nsX6qZCYxLyoT5hq",
"15DPGnBgjr74KT3TMbK4",
"aCyBS1zoaweVjUoPf2TF",
"J8SUMQqZPtoy3Cgdhi3J",
"qKHaaJHfqh2je60Wmadb",
"2PaugQJ8c4rY44JGlaO5",
"TwzxcmYjo6XNebbMabcd",
"xdEK7rYq9UofOlkr565b",
"wik4jYd97aGMLgttTjC9",
"7oXn2yH7gdyhi6sEoWKd",
"jv8aZFiVe8gPMrAOBcNT",
"B2BctCDkCtLDxEMMBu9z",
"4KFO77NHDruNQvXIykwp",
"d033NizZaNZPc45fvxCO",
"yBKxOxfzsjpZYOFzoIM7",
"oEihKwMLWgvvoTLGx4yF",
"Q3guBm4hGml0KPAWKl7t",
"jaojY1gSafQmqshR48oT",
"yqGDMfcceaoceFEEurqa",
"oLdnyUp7plGrUMRVQ8Cf",
"FZAGCGosYEGMf8GCRFaA",
"TrWnXRdGkiH0K9kgwFiS",
"th16OEbg3u0XHslT9A33",
"856BAsn6dnzF7HeqGPfK",
"KjLoAfDXVBqR9s39T25j",
"uHQQJMMOfOxPAhEYQXLl",
"HO8WCIhkkI7AxwkU5MC6",
"9nxdesHWTRLCOd6YgWe9",
"tmx5tlIQ7hdSTgJt16P2",
"M9JN0YcBuCF6LhnqKN66",
"M9xkP4ecn0LIi7mQOfU6",
"CNtJgh52Ykh9ZqEppZeH",
"lgobcoiqmtWfbXkhEwbE",
"nr9jxnsE4DnwmTwCaHqC",
"Rnzo03tcyBqGPdmHemCb",
"X3YVGp7yf9GLgZ7WOuSU",
"wL3bkqxR9xqeFTvkJpSI",
"wNx3XDgFLTjVbMyGrIAO",
"rb0jj1ywBetmdvve5qIL",
"WdNnqvNswXeh6JFoaRSS",
"WT2ViyerKpodYmHDHhCw",
"OvhIRehXNwx7xMJHuTd7",
"EQb1iZtsADxJ0GxLJzEK",
"WXVfBJYoYGB7S61VyETD",
"q0q3Di1YJKF07dOhoa7E",
"a2XBIUPa68UiiKlzwFnG",
"YBuD7KsUpz8jxc5ItZcF",
"KdoucRVCVQGRVQ8Di9Ih",
"CkmDny98GEdfGuj2kaAx",
"R0R2p8luRZL7wwPtDilw",
"awvztgQnuaquK0dTpIuH",
"3ZPN0nJo8UQZYhFhoIOK",
"RJJeTkjYIgdv1ZoXXAax",
"ppxUNzWHAQafsM6OvEUE",
"f2VBm7yE7qmnjdS9CbYz",
"SZIMwz2T5ZAhTxTDBFol",
"YjC91PRgnQbAcdPhnWqU",
"fDTV7n8f6QK5yCwLkBwg",
"KbPpWUuiLPADj9H3OlvG",
"DIuqVoAg7lLxpvFBip84",
"pEwFAKMLGWUMHqfljJSq",
"9wwl7UbsgeKqrk8kNZin",
"2uLvjJgcZDiY9dqB8JlP",
"U5f1qZQM08t2YzJqEmxK",
"gnwn7QIhrCXRAGNddZ1H",
"g5nGEIHirFzKstdrGI1h",
"CQWH5dGSeS38VC4X4yg7",
"C5YGjhJPrTkVOpxIOHdj",
"YLbtnf1pSb9Ra7wgFHiF",
"qNLgNSvMr4VSoisKS9qj",
"Bq2ALvQVsj9L2wMpUvYO",
"gi0yTXLZLMhUKeKcalWc",
"3JQN9UbCsqj9ggi5sCkq",
"oPflJoA9kqBzjlmWY6zL",
"0kUZFgtZdqgdUBXFsXs9",
"aFTi7XdjR8W52ThmFpgc",
"pgIfjcy2UvKggfqJ1aNx",
"r0VguLaqnxTL9jza9H4y",
"444ehr4RtqgU1xjhhTLo",
"pEuzoznVDaQRBhIA9VTy",
"T9hdW9eJkEqDmOsSUoeY",
"wJjHbGzoWiKKOIGmf82T",
"kij4uMmkUlsSDu2zSH1k",
"oWt5rns196JsKIYPyrBS",
"SJ1m9mSOGOLIhkMgA8kq",
"kAaqe0ATrYtkifmZLOE5",
"O2Pvz7CP5rfyNvzFSDmy",
"w1rb8qN5nohVUovC0XAx",
"njFs4I4F7rtd9I6fEn6x",
"miFrp9GBm3MsHO03Z4eY",
"5DJywiPsfeVP9hFdqRhd",
"mUephoXhk5QdWrOfr9Xr",
"tDDiW3Yp0BptZ2wBv21A",
"YpX06liXWHquUVYFlKYa"
]
}

@@ -1,63 +0,0 @@
import asyncio
from pathlib import Path
from sijapi import L, EMAIL_CONFIG, EMAIL_LOGS
from sijapi.classes import EmailAccount
from sijapi.routers import email
async def initialize_log_files():
summarized_log = EMAIL_LOGS / "summarized.txt"
autoresponded_log = EMAIL_LOGS / "autoresponded.txt"
diagnostic_log = EMAIL_LOGS / "diagnostic.txt"
for log_file in [summarized_log, autoresponded_log, diagnostic_log]:
log_file.parent.mkdir(parents=True, exist_ok=True)
log_file.write_text("")
L.DEBUG(f"Log files initialized: {summarized_log}, {autoresponded_log}, {diagnostic_log}")
return summarized_log, autoresponded_log, diagnostic_log
async def process_all_emails(account: EmailAccount, summarized_log: Path, autoresponded_log: Path, diagnostic_log: Path):
try:
with email.get_imap_connection(account) as inbox:
L.DEBUG(f"Connected to {account.name}, processing all emails...")
all_messages = inbox.messages()
unread_messages = set(uid for uid, _ in inbox.messages(unread=True))
processed_count = 0
for identifier, message in all_messages:
# Log diagnostic information
with open(diagnostic_log, 'a') as f:
f.write(f"Account: {account.name}, Raw Identifier: {identifier}, Type: {type(identifier)}\n")
# Attempt to get a string representation of the identifier
if isinstance(identifier, bytes):
id_str = identifier.decode()
elif isinstance(identifier, (int, str)):
id_str = str(identifier)
else:
id_str = repr(identifier)
if identifier not in unread_messages:
processed_count += 1
for log_file in [summarized_log, autoresponded_log]:
with open(log_file, 'a') as f:
f.write(f"{id_str}\n")
L.INFO(f"Processed {processed_count} non-unread emails for account {account.name}")
except Exception as e:
L.logger.error(f"An error occurred while processing emails for account {account.name}: {e}")
async def main():
email_accounts = email.load_email_accounts(EMAIL_CONFIG)
summarized_log, autoresponded_log, diagnostic_log = await initialize_log_files()
L.DEBUG(f"Processing {len(email_accounts)} email accounts")
tasks = [process_all_emails(account, summarized_log, autoresponded_log, diagnostic_log) for account in email_accounts]
await asyncio.gather(*tasks)
# Final verification
with open(summarized_log, 'r') as f:
final_count = len(f.readlines())
L.INFO(f"Final non-unread email count: {final_count}")
if __name__ == "__main__":
asyncio.run(main())

@@ -1,191 +0,0 @@
import psycopg2
from psycopg2 import sql
import sys
def connect_to_db():
return psycopg2.connect(
dbname='sij',
user='sij',
password='Synchr0!',
host='localhost'
)
def get_tables(cur):
cur.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
AND table_name NOT LIKE '%_uuid' AND table_name NOT LIKE '%_orig'
AND table_name != 'spatial_ref_sys'
""")
return [row[0] for row in cur.fetchall()]
def get_columns(cur, table_name):
cur.execute("""
SELECT column_name, udt_name,
is_nullable, column_default,
character_maximum_length, numeric_precision, numeric_scale
FROM information_schema.columns
WHERE table_name = %s
ORDER BY ordinal_position
""", (table_name,))
return cur.fetchall()
def get_constraints(cur, table_name):
cur.execute("""
SELECT conname, contype, pg_get_constraintdef(c.oid)
FROM pg_constraint c
JOIN pg_namespace n ON n.oid = c.connamespace
WHERE conrelid = %s::regclass
AND n.nspname = 'public'
""", (table_name,))
return cur.fetchall()
def drop_table_if_exists(cur, table_name):
cur.execute(sql.SQL("DROP TABLE IF EXISTS {} CASCADE").format(sql.Identifier(table_name)))
def create_uuid_table(cur, old_table, new_table):
drop_table_if_exists(cur, new_table)
columns = get_columns(cur, old_table)
constraints = get_constraints(cur, old_table)
column_defs = []
has_id_column = any(col[0] == 'id' for col in columns)
for col in columns:
col_name, udt_name, is_nullable, default, max_length, precision, scale = col
if col_name == 'id' and has_id_column:
column_defs.append(sql.SQL("{} UUID PRIMARY KEY DEFAULT gen_random_uuid()").format(sql.Identifier(col_name)))
else:
type_sql = sql.SQL("{}").format(sql.Identifier(udt_name))
if max_length:
type_sql = sql.SQL("{}({})").format(type_sql, sql.Literal(max_length))
elif precision and scale:
type_sql = sql.SQL("{}({},{})").format(type_sql, sql.Literal(precision), sql.Literal(scale))
column_def = sql.SQL("{} {}").format(sql.Identifier(col_name), type_sql)
if is_nullable == 'NO':
column_def = sql.SQL("{} NOT NULL").format(column_def)
if default and 'nextval' not in default: # Skip auto-increment defaults
column_def = sql.SQL("{} DEFAULT {}").format(column_def, sql.SQL(default))
column_defs.append(column_def)
constraint_defs = []
for constraint in constraints:
conname, contype, condef = constraint
if contype != 'p' or not has_id_column: # Keep primary key if there's no id column
constraint_defs.append(sql.SQL(condef))
if not has_id_column:
column_defs.append(sql.SQL("uuid UUID DEFAULT gen_random_uuid()"))
query = sql.SQL("CREATE TABLE {} ({})").format(
sql.Identifier(new_table),
sql.SQL(", ").join(column_defs + constraint_defs)
)
cur.execute(query)
def migrate_data(cur, old_table, new_table):
columns = get_columns(cur, old_table)
column_names = [col[0] for col in columns]
has_id_column = 'id' in column_names
if has_id_column:
column_names.remove('id')
old_cols = sql.SQL(", ").join(map(sql.Identifier, column_names))
new_cols = sql.SQL(", ").join(map(sql.Identifier, ['id'] + column_names))
query = sql.SQL("INSERT INTO {} ({}) SELECT gen_random_uuid(), {} FROM {}").format(
sql.Identifier(new_table),
new_cols,
old_cols,
sql.Identifier(old_table)
)
else:
old_cols = sql.SQL(", ").join(map(sql.Identifier, column_names))
new_cols = sql.SQL(", ").join(map(sql.Identifier, column_names + ['uuid']))
query = sql.SQL("INSERT INTO {} ({}) SELECT {}, gen_random_uuid() FROM {}").format(
sql.Identifier(new_table),
new_cols,
old_cols,
sql.Identifier(old_table)
)
cur.execute(query)
def update_foreign_keys(cur, tables):
for table in tables:
constraints = get_constraints(cur, table)
for constraint in constraints:
conname, contype, condef = constraint
if contype == 'f': # Foreign key constraint
referenced_table = condef.split('REFERENCES ')[1].split('(')[0].strip()
referenced_column = condef.split('(')[2].split(')')[0].strip()
local_column = condef.split('(')[1].split(')')[0].strip()
cur.execute(sql.SQL("""
UPDATE {table_uuid}
SET {local_column} = subquery.new_id::text::{local_column_type}
FROM (
SELECT old.{ref_column} AS old_id, new_table.id AS new_id
FROM {ref_table} old
JOIN public.{ref_table_uuid} new_table ON new_table.{ref_column}::text = old.{ref_column}::text
) AS subquery
WHERE {local_column}::text = subquery.old_id::text
""").format(
table_uuid=sql.Identifier(f"{table}_uuid"),
local_column=sql.Identifier(local_column),
local_column_type=sql.SQL(get_column_type(cur, f"{table}_uuid", local_column)),
ref_column=sql.Identifier(referenced_column),
ref_table=sql.Identifier(referenced_table),
ref_table_uuid=sql.Identifier(f"{referenced_table}_uuid")
))
def get_column_type(cur, table_name, column_name):
cur.execute("""
SELECT data_type
FROM information_schema.columns
WHERE table_name = %s AND column_name = %s
""", (table_name, column_name))
return cur.fetchone()[0]
def rename_tables(cur, tables):
for table in tables:
drop_table_if_exists(cur, f"{table}_orig")
cur.execute(sql.SQL("ALTER TABLE IF EXISTS {} RENAME TO {}").format(
sql.Identifier(table), sql.Identifier(f"{table}_orig")
))
cur.execute(sql.SQL("ALTER TABLE IF EXISTS {} RENAME TO {}").format(
sql.Identifier(f"{table}_uuid"), sql.Identifier(table)
))
def main():
try:
with connect_to_db() as conn:
with conn.cursor() as cur:
tables = get_tables(cur)
# Create new UUID tables
for table in tables:
print(f"Creating UUID table for {table}...")
create_uuid_table(cur, table, f"{table}_uuid")
# Migrate data
for table in tables:
print(f"Migrating data for {table}...")
migrate_data(cur, table, f"{table}_uuid")
# Update foreign keys
print("Updating foreign key references...")
update_foreign_keys(cur, tables)
# Rename tables
print("Renaming tables...")
rename_tables(cur, tables)
conn.commit()
print("Migration completed successfully.")
except Exception as e:
print(f"An error occurred: {e}")
conn.rollback()
if __name__ == "__main__":
main()
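Both this migration and the click_logs script above rely on gen_random_uuid(), which is built into PostgreSQL 13 and later; on older servers it requires CREATE EXTENSION pgcrypto before these statements will run.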

File diff suppressed because it is too large.

@@ -1,211 +0,0 @@
import yaml
import requests
import paramiko
import time
from pathlib import Path
import logging
import subprocess
import os
import argparse
import sys
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def load_config():
config_path = Path(__file__).parent.parent / 'config' / 'sys.yaml'
with open(config_path, 'r') as file:
return yaml.safe_load(file)
def load_env():
env_path = Path(__file__).parent.parent / 'config' / '.env'
if env_path.exists():
with open(env_path, 'r') as file:
for line in file:
line = line.strip()
if line and not line.startswith('#'):
try:
key, value = line.split('=', 1)
os.environ[key.strip()] = value.strip()
except ValueError:
logging.warning(f"Skipping invalid line in .env file: {line}")
def check_server(ip, port, ts_id):
address = f"http://{ip}:{port}/id"
try:
response = requests.get(address, timeout=5)
response_text = response.text.strip().strip('"')
return response.status_code == 200 and response_text == ts_id
except requests.RequestException as e:
logging.error(f"Error checking server {ts_id}: {str(e)}")
return False
def execute_ssh_command(ssh, command):
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
output = stdout.read().decode().strip()
error = stderr.read().decode().strip()
return exit_status, output, error
def is_local_tmux_session_running(session_name):
try:
result = subprocess.run(['tmux', 'has-session', '-t', session_name], capture_output=True, text=True)
return result.returncode == 0
except subprocess.CalledProcessError:
return False
def start_local_server(server, pull=False, push=False):
try:
if is_local_tmux_session_running('sijapi'):
logging.info("Local sijapi tmux session is already running.")
return
git_command = ""
if pull:
git_command = "git pull &&"
elif push:
git_command = "git add -A . && git commit -m \"auto-update\" && git push origin --force &&"
command = f"{server['tmux']} new-session -d -s sijapi 'cd {server['path']} && {git_command} {server['conda_env']}/bin/python -m sijapi'"
logging.info(f"Executing local command: {command}")
result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
logging.info(f"Successfully started sijapi session on local machine")
logging.debug(f"Command output: {result.stdout}")
except subprocess.CalledProcessError as e:
logging.error(f"Failed to start sijapi session on local machine. Error: {e}")
logging.error(f"Error output: {e.stderr}")
def start_remote_server(server, pull=False, push=False):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(
server['ts_ip'],
port=server['ssh_port'],
username=server['ssh_user'],
password=server['ssh_pass'],
timeout=10
)
status, output, error = execute_ssh_command(ssh, f"{server['tmux']} has-session -t sijapi 2>/dev/null && echo 'exists' || echo 'not exists'")
if output == 'exists':
logging.info(f"sijapi session already exists on {server['ts_id']}")
return
git_command = ""
if pull:
git_command = "git pull &&"
elif push:
git_command = "git add -A . && git commit -m \"auto-update\" && git push origin --force &&"
command = f"{server['tmux']} new-session -d -s sijapi 'cd {server['path']} && {git_command} {server['conda_env']}/bin/python -m sijapi'"
status, output, error = execute_ssh_command(ssh, command)
if status == 0:
logging.info(f"Successfully started sijapi session on {server['ts_id']}")
else:
logging.error(f"Failed to start sijapi session on {server['ts_id']}. Error: {error}")
except paramiko.SSHException as e:
logging.error(f"Failed to connect to {server['ts_id']}: {str(e)}")
finally:
ssh.close()
def kill_local_server():
try:
if is_local_tmux_session_running('sijapi'):
subprocess.run(['tmux', 'kill-session', '-t', 'sijapi'], check=True)
logging.info("Killed local sijapi tmux session.")
else:
logging.info("No local sijapi tmux session to kill.")
except subprocess.CalledProcessError as e:
logging.error(f"Failed to kill local sijapi tmux session. Error: {e}")
def kill_remote_server(server):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(
server['ts_ip'],
port=server['ssh_port'],
username=server['ssh_user'],
password=server['ssh_pass'],
timeout=10
)
command = f"{server['tmux']} kill-session -t sijapi"
status, output, error = execute_ssh_command(ssh, command)
if status == 0:
logging.info(f"Successfully killed sijapi session on {server['ts_id']}")
else:
logging.error(f"Failed to kill sijapi session on {server['ts_id']}. Error: {error}")
except paramiko.SSHException as e:
logging.error(f"Failed to connect to {server['ts_id']}: {str(e)}")
finally:
ssh.close()
def main():
load_env()
config = load_config()
pool = config['POOL']
local_ts_id = os.environ.get('TS_ID')
parser = argparse.ArgumentParser(description='Manage sijapi servers')
parser.add_argument('--kill', action='store_true', help='Kill the local sijapi tmux session')
parser.add_argument('--restart', action='store_true', help='Restart the local sijapi tmux session')
parser.add_argument('--all', action='store_true', help='Apply the action to all servers')
parser.add_argument('--pull', action='store_true', help='Pull latest changes before starting the server')
parser.add_argument('--push', action='store_true', help='Push changes before starting the server')
args = parser.parse_args()
if args.kill:
if args.all:
for server in pool:
if server['ts_id'] == local_ts_id:
kill_local_server()
else:
kill_remote_server(server)
else:
kill_local_server()
sys.exit(0)
if args.restart or args.pull or args.push:
if args.all:
for server in pool:
if server['ts_id'] == local_ts_id:
kill_local_server()
start_local_server(server, pull=args.pull, push=args.push)
else:
kill_remote_server(server)
start_remote_server(server, pull=args.pull, push=args.push)
else:
kill_local_server()
local_server = next(server for server in pool if server['ts_id'] == local_ts_id)
start_local_server(local_server, pull=args.pull, push=args.push)
sys.exit(0)
# If no specific arguments, run the default behavior
local_server = next(server for server in pool if server['ts_id'] == local_ts_id)
if not check_server(local_server['ts_ip'], local_server['app_port'], local_server['ts_id']):
logging.info(f"Local server {local_server['ts_id']} is not responding correctly. Attempting to start...")
kill_local_server()
start_local_server(local_server, push=True)
for server in pool:
if server['ts_id'] != local_ts_id:
if not check_server(server['ts_ip'], server['app_port'], server['ts_id']):
logging.info(f"{server['ts_id']} is not responding correctly. Attempting to start...")
kill_remote_server(server)
start_remote_server(server, pull=True)
else:
logging.info(f"{server['ts_id']} is running and responding correctly.")
time.sleep(1)
if __name__ == "__main__":
main()
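# A hedged sketch of one POOL entry in config/sys.yaml as this script uses it. The key names
# are taken from the code above, but every value is a placeholder, not a real host.
EXAMPLE_POOL_ENTRY = {
    'ts_id': 'example-host',      # Tailscale ID compared against the TS_ID env var
    'ts_ip': '100.64.64.1',       # address used for the /id health check and for SSH
    'app_port': 4444,             # port polled by check_server()
    'ssh_port': 22,
    'ssh_user': 'exampleuser',
    'ssh_pass': 'examplepass',
    'tmux': '/usr/bin/tmux',      # tmux binary used to manage the sijapi session
    'path': '/home/exampleuser/sijapi',
    'conda_env': '/home/exampleuser/miniconda3/envs/sijapi',
}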


@@ -1,34 +0,0 @@
from aura_sr import AuraSR
from PIL import Image
import torch
import os
# Set environment variables for MPS
os.environ['PYTORCH_MPS_HIGH_WATERMARK_RATIO'] = '0.0'
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
# Initialize device as CPU for default
device = torch.device('cpu')
# Check if MPS is available
if torch.backends.mps.is_available():
if not torch.backends.mps.is_built():
print("MPS not available because the current PyTorch install was not built with MPS enabled.")
else:
device = torch.device('mps:0')
        # No CUDA involved here; the model is moved to the MPS device explicitly below via .to(device).
aura_sr = AuraSR.from_pretrained("fal-ai/AuraSR").to(device)
def load_image_from_path(file_path):
return Image.open(file_path)
def upscale_and_save(original_path):
original_image = load_image_from_path(original_path)
upscaled_image = aura_sr.upscale_4x(original_image)
upscaled_image.save(original_path)
# Insert your image path
upscale_and_save("/Users/sij/workshop/sijapi/sijapi/testbed/API__00482_ 2.png")

File diff suppressed because it is too large


@@ -1,67 +0,0 @@
import asyncio
from pathlib import Path
from sijapi import EMAIL_CONFIG, EMAIL_LOGS
from sijapi.utilities import EmailAccount
from sijapi.routers import email
from sijapi.logs import get_logger
l = get_logger(__name__)
async def initialize_log_files():
summarized_log = EMAIL_LOGS / "summarized.txt"
autoresponded_log = EMAIL_LOGS / "autoresponded.txt"
diagnostic_log = EMAIL_LOGS / "diagnostic.txt"
for log_file in [summarized_log, autoresponded_log, diagnostic_log]:
log_file.parent.mkdir(parents=True, exist_ok=True)
log_file.write_text("")
l.debug(f"Log files initialized: {summarized_log}, {autoresponded_log}, {diagnostic_log}")
return summarized_log, autoresponded_log, diagnostic_log
async def process_all_emails(account: EmailAccount, summarized_log: Path, autoresponded_log: Path, diagnostic_log: Path):
try:
with email.get_imap_connection(account) as inbox:
l.debug(f"Connected to {account.name}, processing all emails...")
all_messages = inbox.messages()
unread_messages = set(uid for uid, _ in inbox.messages(unread=True))
processed_count = 0
for identifier, message in all_messages:
# Log diagnostic information
with open(diagnostic_log, 'a') as f:
f.write(f"Account: {account.name}, Raw Identifier: {identifier}, Type: {type(identifier)}\n")
# Attempt to get a string representation of the identifier
if isinstance(identifier, bytes):
id_str = identifier.decode()
elif isinstance(identifier, (int, str)):
id_str = str(identifier)
else:
id_str = repr(identifier)
if identifier not in unread_messages:
processed_count += 1
for log_file in [summarized_log, autoresponded_log]:
with open(log_file, 'a') as f:
f.write(f"{id_str}\n")
l.info(f"Processed {processed_count} non-unread emails for account {account.name}")
except Exception as e:
        l.error(f"An error occurred while processing emails for account {account.name}: {e}")
async def main():
email_accounts = email.load_email_accounts(EMAIL_CONFIG)
summarized_log, autoresponded_log, diagnostic_log = await initialize_log_files()
l.debug(f"Processing {len(email_accounts)} email accounts")
tasks = [process_all_emails(account, summarized_log, autoresponded_log, diagnostic_log) for account in email_accounts]
await asyncio.gather(*tasks)
# Final verification
with open(summarized_log, 'r') as f:
final_count = len(f.readlines())
l.info(f"Final non-unread email count: {final_count}")
if __name__ == "__main__":
asyncio.run(main())


@@ -1,191 +0,0 @@
import psycopg2
from psycopg2 import sql
import sys
def connect_to_db():
return psycopg2.connect(
dbname='sij',
user='sij',
password='Synchr0!',
host='localhost'
)
def get_tables(cur):
cur.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public' AND table_type = 'BASE TABLE'
AND table_name NOT LIKE '%_uuid' AND table_name NOT LIKE '%_orig'
AND table_name != 'spatial_ref_sys'
""")
return [row[0] for row in cur.fetchall()]
def get_columns(cur, table_name):
cur.execute("""
SELECT column_name, udt_name,
is_nullable, column_default,
character_maximum_length, numeric_precision, numeric_scale
FROM information_schema.columns
WHERE table_name = %s
ORDER BY ordinal_position
""", (table_name,))
return cur.fetchall()
def get_constraints(cur, table_name):
cur.execute("""
SELECT conname, contype, pg_get_constraintdef(c.oid)
FROM pg_constraint c
JOIN pg_namespace n ON n.oid = c.connamespace
WHERE conrelid = %s::regclass
AND n.nspname = 'public'
""", (table_name,))
return cur.fetchall()
def drop_table_if_exists(cur, table_name):
cur.execute(sql.SQL("DROP TABLE IF EXISTS {} CASCADE").format(sql.Identifier(table_name)))
def create_uuid_table(cur, old_table, new_table):
drop_table_if_exists(cur, new_table)
columns = get_columns(cur, old_table)
constraints = get_constraints(cur, old_table)
column_defs = []
has_id_column = any(col[0] == 'id' for col in columns)
for col in columns:
col_name, udt_name, is_nullable, default, max_length, precision, scale = col
if col_name == 'id' and has_id_column:
column_defs.append(sql.SQL("{} UUID PRIMARY KEY DEFAULT gen_random_uuid()").format(sql.Identifier(col_name)))
else:
type_sql = sql.SQL("{}").format(sql.Identifier(udt_name))
if max_length:
type_sql = sql.SQL("{}({})").format(type_sql, sql.Literal(max_length))
elif precision and scale:
type_sql = sql.SQL("{}({},{})").format(type_sql, sql.Literal(precision), sql.Literal(scale))
column_def = sql.SQL("{} {}").format(sql.Identifier(col_name), type_sql)
if is_nullable == 'NO':
column_def = sql.SQL("{} NOT NULL").format(column_def)
if default and 'nextval' not in default: # Skip auto-increment defaults
column_def = sql.SQL("{} DEFAULT {}").format(column_def, sql.SQL(default))
column_defs.append(column_def)
constraint_defs = []
for constraint in constraints:
conname, contype, condef = constraint
if contype != 'p' or not has_id_column: # Keep primary key if there's no id column
constraint_defs.append(sql.SQL(condef))
if not has_id_column:
column_defs.append(sql.SQL("uuid UUID DEFAULT gen_random_uuid()"))
query = sql.SQL("CREATE TABLE {} ({})").format(
sql.Identifier(new_table),
sql.SQL(", ").join(column_defs + constraint_defs)
)
cur.execute(query)
def migrate_data(cur, old_table, new_table):
columns = get_columns(cur, old_table)
column_names = [col[0] for col in columns]
has_id_column = 'id' in column_names
if has_id_column:
column_names.remove('id')
old_cols = sql.SQL(", ").join(map(sql.Identifier, column_names))
new_cols = sql.SQL(", ").join(map(sql.Identifier, ['id'] + column_names))
query = sql.SQL("INSERT INTO {} ({}) SELECT gen_random_uuid(), {} FROM {}").format(
sql.Identifier(new_table),
new_cols,
old_cols,
sql.Identifier(old_table)
)
else:
old_cols = sql.SQL(", ").join(map(sql.Identifier, column_names))
new_cols = sql.SQL(", ").join(map(sql.Identifier, column_names + ['uuid']))
query = sql.SQL("INSERT INTO {} ({}) SELECT {}, gen_random_uuid() FROM {}").format(
sql.Identifier(new_table),
new_cols,
old_cols,
sql.Identifier(old_table)
)
cur.execute(query)
def update_foreign_keys(cur, tables):
for table in tables:
constraints = get_constraints(cur, table)
for constraint in constraints:
conname, contype, condef = constraint
if contype == 'f': # Foreign key constraint
referenced_table = condef.split('REFERENCES ')[1].split('(')[0].strip()
referenced_column = condef.split('(')[2].split(')')[0].strip()
local_column = condef.split('(')[1].split(')')[0].strip()
cur.execute(sql.SQL("""
UPDATE {table_uuid}
SET {local_column} = subquery.new_id::text::{local_column_type}
FROM (
SELECT old.{ref_column} AS old_id, new_table.id AS new_id
FROM {ref_table} old
JOIN public.{ref_table_uuid} new_table ON new_table.{ref_column}::text = old.{ref_column}::text
) AS subquery
WHERE {local_column}::text = subquery.old_id::text
""").format(
table_uuid=sql.Identifier(f"{table}_uuid"),
local_column=sql.Identifier(local_column),
local_column_type=sql.SQL(get_column_type(cur, f"{table}_uuid", local_column)),
ref_column=sql.Identifier(referenced_column),
ref_table=sql.Identifier(referenced_table),
ref_table_uuid=sql.Identifier(f"{referenced_table}_uuid")
))
def get_column_type(cur, table_name, column_name):
cur.execute("""
SELECT data_type
FROM information_schema.columns
WHERE table_name = %s AND column_name = %s
""", (table_name, column_name))
return cur.fetchone()[0]
def rename_tables(cur, tables):
for table in tables:
drop_table_if_exists(cur, f"{table}_orig")
cur.execute(sql.SQL("ALTER TABLE IF EXISTS {} RENAME TO {}").format(
sql.Identifier(table), sql.Identifier(f"{table}_orig")
))
cur.execute(sql.SQL("ALTER TABLE IF EXISTS {} RENAME TO {}").format(
sql.Identifier(f"{table}_uuid"), sql.Identifier(table)
))
def main():
try:
with connect_to_db() as conn:
with conn.cursor() as cur:
tables = get_tables(cur)
# Create new UUID tables
for table in tables:
print(f"Creating UUID table for {table}...")
create_uuid_table(cur, table, f"{table}_uuid")
# Migrate data
for table in tables:
print(f"Migrating data for {table}...")
migrate_data(cur, table, f"{table}_uuid")
# Update foreign keys
print("Updating foreign key references...")
update_foreign_keys(cur, tables)
# Rename tables
print("Renaming tables...")
rename_tables(cur, tables)
conn.commit()
print("Migration completed successfully.")
    except Exception as e:
        print(f"An error occurred: {e}")
        # The connection context manager has already rolled back the transaction;
        # an explicit conn.rollback() here would raise NameError if the connection never opened.
if __name__ == "__main__":
main()


@@ -1,366 +0,0 @@
#!/usr/bin/env python3
import requests
import json
import time
import os
import subprocess
import sys
import yaml
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import argparse
import psycopg2
from psycopg2.extras import execute_values
def load_config():
script_dir = os.path.dirname(os.path.abspath(__file__))
sys_config_path = os.path.join(script_dir, '..', 'config', 'sys.yaml')
gis_config_path = os.path.join(script_dir, '..', 'config', 'gis.yaml')
with open(sys_config_path, 'r') as f:
sys_config = yaml.safe_load(f)
with open(gis_config_path, 'r') as f:
gis_config = yaml.safe_load(f)
return sys_config, gis_config
def get_db_config(sys_config):
pool = sys_config.get('POOL', [])
if pool:
db_config = pool[0]
return {
'DB_NAME': db_config.get('db_name'),
'DB_USER': db_config.get('db_user'),
'DB_PASSWORD': db_config.get('db_pass'),
'DB_HOST': db_config.get('ts_ip'),
'DB_PORT': str(db_config.get('db_port'))
}
return {}
def get_feature_count(url):
params = {
'where': '1=1',
'returnCountOnly': 'true',
'f': 'json'
}
retries = Retry(total=10, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])
with requests.Session() as session:
session.mount("https://", HTTPAdapter(max_retries=retries))
response = session.get(url, params=params, timeout=30)
response.raise_for_status()
data = response.json()
return data.get('count', 0)
def fetch_features(url, offset, num, max_retries=5):
params = {
'where': '1=1',
'outFields': '*',
'geometryPrecision': 6,
'outSR': 4326,
'f': 'json',
'resultOffset': offset,
'resultRecordCount': num,
'orderByFields': 'OBJECTID'
}
for attempt in range(max_retries):
try:
retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
with requests.Session() as session:
session.mount("https://", HTTPAdapter(max_retries=retries))
response = session.get(url, params=params, timeout=30)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
print(f"Error fetching features (attempt {attempt + 1}/{max_retries}): {e}")
if attempt == max_retries - 1:
raise
            time.sleep(5 * (attempt + 1))  # Linear backoff between retries
def create_table(db_config, table_name, gis_config):
conn = psycopg2.connect(
dbname=db_config['DB_NAME'],
user=db_config['DB_USER'],
password=db_config['DB_PASSWORD'],
host=db_config['DB_HOST'],
port=db_config['DB_PORT']
)
try:
with conn.cursor() as cur:
# Check if the table already exists
cur.execute(f"SELECT to_regclass('{table_name}')")
if cur.fetchone()[0] is None:
# If the table doesn't exist, create it based on the first feature
url = next(layer['url'] for layer in gis_config['layers'] if layer['table_name'] == table_name)
first_feature = fetch_features(url, 0, 1)['features'][0]
columns = []
for attr, value in first_feature['attributes'].items():
column_name = attr.lower().replace('.', '_').replace('()', '')
if isinstance(value, int):
columns.append(f'"{column_name}" INTEGER')
elif isinstance(value, float):
columns.append(f'"{column_name}" DOUBLE PRECISION')
else:
columns.append(f'"{column_name}" TEXT')
create_sql = f"""
CREATE TABLE {table_name} (
id SERIAL PRIMARY KEY,
geom GEOMETRY(Polygon, 4326),
{', '.join(columns)}
)
"""
cur.execute(create_sql)
# Create index on plssid
cur.execute(f'CREATE INDEX idx_{table_name.split(".")[-1]}_plssid ON {table_name}("plssid")')
print(f"Created table: {table_name}")
else:
print(f"Table {table_name} already exists")
conn.commit()
except psycopg2.Error as e:
print(f"Error creating table {table_name}: {e}")
finally:
conn.close()
def insert_features_to_db(features, table_name, db_config):
conn = psycopg2.connect(
dbname=db_config['DB_NAME'],
user=db_config['DB_USER'],
password=db_config['DB_PASSWORD'],
host=db_config['DB_HOST'],
port=db_config['DB_PORT']
)
try:
with conn.cursor() as cur:
# Get the column names from the table
cur.execute(f"SELECT column_name FROM information_schema.columns WHERE table_name = '{table_name.split('.')[-1]}'")
db_columns = [row[0] for row in cur.fetchall() if row[0] != 'id']
# Prepare the SQL statement
sql = f"""
INSERT INTO {table_name} ({', '.join([f'"{col}"' for col in db_columns])})
VALUES %s
"""
# Prepare the template for execute_values
template = f"({', '.join(['%s' for _ in db_columns])})"
values = []
for feature in features:
geom = feature.get('geometry')
attrs = feature.get('attributes')
if geom and attrs:
rings = geom['rings'][0]
wkt = f"POLYGON(({','.join([f'{x} {y}' for x, y in rings])}))"
row = []
for col in db_columns:
if col == 'geom':
row.append(wkt)
else:
# Map database column names back to original attribute names
attr_name = col.upper()
if attr_name == 'SHAPE_STAREA':
attr_name = 'Shape.STArea()'
elif attr_name == 'SHAPE_STLENGTH':
attr_name = 'Shape.STLength()'
row.append(attrs.get(attr_name))
values.append(tuple(row))
else:
print(f"Skipping invalid feature: {feature}")
if values:
execute_values(cur, sql, values, template=template, page_size=100)
print(f"Inserted {len(values)} features")
else:
print("No valid features to insert")
conn.commit()
except Exception as e:
print(f"Error inserting features: {e}")
print(f"First feature for debugging: {features[0] if features else 'No features'}")
conn.rollback()
finally:
conn.close()
def download_and_import_layer(layer_config, db_config, gis_config, force_refresh):
url = layer_config['url']
layer_name = layer_config['layer_name']
table_name = layer_config['table_name']
batch_size = layer_config['batch_size']
delay = layer_config['delay'] / 1000 # Convert to seconds
total_count = get_feature_count(url)
print(f"Total {layer_name} features: {total_count}")
# Check existing records in the database
existing_count = get_existing_record_count(db_config, table_name)
if existing_count == total_count and not force_refresh:
print(f"Table {table_name} already contains all {total_count} features. Skipping.")
return
if force_refresh:
delete_existing_table(db_config, table_name)
create_table(db_config, table_name, gis_config)
existing_count = 0
elif existing_count == 0:
create_table(db_config, table_name, gis_config)
offset = existing_count
start_time = time.time()
try:
while offset < total_count:
batch_start_time = time.time()
print(f"Fetching {layer_name} features {offset} to {offset + batch_size}...")
try:
data = fetch_features(url, offset, batch_size)
new_features = data.get('features', [])
if not new_features:
break
insert_features_to_db(new_features, table_name, db_config)
offset += len(new_features)
batch_end_time = time.time()
batch_duration = batch_end_time - batch_start_time
print(f"Batch processed in {batch_duration:.2f} seconds")
# Progress indicator
progress = offset / total_count
bar_length = 30
filled_length = int(bar_length * progress)
bar = '=' * filled_length + '-' * (bar_length - filled_length)
print(f'\rProgress: [{bar}] {progress:.1%} ({offset}/{total_count} features)', end='', flush=True)
time.sleep(delay)
except Exception as e:
print(f"\nError processing batch starting at offset {offset}: {e}")
print("Continuing with next batch...")
offset += batch_size
end_time = time.time()
total_duration = end_time - start_time
print(f"\nTotal {layer_name} features fetched and imported: {offset}")
print(f"Total time: {total_duration:.2f} seconds")
except Exception as e:
print(f"\nError during download and import: {e}")
print(f"Last successful offset: {offset}")
def get_existing_record_count(db_config, table_name):
conn = psycopg2.connect(
dbname=db_config['DB_NAME'],
user=db_config['DB_USER'],
password=db_config['DB_PASSWORD'],
host=db_config['DB_HOST'],
port=db_config['DB_PORT']
)
try:
with conn.cursor() as cur:
cur.execute(f"SELECT COUNT(*) FROM {table_name}")
count = cur.fetchone()[0]
return count
except psycopg2.Error:
return 0
finally:
conn.close()
def delete_existing_table(db_config, table_name):
conn = psycopg2.connect(
dbname=db_config['DB_NAME'],
user=db_config['DB_USER'],
password=db_config['DB_PASSWORD'],
host=db_config['DB_HOST'],
port=db_config['DB_PORT']
)
try:
with conn.cursor() as cur:
# Drop the index if it exists
cur.execute(f"DROP INDEX IF EXISTS idx_{table_name.split('.')[-1]}_plssid")
# Then drop the table
cur.execute(f"DROP TABLE IF EXISTS {table_name} CASCADE")
conn.commit()
print(f"Deleted existing table and index: {table_name}")
except psycopg2.Error as e:
print(f"Error deleting table {table_name}: {e}")
finally:
conn.close()
def check_postgres_connection(db_config):
try:
subprocess.run(['psql',
'-h', db_config['DB_HOST'],
'-p', db_config['DB_PORT'],
'-U', db_config['DB_USER'],
'-d', db_config['DB_NAME'],
'-c', 'SELECT 1;'],
check=True, capture_output=True, text=True)
return True
except subprocess.CalledProcessError:
return False
def check_postgis_extension(db_config):
try:
result = subprocess.run(['psql',
'-h', db_config['DB_HOST'],
'-p', db_config['DB_PORT'],
'-U', db_config['DB_USER'],
'-d', db_config['DB_NAME'],
'-c', "SELECT 1 FROM pg_extension WHERE extname = 'postgis';"],
check=True, capture_output=True, text=True)
return '1' in result.stdout
except subprocess.CalledProcessError:
return False
def create_postgis_extension(db_config):
try:
subprocess.run(['psql',
'-h', db_config['DB_HOST'],
'-p', db_config['DB_PORT'],
'-U', db_config['DB_USER'],
'-d', db_config['DB_NAME'],
'-c', "CREATE EXTENSION IF NOT EXISTS postgis;"],
check=True, capture_output=True, text=True)
print("PostGIS extension created successfully.")
except subprocess.CalledProcessError as e:
print(f"Error creating PostGIS extension: {e}")
sys.exit(1)
def main():
parser = argparse.ArgumentParser(description="Download and import PLSS data")
parser.add_argument("--force-refresh", nargs='*', help="Force refresh of specified layers or all if none specified")
args = parser.parse_args()
sys_config, gis_config = load_config()
db_config = get_db_config(sys_config)
if not check_postgres_connection(db_config):
print("Error: Unable to connect to PostgreSQL. Please check your connection settings.")
sys.exit(1)
if not check_postgis_extension(db_config):
print("PostGIS extension not found. Attempting to create it...")
create_postgis_extension(db_config)
try:
for layer in gis_config['layers']:
            if args.force_refresh is None or not args.force_refresh or layer['layer_name'] in args.force_refresh:
                # An empty --force-refresh list means "refresh all layers"; bool([]) is False, so use `is not None` as the flag
                download_and_import_layer(layer, db_config, gis_config, args.force_refresh is not None)
except requests.exceptions.RequestException as e:
print(f"Error fetching data: {e}")
except Exception as e:
print(f"An unexpected error occurred: {e}")
if __name__ == "__main__":
main()
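# A hedged sketch of the two config files this script reads, expressed as the Python structures
# load_config() would return. Key names mirror the code above; all values are placeholders rather
# than real deployment settings, and the layer shown is hypothetical.
EXAMPLE_SYS_CONFIG = {
    'POOL': [
        {'ts_id': 'example-host', 'ts_ip': '127.0.0.1', 'db_port': 5432,
         'db_name': 'exampledb', 'db_user': 'exampleuser', 'db_pass': 'examplepass'},
    ],
}
EXAMPLE_GIS_CONFIG = {
    'layers': [
        {'layer_name': 'Townships', 'table_name': 'public.plss_townships',
         'url': 'https://gis.blm.gov/arcgis/rest/services/Cadastral/BLM_Natl_PLSS_CadNSDI/MapServer/1/query',
         'batch_size': 1000, 'delay': 1000},
    ],
}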


@@ -1,101 +0,0 @@
import asyncio
import asyncpg
import yaml
from pathlib import Path
import subprocess
import os
async def load_config():
config_path = Path(__file__).parent.parent / 'config' / 'db.yaml'
with open(config_path, 'r') as file:
return yaml.safe_load(file)
async def get_table_size(conn, table_name):
return await conn.fetchval(f"SELECT COUNT(*) FROM {table_name}")
async def check_postgres_version(conn):
return await conn.fetchval("SELECT version()")
async def replicate_table(source, target, table_name):
print(f"Replicating {table_name} from {source['ts_id']} to {target['ts_id']}")
    # asyncpg expects database/user/password/host/port, so map the pool config keys onto them
    conn_params = {'database': 'db_name', 'user': 'db_user', 'password': 'db_pass', 'host': 'ts_ip', 'port': 'db_port'}
    source_conn = await asyncpg.connect(**{k: source[v] for k, v in conn_params.items()})
    target_conn = await asyncpg.connect(**{k: target[v] for k, v in conn_params.items()})
try:
source_version = await check_postgres_version(source_conn)
target_version = await check_postgres_version(target_conn)
print(f"Source database version: {source_version}")
print(f"Target database version: {target_version}")
table_size = await get_table_size(source_conn, table_name)
print(f"Table size: {table_size} rows")
# Dump the table
dump_command = [
'pg_dump',
'-h', source['ts_ip'],
'-p', str(source['db_port']),
'-U', source['db_user'],
'-d', source['db_name'],
'-t', table_name,
'--no-owner',
'--no-acl'
]
        env = {**os.environ, 'PGPASSWORD': source['db_pass']}  # keep PATH so pg_dump can be located
dump_result = subprocess.run(dump_command, env=env, capture_output=True, text=True)
if dump_result.returncode != 0:
raise Exception(f"Dump failed: {dump_result.stderr}")
print("Dump completed successfully")
# Drop and recreate the table on the target
await target_conn.execute(f"DROP TABLE IF EXISTS {table_name} CASCADE")
print(f"Dropped table {table_name} on target")
# Restore the table
restore_command = [
'psql',
'-h', target['ts_ip'],
'-p', str(target['db_port']),
'-U', target['db_user'],
'-d', target['db_name'],
]
        env = {**os.environ, 'PGPASSWORD': target['db_pass']}
restore_result = subprocess.run(restore_command, input=dump_result.stdout, env=env, capture_output=True, text=True)
if restore_result.returncode != 0:
raise Exception(f"Restore failed: {restore_result.stderr}")
print(f"Table {table_name} restored successfully")
# Verify the number of rows in the target table
target_size = await get_table_size(target_conn, table_name)
if target_size == table_size:
print(f"Replication successful. {target_size} rows copied.")
else:
print(f"Warning: Source had {table_size} rows, but target has {target_size} rows.")
except Exception as e:
print(f"An error occurred while replicating {table_name}: {str(e)}")
finally:
await source_conn.close()
await target_conn.close()
async def main():
config = await load_config()
source_server = config['POOL'][0] # sij-mbp16
target_servers = config['POOL'][1:] # sij-vm and sij-vps
tables_to_replicate = [
'click_logs', 'dailyweather', 'hourlyweather', 'locations', 'short_urls'
]
for table_name in tables_to_replicate:
for target_server in target_servers:
await replicate_table(source_server, target_server, table_name)
print("All replications completed!")
if __name__ == "__main__":
asyncio.run(main())


@@ -1,132 +0,0 @@
#!/usr/bin/env python3
import os
import yaml
import subprocess
import time
from tqdm import tqdm
def load_config():
script_dir = os.path.dirname(os.path.abspath(__file__))
sys_config_path = os.path.join(script_dir, '..', 'config', 'sys.yaml')
gis_config_path = os.path.join(script_dir, '..', 'config', 'gis.yaml')
with open(sys_config_path, 'r') as f:
sys_config = yaml.safe_load(f)
with open(gis_config_path, 'r') as f:
gis_config = yaml.safe_load(f)
return sys_config, gis_config
def get_table_size(server, table_name):
env = os.environ.copy()
env['PGPASSWORD'] = server['db_pass']
command = [
'psql',
'-h', server['ts_ip'],
'-p', str(server['db_port']),
'-U', server['db_user'],
'-d', server['db_name'],
'-t',
'-c', f"SELECT COUNT(*) FROM {table_name}"
]
result = subprocess.run(command, env=env, capture_output=True, text=True, check=True)
return int(result.stdout.strip())
def replicate_table(source, targets, table_name):
print(f"Replicating {table_name}")
# Get table size for progress bar
table_size = get_table_size(source, table_name)
print(f"Table size: {table_size} rows")
# Dump the table from the source
dump_command = [
'pg_dump',
'-h', source['ts_ip'],
'-p', str(source['db_port']),
'-U', source['db_user'],
'-d', source['db_name'],
'-t', table_name,
'--no-owner',
'--no-acl'
]
env = os.environ.copy()
env['PGPASSWORD'] = source['db_pass']
print("Dumping table...")
with open(f"{table_name}.sql", 'w') as f:
subprocess.run(dump_command, env=env, stdout=f, check=True)
print("Dump complete")
# Restore the table to each target
for target in targets:
print(f"Replicating to {target['ts_id']}")
# Drop table and its sequence
drop_commands = [
f"DROP TABLE IF EXISTS {table_name} CASCADE;",
f"DROP SEQUENCE IF EXISTS {table_name}_id_seq CASCADE;"
]
restore_command = [
'psql',
'-h', target['ts_ip'],
'-p', str(target['db_port']),
'-U', target['db_user'],
'-d', target['db_name'],
]
env = os.environ.copy()
env['PGPASSWORD'] = target['db_pass']
# Execute drop commands
for cmd in drop_commands:
print(f"Executing: {cmd}")
subprocess.run(restore_command + ['-c', cmd], env=env, check=True)
# Restore the table
print("Restoring table...")
        # psql writes command tags like "COPY 1234" to stdout, so merge stderr into stdout
        # and read a single stream (this also avoids blocking on a full, unread pipe).
        process = subprocess.Popen(restore_command + ['-f', f"{table_name}.sql"], env=env,
                                   stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
        pbar = tqdm(total=table_size, desc="Copying rows")
        copied_rows = 0
        output_lines = []
        for line in process.stdout:
            output_lines.append(line)
            if line.startswith("COPY"):
                copied_rows = int(line.split()[1])
                pbar.update(copied_rows - pbar.n)
            print(line, end='')  # Print all output for visibility
        pbar.close()
        process.wait()
        if process.returncode != 0:
            print(f"Error occurred during restoration to {target['ts_id']}")
            print(''.join(output_lines))
        else:
            print(f"Restoration to {target['ts_id']} completed successfully")
# Clean up the dump file
os.remove(f"{table_name}.sql")
print(f"Replication of {table_name} completed")
def main():
sys_config, gis_config = load_config()
source_server = sys_config['POOL'][0]
target_servers = sys_config['POOL'][1:]
tables = [layer['table_name'] for layer in gis_config['layers']]
for table in tables:
replicate_table(source_server, target_servers, table)
print("All replications completed!")
if __name__ == "__main__":
main()


@@ -1,90 +0,0 @@
#!/bin/bash
# Configuration
SOURCE_HOST="100.64.64.20"
SOURCE_PORT="5432"
SOURCE_DB="sij"
SOURCE_USER="sij"
SOURCE_PASS="Synchr0!"
# Target servers
declare -a TARGETS=(
"sij-vm:100.64.64.11:5432:sij:sij:Synchr0!"
"sij-vps:100.64.64.15:5432:sij:sij:Synchr0!"
)
# Tables to replicate
TABLES=("dailyweather" "hourlyweather" "short_urls" "click_logs" "locations")
# PostgreSQL binaries
PSQL="/Applications/Postgres.app/Contents/Versions/latest/bin/psql"
PG_DUMP="/Applications/Postgres.app/Contents/Versions/latest/bin/pg_dump"
# Function to run SQL and display results
run_sql() {
local host=$1
local port=$2
local db=$3
local user=$4
local pass=$5
local sql=$6
PGPASSWORD=$pass $PSQL -h $host -p $port -U $user -d $db -c "$sql"
}
# Replicate to a target
replicate_to_target() {
local target_info=$1
IFS=':' read -r target_name target_host target_port target_db target_user target_pass <<< "$target_info"
echo "Replicating to $target_name ($target_host)"
# Check source tables
echo "Checking source tables:"
for table in "${TABLES[@]}"; do
run_sql $SOURCE_HOST $SOURCE_PORT $SOURCE_DB $SOURCE_USER $SOURCE_PASS "SELECT COUNT(*) FROM $table;"
done
# Dump and restore each table
for table in "${TABLES[@]}"; do
echo "Replicating $table"
# Dump table
PGPASSWORD=$SOURCE_PASS $PG_DUMP -h $SOURCE_HOST -p $SOURCE_PORT -U $SOURCE_USER -d $SOURCE_DB -t $table --no-owner --no-acl > ${table}_dump.sql
if [ $? -ne 0 ]; then
echo "Error dumping $table"
continue
fi
# Drop and recreate table on target
run_sql $target_host $target_port $target_db $target_user $target_pass "DROP TABLE IF EXISTS $table CASCADE; "
# Restore table
PGPASSWORD=$target_pass $PSQL -h $target_host -p $target_port -U $target_user -d $target_db -f ${table}_dump.sql
if [ $? -ne 0 ]; then
echo "Error restoring $table"
else
echo "$table replicated successfully"
fi
# Clean up dump file
rm ${table}_dump.sql
done
# Verify replication
echo "Verifying replication:"
for table in "${TABLES[@]}"; do
echo "Checking $table on target:"
run_sql $target_host $target_port $target_db $target_user $target_pass "SELECT COUNT(*) FROM $table;"
done
}
# Main replication process
for target in "${TARGETS[@]}"; do
replicate_to_target "$target"
done
echo "Replication completed"


@@ -1,125 +0,0 @@
import asyncio
import asyncpg
import yaml
from pathlib import Path
import subprocess
import sys
import os
async def load_config():
config_path = Path(__file__).parent.parent / 'config' / 'db.yaml'
with open(config_path, 'r') as file:
return yaml.safe_load(file)
async def check_table_existence(conn, tables):
for table in tables:
exists = await conn.fetchval(f"""
SELECT EXISTS (
SELECT FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name = $1
)
""", table)
print(f"Table {table} {'exists' if exists else 'does not exist'} in the database.")
async def check_user_permissions(conn, tables):
for table in tables:
has_permission = await conn.fetchval(f"""
SELECT has_table_privilege(current_user, $1, 'SELECT')
""", table)
print(f"User {'has' if has_permission else 'does not have'} SELECT permission on table {table}.")
async def replicate_tables(source, target, tables):
print(f"Replicating tables from {source['ts_id']} to {target['ts_id']}")
conn_params = {
'database': 'db_name',
'user': 'db_user',
'password': 'db_pass',
'host': 'ts_ip',
'port': 'db_port'
}
source_conn = await asyncpg.connect(**{k: source[v] for k, v in conn_params.items()})
target_conn = await asyncpg.connect(**{k: target[v] for k, v in conn_params.items()})
try:
source_version = await source_conn.fetchval("SELECT version()")
target_version = await target_conn.fetchval("SELECT version()")
print(f"Source database version: {source_version}")
print(f"Target database version: {target_version}")
print("Checking table existence in source database:")
await check_table_existence(source_conn, tables)
print("\nChecking user permissions in source database:")
await check_user_permissions(source_conn, tables)
# Dump all tables to a file
dump_file = 'dump.sql'
        dump_command = [
            '/Applications/Postgres.app/Contents/Versions/latest/bin/pg_dump',
            '-h', source['ts_ip'],
            '-p', str(source['db_port']),
            '-U', source['db_user'],
            '-d', source['db_name'],
        ]
        # Each table needs its own -t flag; joining them into a single argument would be
        # parsed by pg_dump as one (nonexistent) table name.
        for table in tables:
            dump_command.extend(['-t', table])
        dump_command.extend(['--no-owner', '--no-acl', '-f', dump_file])
env = {'PGPASSWORD': source['db_pass']}
print(f"\nExecuting dump command: {' '.join(dump_command)}")
dump_result = subprocess.run(dump_command, env=env, capture_output=True, text=True)
if dump_result.returncode != 0:
print(f"Dump stderr: {dump_result.stderr}")
raise Exception(f"Dump failed: {dump_result.stderr}")
print("Dump completed successfully.")
# Restore from the dump file
restore_command = [
'/Applications/Postgres.app/Contents/Versions/latest/bin/psql',
'-h', target['ts_ip'],
'-p', str(target['db_port']),
'-U', target['db_user'],
'-d', target['db_name'],
'-f', dump_file
]
env = {'PGPASSWORD': target['db_pass']}
print(f"\nExecuting restore command: {' '.join(restore_command)}")
restore_result = subprocess.run(restore_command, env=env, capture_output=True, text=True)
if restore_result.returncode != 0:
print(f"Restore stderr: {restore_result.stderr}")
raise Exception(f"Restore failed: {restore_result.stderr}")
print("Restore completed successfully.")
# Clean up the dump file
os.remove(dump_file)
except Exception as e:
print(f"An error occurred during replication: {str(e)}")
print("Exception details:", sys.exc_info())
finally:
await source_conn.close()
await target_conn.close()
async def main():
config = await load_config()
source_server = config['POOL'][0] # sij-mbp16
target_servers = config['POOL'][1:] # sij-vm and sij-vps
tables_to_replicate = [
'dailyweather', 'hourlyweather', 'short_urls', 'click_logs', 'locations'
]
for target_server in target_servers:
await replicate_tables(source_server, target_server, tables_to_replicate)
print("All replications completed!")
if __name__ == "__main__":
asyncio.run(main())

File diff suppressed because it is too large


@@ -1,211 +0,0 @@
import yaml
import requests
import paramiko
import time
from pathlib import Path
import logging
import subprocess
import os
import argparse
import sys
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def load_config():
config_path = Path(__file__).parent.parent / 'config' / 'sys.yaml'
with open(config_path, 'r') as file:
return yaml.safe_load(file)
def load_env():
env_path = Path(__file__).parent.parent / 'config' / '.env'
if env_path.exists():
with open(env_path, 'r') as file:
for line in file:
line = line.strip()
if line and not line.startswith('#'):
try:
key, value = line.split('=', 1)
os.environ[key.strip()] = value.strip()
except ValueError:
logging.warning(f"Skipping invalid line in .env file: {line}")
def check_server(ip, port, ts_id):
address = f"http://{ip}:{port}/id"
try:
response = requests.get(address, timeout=5)
response_text = response.text.strip().strip('"')
return response.status_code == 200 and response_text == ts_id
except requests.RequestException as e:
logging.error(f"Error checking server {ts_id}: {str(e)}")
return False
def execute_ssh_command(ssh, command):
stdin, stdout, stderr = ssh.exec_command(command)
exit_status = stdout.channel.recv_exit_status()
output = stdout.read().decode().strip()
error = stderr.read().decode().strip()
return exit_status, output, error
def is_local_tmux_session_running(session_name):
try:
result = subprocess.run(['tmux', 'has-session', '-t', session_name], capture_output=True, text=True)
return result.returncode == 0
except subprocess.CalledProcessError:
return False
def start_local_server(server, pull=False, push=False):
try:
if is_local_tmux_session_running('sijapi'):
logging.info("Local sijapi tmux session is already running.")
return
git_command = ""
if pull:
git_command = "git pull &&"
elif push:
git_command = "git add -A . && git commit -m \"auto-update\" && git push origin --force &&"
command = f"{server['tmux']} new-session -d -s sijapi 'cd {server['path']} && {git_command} {server['conda_env']}/bin/python -m sijapi'"
logging.info(f"Executing local command: {command}")
result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
logging.info(f"Successfully started sijapi session on local machine")
logging.debug(f"Command output: {result.stdout}")
except subprocess.CalledProcessError as e:
logging.error(f"Failed to start sijapi session on local machine. Error: {e}")
logging.error(f"Error output: {e.stderr}")
def start_remote_server(server, pull=False, push=False):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(
server['ts_ip'],
port=server['ssh_port'],
username=server['ssh_user'],
password=server['ssh_pass'],
timeout=10
)
status, output, error = execute_ssh_command(ssh, f"{server['tmux']} has-session -t sijapi 2>/dev/null && echo 'exists' || echo 'not exists'")
if output == 'exists':
logging.info(f"sijapi session already exists on {server['ts_id']}")
return
git_command = ""
if pull:
git_command = "git pull &&"
elif push:
git_command = "git add -A . && git commit -m \"auto-update\" && git push origin --force &&"
command = f"{server['tmux']} new-session -d -s sijapi 'cd {server['path']} && {git_command} {server['conda_env']}/bin/python -m sijapi'"
status, output, error = execute_ssh_command(ssh, command)
if status == 0:
logging.info(f"Successfully started sijapi session on {server['ts_id']}")
else:
logging.error(f"Failed to start sijapi session on {server['ts_id']}. Error: {error}")
except paramiko.SSHException as e:
logging.error(f"Failed to connect to {server['ts_id']}: {str(e)}")
finally:
ssh.close()
def kill_local_server():
try:
if is_local_tmux_session_running('sijapi'):
subprocess.run(['tmux', 'kill-session', '-t', 'sijapi'], check=True)
logging.info("Killed local sijapi tmux session.")
else:
logging.info("No local sijapi tmux session to kill.")
except subprocess.CalledProcessError as e:
logging.error(f"Failed to kill local sijapi tmux session. Error: {e}")
def kill_remote_server(server):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(
server['ts_ip'],
port=server['ssh_port'],
username=server['ssh_user'],
password=server['ssh_pass'],
timeout=10
)
command = f"{server['tmux']} kill-session -t sijapi"
status, output, error = execute_ssh_command(ssh, command)
if status == 0:
logging.info(f"Successfully killed sijapi session on {server['ts_id']}")
else:
logging.error(f"Failed to kill sijapi session on {server['ts_id']}. Error: {error}")
except paramiko.SSHException as e:
logging.error(f"Failed to connect to {server['ts_id']}: {str(e)}")
finally:
ssh.close()
def main():
load_env()
config = load_config()
pool = config['POOL']
local_ts_id = os.environ.get('TS_ID')
parser = argparse.ArgumentParser(description='Manage sijapi servers')
parser.add_argument('--kill', action='store_true', help='Kill the local sijapi tmux session')
parser.add_argument('--restart', action='store_true', help='Restart the local sijapi tmux session')
parser.add_argument('--all', action='store_true', help='Apply the action to all servers')
parser.add_argument('--pull', action='store_true', help='Pull latest changes before starting the server')
parser.add_argument('--push', action='store_true', help='Push changes before starting the server')
args = parser.parse_args()
if args.kill:
if args.all:
for server in pool:
if server['ts_id'] == local_ts_id:
kill_local_server()
else:
kill_remote_server(server)
else:
kill_local_server()
sys.exit(0)
if args.restart or args.pull or args.push:
if args.all:
for server in pool:
if server['ts_id'] == local_ts_id:
kill_local_server()
start_local_server(server, pull=args.pull, push=args.push)
else:
kill_remote_server(server)
start_remote_server(server, pull=args.pull, push=args.push)
else:
kill_local_server()
local_server = next(server for server in pool if server['ts_id'] == local_ts_id)
start_local_server(local_server, pull=args.pull, push=args.push)
sys.exit(0)
# If no specific arguments, run the default behavior
local_server = next(server for server in pool if server['ts_id'] == local_ts_id)
if not check_server(local_server['ts_ip'], local_server['app_port'], local_server['ts_id']):
logging.info(f"Local server {local_server['ts_id']} is not responding correctly. Attempting to start...")
kill_local_server()
start_local_server(local_server, push=True)
for server in pool:
if server['ts_id'] != local_ts_id:
if not check_server(server['ts_ip'], server['app_port'], server['ts_id']):
logging.info(f"{server['ts_id']} is not responding correctly. Attempting to start...")
kill_remote_server(server)
start_remote_server(server, pull=True)
else:
logging.info(f"{server['ts_id']} is running and responding correctly.")
time.sleep(1)
if __name__ == "__main__":
main()


@@ -1,34 +0,0 @@
from aura_sr import AuraSR
from PIL import Image
import torch
import os
# Set environment variables for MPS
os.environ['PYTORCH_MPS_HIGH_WATERMARK_RATIO'] = '0.0'
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
# Initialize device as CPU for default
device = torch.device('cpu')
# Check if MPS is available
if torch.backends.mps.is_available():
if not torch.backends.mps.is_built():
print("MPS not available because the current PyTorch install was not built with MPS enabled.")
else:
device = torch.device('mps:0')
        # No CUDA involved here; the model is moved to the MPS device explicitly below via .to(device).
aura_sr = AuraSR.from_pretrained("fal-ai/AuraSR").to(device)
def load_image_from_path(file_path):
return Image.open(file_path)
def upscale_and_save(original_path):
original_image = load_image_from_path(original_path)
upscaled_image = aura_sr.upscale_4x(original_image)
upscaled_image.save(original_path)
# Insert your image path
upscale_and_save("/Users/sij/workshop/sijapi/sijapi/testbed/API__00482_ 2.png")