Auto-update: Sun Jun 30 15:51:51 PDT 2024

This commit is contained in:
sanj 2024-06-30 15:51:51 -07:00
parent c742336b62
commit 80b327a2d3
13 changed files with 2765 additions and 21 deletions

4
.gitignore vendored
View file

@@ -5,10 +5,10 @@ sijapi/data/geocoder/
sijapi/data/courtlistener/
sijapi/data/tts/
sijapi/data/db/
sijapi/data/sd/workflows/private
sijapi/data/img/workflows/private
sijapi/data/*.pbf
sijapi/data/geonames.txt
sijapi/data/sd/images/
sijapi/data/img/images/
sijapi/config/*.yaml
sijapi/config/O365/
sijapi/local_only/

View file

@@ -32,7 +32,7 @@ MAX_CPU_CORES = min(int(os.getenv("MAX_CPU_CORES", int(multiprocessing.cpu_count
DB = Database.from_env()
News = Configuration.load('news', 'secrets')
SD = Configuration.load('sd', 'secrets')
IMG = Configuration.load('img', 'secrets')
### Directories & general paths
ROUTER_DIR = BASE_DIR / "routers"
@@ -100,15 +100,15 @@ SUMMARY_INSTRUCT_TTS = os.getenv('SUMMARY_INSTRUCT_TTS', "You are an AI assistan
### Stable diffusion
SD_IMAGE_DIR = DATA_DIR / "sd" / "images"
os.makedirs(SD_IMAGE_DIR, exist_ok=True)
SD_WORKFLOWS_DIR = DATA_DIR / "sd" / "workflows"
os.makedirs(SD_WORKFLOWS_DIR, exist_ok=True)
IMG_DIR = DATA_DIR / "img" / "images"
os.makedirs(IMG_DIR, exist_ok=True)
IMG_WORKFLOWS_DIR = DATA_DIR / "img" / "workflows"
os.makedirs(IMG_WORKFLOWS_DIR, exist_ok=True)
COMFYUI_URL = os.getenv('COMFYUI_URL', "http://localhost:8188")
COMFYUI_DIR = Path(os.getenv('COMFYUI_DIR'))
COMFYUI_OUTPUT_DIR = COMFYUI_DIR / 'output'
COMFYUI_LAUNCH_CMD = os.getenv('COMFYUI_LAUNCH_CMD', 'mamba activate comfyui && python main.py')
SD_CONFIG_PATH = CONFIG_DIR / 'sd.yaml'
IMG_CONFIG_PATH = CONFIG_DIR / 'img.yaml'
### ASR
ASR_DIR = DATA_DIR / "asr"

View file

@@ -56,7 +56,7 @@
#─── notes: ──────────────────────────────────────────────────────────────────────
#
# HOST_NET† and HOST_PORT comprise HOST and determine the ip and port the server binds to.
# API.URL is used to assemble URLs, e.g. in the MS authentication flow and for serving images generated on the sd router.
# API.URL is used to assemble URLs, e.g. in the MS authentication flow and for serving images generated on the img router.
# API.URL should match the base URL used to access sijapi sans endpoint, e.g. http://localhost:4444 or https://api.sij.ai
#
# † Take care here! Please ensure you understand the implications of setting HOST_NET to anything besides 127.0.0.1, and configure your firewall and router appropriately if you do. Setting HOST_NET to 0.0.0.0, for instance, opens sijapi to any device the server running it is accessible to — including potentially frightening internet randos (depending how your firewall, router, and NAT are configured).
@@ -94,7 +94,7 @@ TRUSTED_SUBNETS=127.0.0.1/32,10.13.37.0/24,100.64.64.0/24
# ──────────
#
#─── router selection: ────────────────────────────────────────────────────────────
ROUTERS=asr,cal,cf,email,health,llm,loc,note,rag,sd,serve,time,tts,weather
ROUTERS=asr,cal,cf,email,health,llm,loc,note,rag,img,serve,time,tts,weather
UNLOADED=ig
#─── notes: ──────────────────────────────────────────────────────────────────────
#
@@ -134,7 +134,7 @@ UNLOADED=ig
#
# ig: requires an Instagram account, with credentials and other settings
# configured separately in the ig_config.json file; relies heavily
# on the llm and sd routers which have their own dependencies.
# on the llm and img routers which have their own dependencies.
#
# loc: some endpoints work as is, but the core location tracking
# functionality requires Postgresql + PostGIS extension and are
@@ -146,11 +146,11 @@ UNLOADED=ig
# note: designed for use with Obsidian plus the Daily Notes and Tasks
# core extensions; and the Admonitions, Banners, Icons (with the
# Lucide pack), and Make.md community extensions. Moreover `notes`
# relies heavily on the cal, llm, loc, sd, summarize, time, loc,
# relies heavily on the cal, llm, loc, img, summarize, time,
# and weather routers and accordingly on the external
# dependencies of each.
#
# sd: requires ComfyUI plus any modules and StableDiffusion models
# img: requires ComfyUI plus any modules and StableDiffusion models
# set in img_config and individual workflow .json files.
#
# summarize: relies on the llm router and thus requires ollama.
@@ -353,7 +353,7 @@ DAY_SHORT_FMT="%Y-%m-%d"
#
# DEFAULT_LLM is self-explanatory; DEFAULT_VISION is used for image recognition within
# a multimodal chat context, such as on the ig module for generating intelligible
# comments to Instagram posts, or more realistic captions for sd-generated images.
# comments to Instagram posts, or more realistic captions for img-generated images.
#
# Note it's possible to specify a separate model for general purposes and for
# summarization tasks. The other SUMMARY_ variables call for some explanation,

View file

@@ -0,0 +1,39 @@
scenes:
- scene: default
triggers:
- ""
API_PrePrompt: "Highly-detailed image of "
API_StylePrompt: ", masterpiece, subtle, nuanced, best quality, ultra detailed, ultra high resolution, 8k, documentary, american transcendental, cinematic, filmic, moody, dynamic lighting, realistic, wallpaper, landscape photography, professional, earthporn, eliot porter, frans lanting, daniel kordan, landscape photography, ultra detailed, earth tones, moody"
API_NegativePrompt: "3d, bad art, illustrated, deformed, blurry, duplicate, video game, render, anime, cartoon, fake, tiling, out of frame, bad art, bad anatomy, 3d render, nsfw, worst quality, low quality, text, watermark, Thomas Kinkade, sentimental, kitsch, kitschy, twee, commercial, holiday card, comic, cartoon"
llm_sys_msg: "You are a helpful AI who assists in generating prompts that will be used to generate highly realistic images. Distill the described image or scene to its visual essence, in the form of the most evocative and visually descriptive words, phrases, and sentence fragments. Emphasize tone, lighting, composition, and any interesting juxtapositions between foreground and background, or center of frame and outer frame areas. Strive for nuance and originality. Avoid cliches, common tropes, and sentimentality. Avoid vague or abstract concepts. Avoid any words or descriptions based on emotion or any senses besides vision. Strive to show rather than tell. Space is limited, so be efficient with your words."
llm_pre_prompt: "Using the most visually descriptive sentence fragments, phrases, and words, distill the scene description to its essence, staying true to what it describes: "
prompt_model: "dolphin-llama3:70b"
workflows:
- workflow: default.json
size: 1024x768
- scene: wallpaper
triggers:
- wallpaper
API_PrePrompt: "Stunning widescreen image of "
API_StylePrompt: ", masterpiece, subtle, nuanced, best quality, ultra detailed, ultra high resolution, 8k, documentary, american transcendental, cinematic, filmic, moody, dynamic lighting, realistic, wallpaper, landscape photography, professional, earthporn, eliot porter, frans lanting, daniel kordan, landscape photography, ultra detailed, earth tones, moody"
API_NegativePrompt: "3d, bad art, illustrated, deformed, blurry, duplicate, video game, render, anime, cartoon, fake, tiling, out of frame, bad art, bad anatomy, 3d render, nsfw, worst quality, low quality, text, watermark, Thomas Kinkade, sentimental, kitsch, kitschy, twee, commercial, holiday card, comic, cartoon"
llm_sys_msg: "You are a helpful AI who assists in generating prompts that will be used to generate highly realistic images. Distill the described image or scene to its visual essence, in the form of the most evocative and visually descriptive words, phrases, and sentence fragments. Emphasize tone, lighting, composition, and any interesting juxtapositions between foreground and background, or center of frame and outer frame areas. Strive for nuance and originality. Avoid cliches, common tropes, and sentimentality. Avoid vague or abstract concepts. Avoid any words or descriptions based on emotion or any senses besides vision. Strive to show rather than tell. Space is limited, so be efficient with your words."
llm_pre_prompt: "Using the most visually descriptive sentence fragments, phrases, and words, distill the scene description to its essence, staying true to what it describes: "
prompt_model: "dolphin-llama3:70b"
workflows:
- workflow: wallpaper.json
size: 1024x640
- scene: portrait
triggers:
- portrait
- profile
- headshot
API_PrePrompt: "Highly-detailed portrait photo of "
API_StylePrompt: "; attractive, cute, (((masterpiece))); ((beautiful lighting)), subdued, fine detail, extremely sharp, 8k, insane detail, dynamic lighting, cinematic, best quality, ultra detailed."
API_NegativePrompt: "canvas frame, 3d, bad art, illustrated, deformed, blurry, duplicate, bad anatomy, worst quality, low quality, watermark, FastNegativeV2, easynegative, epiCNegative, easynegative, verybadimagenegative_v1.3, nsfw, nude"
llm_sys_msg: "You are a helpful AI who assists in refining prompts that will be used to generate highly realistic portrait photos. Upon receiving a prompt, you refine it by simplifying and distilling it to its essence, retaining the most visually evocative and distinct elements from what was provided, focusing in particular on the pictured individual's eyes, pose, and other distinctive features. You may infer some visual details that were not provided in the prompt, so long as they are consistent with the rest of the prompt. Always use the most visually descriptive terms possible, and avoid any vague or abstract concepts. Do not include any words or descriptions based on other senses or emotions. Strive to show rather than tell. Space is limited, so be efficient with your words. Remember that the final product will be a still image, and action verbs are not as helpful as simple descriptions of position, appearance, background, etc."
llm_pre_prompt: "Using the most visually descriptive sentence fragments, phrases, and words, distill this portrait photo to its essence: "
prompt_model: "dolphin-llama3:70b"
workflows:
- workflow: selfie.json
size: 768x1024

View file

@@ -0,0 +1,586 @@
{
"4": {
"inputs": {
"ckpt_name": "Other/dreamshaperXL_v21TurboDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"6": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"9": {
"inputs": {
"filename_prefix": "API_",
"images": [
"27",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"11": {
"inputs": {
"batch_size": 1,
"width": 1023,
"height": 1025,
"resampling": "bicubic",
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0.1,
"frame": 1,
"scale": 2,
"octaves": 8,
"persistence": 3,
"lacunarity": 4,
"exponent": 2,
"brightness": 0,
"contrast": 0,
"clamp_min": 0,
"clamp_max": 1,
"seed": 477685752000597,
"device": "cpu",
"optional_vae": [
"4",
2
]
},
"class_type": "Perlin Power Fractal Latent (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Noise 🦚"
}
},
"13": {
"inputs": {
"seed": 686880884118590,
"steps": 10,
"cfg": 1.8,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"start_at_step": 0,
"end_at_step": 10000,
"enable_denoise": "false",
"denoise": 1,
"add_noise": "enable",
"return_with_leftover_noise": "disable",
"noise_type": "brownian_fractal",
"noise_blending": "cuberp",
"noise_mode": "additive",
"scale": 1,
"alpha_exponent": 1,
"modulator": 1.05,
"sigma_tolerance": 0.5,
"boost_leading_sigma": "true",
"guide_use_noise": "true",
"model": [
"4",
0
],
"positive": [
"20",
0
],
"negative": [
"7",
0
],
"latent_image": [
"11",
0
],
"ppf_settings": [
"14",
0
],
"ch_settings": [
"15",
0
]
},
"class_type": "Power KSampler Advanced (PPF Noise)",
"_meta": {
"title": "Power KSampler Advanced 🦚"
}
},
"14": {
"inputs": {
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0,
"frame": 0,
"scale": 2.5,
"octaves": 5,
"persistence": 4,
"lacunarity": 3,
"exponent": 2,
"brightness": -0.1,
"contrast": -0.1
},
"class_type": "Perlin Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Settings 🦚"
}
},
"15": {
"inputs": {
"frequency": 332.65500000000003,
"octaves": 32,
"persistence": 1.4000000000000001,
"num_colors": 128,
"color_tolerance": 0.05,
"angle_degrees": 45,
"brightness": -0.25,
"contrast": 0,
"blur": 1.3
},
"class_type": "Cross-Hatch Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Cross-Hatch Power Fractal Settings 🦚"
}
},
"20": {
"inputs": {
"conditioning_1": [
"6",
0
],
"conditioning_2": [
"21",
0
]
},
"class_type": "ConditioningCombine",
"_meta": {
"title": "Conditioning (Combine)"
}
},
"21": {
"inputs": {
"text": "API_StylePrompt",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"23": {
"inputs": {
"conditioning": [
"7",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"26": {
"inputs": {
"upscale_model": [
"58",
0
],
"image": [
"39",
0
]
},
"class_type": "ImageUpscaleWithModel",
"_meta": {
"title": "Upscale Image (using Model)"
}
},
"27": {
"inputs": {
"factor": 0.25,
"interpolation_mode": "bicubic",
"image": [
"30",
0
]
},
"class_type": "JWImageResizeByFactor",
"_meta": {
"title": "Image Resize by Factor"
}
},
"30": {
"inputs": {
"blur_radius": 3,
"sigma": 1.5,
"image": [
"26",
0
]
},
"class_type": "ImageBlur",
"_meta": {
"title": "ImageBlur"
}
},
"38": {
"inputs": {
"samples": [
"13",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"39": {
"inputs": {
"noise_seed": 690275685743412,
"steps": 16,
"cfg": 7.5,
"base_ratio": 0.85,
"denoise": 0.25,
"scaled_width": [
"60",
0
],
"scaled_height": [
"64",
0
],
"noise_offset": 1,
"refiner_strength": 1,
"softness": 0,
"base_model": [
"40",
0
],
"base_positive": [
"45",
0
],
"base_negative": [
"46",
0
],
"refiner_model": [
"42",
0
],
"refiner_positive": [
"43",
0
],
"refiner_negative": [
"44",
0
],
"image": [
"38",
0
],
"vae": [
"41",
2
],
"sampler_name": [
"47",
0
],
"scheduler": [
"47",
1
],
"upscale_model": [
"58",
0
]
},
"class_type": "SeargeSDXLImage2ImageSampler2",
"_meta": {
"title": "Image2Image Sampler v2 (Searge)"
}
},
"40": {
"inputs": {
"lora_name": "SDXL/SDXLLandskaper_v1-000003.safetensors",
"strength_model": 1,
"strength_clip": 1,
"model": [
"48",
0
],
"clip": [
"48",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"41": {
"inputs": {
"ckpt_name": "SDXL/realismEngineSDXL_v20VAE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"42": {
"inputs": {
"ckpt_name": "SDXL/sdxl_refiner_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"43": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"42",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"44": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"42",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"45": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"40",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"46": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"40",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"47": {
"inputs": {
"sampler_name": "dpmpp_2m_sde",
"scheduler": "karras"
},
"class_type": "SeargeSamplerInputs",
"_meta": {
"title": "Sampler Settings"
}
},
"48": {
"inputs": {
"lora_name": "SDXL/add-detail-xl.safetensors",
"strength_model": 1,
"strength_clip": 1,
"model": [
"41",
0
],
"clip": [
"41",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"50": {
"inputs": {
"text": "API_PrePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"51": {
"inputs": {
"text": "API_NegativePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"52": {
"inputs": {
"image": [
"38",
0
]
},
"class_type": "Image Size to Number",
"_meta": {
"title": "Image Size to Number"
}
},
"55": {
"inputs": {
"op": "a * b",
"a": [
"52",
2
],
"b": [
"65",
0
],
"c": 0
},
"class_type": "SeargeFloatMath",
"_meta": {
"title": "Float Math"
}
},
"58": {
"inputs": {
"model_name": "4x_foolhardy_Remacri.pth"
},
"class_type": "UpscaleModelLoader",
"_meta": {
"title": "Load Upscale Model"
}
},
"59": {
"inputs": {
"op": "a * b",
"a": [
"52",
3
],
"b": [
"65",
0
],
"c": 0
},
"class_type": "SeargeFloatMath",
"_meta": {
"title": "Float Math"
}
},
"60": {
"inputs": {
"value": [
"55",
0
],
"mode": "round"
},
"class_type": "JWFloatToInteger",
"_meta": {
"title": "Float to Integer"
}
},
"64": {
"inputs": {
"value": [
"59",
0
],
"mode": "round"
},
"class_type": "JWFloatToInteger",
"_meta": {
"title": "Float to Integer"
}
},
"65": {
"inputs": {
"value": 2
},
"class_type": "JWFloat",
"_meta": {
"title": "Float"
}
}
}

View file

@@ -0,0 +1,586 @@
{
"4": {
"inputs": {
"ckpt_name": "Other/dreamshaperXL_v21TurboDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"6": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"9": {
"inputs": {
"filename_prefix": "API_",
"images": [
"27",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"11": {
"inputs": {
"batch_size": 1,
"width": 1023,
"height": 1025,
"resampling": "bicubic",
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0.1,
"frame": 1,
"scale": 2,
"octaves": 8,
"persistence": 3,
"lacunarity": 4,
"exponent": 2,
"brightness": 0,
"contrast": 0,
"clamp_min": 0,
"clamp_max": 1,
"seed": 477685752000597,
"device": "cpu",
"optional_vae": [
"4",
2
]
},
"class_type": "Perlin Power Fractal Latent (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Noise 🦚"
}
},
"13": {
"inputs": {
"seed": 686880884118590,
"steps": 10,
"cfg": 1.8,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"start_at_step": 0,
"end_at_step": 10000,
"enable_denoise": "false",
"denoise": 1,
"add_noise": "enable",
"return_with_leftover_noise": "disable",
"noise_type": "brownian_fractal",
"noise_blending": "cuberp",
"noise_mode": "additive",
"scale": 1,
"alpha_exponent": 1,
"modulator": 1.05,
"sigma_tolerance": 0.5,
"boost_leading_sigma": "true",
"guide_use_noise": "true",
"model": [
"4",
0
],
"positive": [
"20",
0
],
"negative": [
"7",
0
],
"latent_image": [
"11",
0
],
"ppf_settings": [
"14",
0
],
"ch_settings": [
"15",
0
]
},
"class_type": "Power KSampler Advanced (PPF Noise)",
"_meta": {
"title": "Power KSampler Advanced 🦚"
}
},
"14": {
"inputs": {
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0,
"frame": 0,
"scale": 2.5,
"octaves": 5,
"persistence": 4,
"lacunarity": 3,
"exponent": 2,
"brightness": -0.1,
"contrast": -0.1
},
"class_type": "Perlin Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Settings 🦚"
}
},
"15": {
"inputs": {
"frequency": 332.65500000000003,
"octaves": 32,
"persistence": 1.4000000000000001,
"num_colors": 128,
"color_tolerance": 0.05,
"angle_degrees": 45,
"brightness": -0.25,
"contrast": 0,
"blur": 1.3
},
"class_type": "Cross-Hatch Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Cross-Hatch Power Fractal Settings 🦚"
}
},
"20": {
"inputs": {
"conditioning_1": [
"6",
0
],
"conditioning_2": [
"21",
0
]
},
"class_type": "ConditioningCombine",
"_meta": {
"title": "Conditioning (Combine)"
}
},
"21": {
"inputs": {
"text": "API_StylePrompt",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"23": {
"inputs": {
"conditioning": [
"7",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"26": {
"inputs": {
"upscale_model": [
"58",
0
],
"image": [
"39",
0
]
},
"class_type": "ImageUpscaleWithModel",
"_meta": {
"title": "Upscale Image (using Model)"
}
},
"27": {
"inputs": {
"factor": 0.25,
"interpolation_mode": "bicubic",
"image": [
"30",
0
]
},
"class_type": "JWImageResizeByFactor",
"_meta": {
"title": "Image Resize by Factor"
}
},
"30": {
"inputs": {
"blur_radius": 3,
"sigma": 1.5,
"image": [
"26",
0
]
},
"class_type": "ImageBlur",
"_meta": {
"title": "ImageBlur"
}
},
"38": {
"inputs": {
"samples": [
"13",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"39": {
"inputs": {
"noise_seed": 690275685743412,
"steps": 16,
"cfg": 7.5,
"base_ratio": 0.85,
"denoise": 0.25,
"scaled_width": [
"60",
0
],
"scaled_height": [
"64",
0
],
"noise_offset": 1,
"refiner_strength": 1,
"softness": 0,
"base_model": [
"40",
0
],
"base_positive": [
"45",
0
],
"base_negative": [
"46",
0
],
"refiner_model": [
"42",
0
],
"refiner_positive": [
"43",
0
],
"refiner_negative": [
"44",
0
],
"image": [
"38",
0
],
"vae": [
"41",
2
],
"sampler_name": [
"47",
0
],
"scheduler": [
"47",
1
],
"upscale_model": [
"58",
0
]
},
"class_type": "SeargeSDXLImage2ImageSampler2",
"_meta": {
"title": "Image2Image Sampler v2 (Searge)"
}
},
"40": {
"inputs": {
"lora_name": "SDXL/SDXLLandskaper_v1-000003.safetensors",
"strength_model": 1,
"strength_clip": 1,
"model": [
"48",
0
],
"clip": [
"48",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"41": {
"inputs": {
"ckpt_name": "SDXL/realismEngineSDXL_v20VAE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"42": {
"inputs": {
"ckpt_name": "SDXL/sdxl_refiner_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"43": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"42",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"44": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"42",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"45": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"40",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"46": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"40",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"47": {
"inputs": {
"sampler_name": "dpmpp_2m_sde",
"scheduler": "karras"
},
"class_type": "SeargeSamplerInputs",
"_meta": {
"title": "Sampler Settings"
}
},
"48": {
"inputs": {
"lora_name": "SDXL/add-detail-xl.safetensors",
"strength_model": 1,
"strength_clip": 1,
"model": [
"41",
0
],
"clip": [
"41",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"50": {
"inputs": {
"text": "API_PrePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"51": {
"inputs": {
"text": "API_NegativePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"52": {
"inputs": {
"image": [
"38",
0
]
},
"class_type": "Image Size to Number",
"_meta": {
"title": "Image Size to Number"
}
},
"55": {
"inputs": {
"op": "a * b",
"a": [
"52",
2
],
"b": [
"65",
0
],
"c": 0
},
"class_type": "SeargeFloatMath",
"_meta": {
"title": "Float Math"
}
},
"58": {
"inputs": {
"model_name": "4x_foolhardy_Remacri.pth"
},
"class_type": "UpscaleModelLoader",
"_meta": {
"title": "Load Upscale Model"
}
},
"59": {
"inputs": {
"op": "a * b",
"a": [
"52",
3
],
"b": [
"65",
0
],
"c": 0
},
"class_type": "SeargeFloatMath",
"_meta": {
"title": "Float Math"
}
},
"60": {
"inputs": {
"value": [
"55",
0
],
"mode": "round"
},
"class_type": "JWFloatToInteger",
"_meta": {
"title": "Float to Integer"
}
},
"64": {
"inputs": {
"value": [
"59",
0
],
"mode": "round"
},
"class_type": "JWFloatToInteger",
"_meta": {
"title": "Float to Integer"
}
},
"65": {
"inputs": {
"value": 2
},
"class_type": "JWFloat",
"_meta": {
"title": "Float"
}
}
}

View file

@@ -0,0 +1,486 @@
{
"4": {
"inputs": {
"ckpt_name": "SDXL/hassansdxl_v10.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"6": {
"inputs": {
"text": [
"17",
0
],
"clip": [
"15",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": [
"18",
0
],
"clip": [
"15",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"12": {
"inputs": {
"lora_name": "SDXL/styleWegg.safetensors",
"strength_model": 0.3,
"strength_clip": 0.25,
"model": [
"91",
0
],
"clip": [
"91",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"13": {
"inputs": {
"lora_name": "SDXL/add-detail-xl.safetensors",
"strength_model": 0.2,
"strength_clip": 0.2,
"model": [
"12",
0
],
"clip": [
"12",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"14": {
"inputs": {
"lora_name": "SDXL/amazing_portraits_xl_v1b.safetensors",
"strength_model": 0.5,
"strength_clip": 0.45,
"model": [
"13",
0
],
"clip": [
"13",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"15": {
"inputs": {
"lora_name": "SDXL/sd_xl_offset_example-lora_1.0.safetensors",
"strength_model": 0.2,
"strength_clip": 0.15,
"model": [
"53",
0
],
"clip": [
"53",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"17": {
"inputs": {
"text": "API_PrePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"18": {
"inputs": {
"text": "API_NegativePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"23": {
"inputs": {
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0,
"frame": 0,
"scale": 5,
"octaves": 8,
"persistence": 1.5,
"lacunarity": 2,
"exponent": 4,
"brightness": 0,
"contrast": 0
},
"class_type": "Perlin Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Settings 🦚"
}
},
"24": {
"inputs": {
"frequency": 320,
"octaves": 12,
"persistence": 1.5,
"num_colors": 16,
"color_tolerance": 0.05,
"angle_degrees": 45,
"brightness": 0,
"contrast": 0,
"blur": 2.5
},
"class_type": "Cross-Hatch Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Cross-Hatch Power Fractal Settings 🦚"
}
},
"37": {
"inputs": {
"seed": 923916094743956
},
"class_type": "Seed",
"_meta": {
"title": "Seed"
}
},
"38": {
"inputs": {
"batch_size": 1.3125,
"width": [
"95",
0
],
"height": [
"95",
1
],
"resampling": "nearest-exact",
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0,
"frame": 0,
"scale": 10,
"octaves": 8,
"persistence": 1.5,
"lacunarity": 3,
"exponent": 5,
"brightness": 0,
"contrast": 0,
"clamp_min": 0,
"clamp_max": 1,
"seed": [
"37",
3
],
"device": "cpu",
"optional_vae": [
"4",
2
],
"ppf_settings": [
"23",
0
]
},
"class_type": "Perlin Power Fractal Latent (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Noise 🦚"
}
},
"43": {
"inputs": {
"seed": [
"37",
3
],
"steps": 32,
"cfg": 8.5,
"sampler_name": "dpmpp_2m_sde",
"scheduler": "karras",
"start_at_step": 0,
"end_at_step": 10000,
"enable_denoise": "false",
"denoise": 1,
"add_noise": "enable",
"return_with_leftover_noise": "disable",
"noise_type": "brownian_fractal",
"noise_blending": "cuberp",
"noise_mode": "additive",
"scale": 1,
"alpha_exponent": 1,
"modulator": 1,
"sigma_tolerance": 0.5,
"boost_leading_sigma": "false",
"guide_use_noise": "true",
"model": [
"15",
0
],
"positive": [
"98",
0
],
"negative": [
"7",
0
],
"latent_image": [
"38",
0
],
"ppf_settings": [
"23",
0
],
"ch_settings": [
"24",
0
]
},
"class_type": "Power KSampler Advanced (PPF Noise)",
"_meta": {
"title": "Power KSampler Advanced 🦚"
}
},
"44": {
"inputs": {
"samples": [
"43",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"45": {
"inputs": {
"filename_prefix": "API_",
"images": [
"44",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"53": {
"inputs": {
"lora_name": "SDXL/PerfectEyesXL.safetensors",
"strength_model": 0.5,
"strength_clip": 0.5,
"model": [
"14",
0
],
"clip": [
"14",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"89": {
"inputs": {
"lora_name": "SDXL/ahxl_v1.safetensors",
"strength_model": 0.4,
"strength_clip": 0.33,
"model": [
"92",
0
],
"clip": [
"93",
0
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"90": {
"inputs": {
"lora_name": "SDXL/age.safetensors",
"strength_model": -0.8,
"strength_clip": -0.7000000000000001,
"model": [
"89",
0
],
"clip": [
"89",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"91": {
"inputs": {
"lora_name": "SDXL/StokeRealV1.safetensors",
"strength_model": 0.2,
"strength_clip": 0.2,
"model": [
"90",
0
],
"clip": [
"90",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"92": {
"inputs": {
"input": 0.36,
"middle": 0.5,
"out": 0.64,
"model1": [
"4",
0
],
"model2": [
"94",
0
]
},
"class_type": "ModelMergeBlocks",
"_meta": {
"title": "ModelMergeBlocks"
}
},
"93": {
"inputs": {
"ratio": 0.45,
"clip1": [
"4",
1
],
"clip2": [
"94",
1
]
},
"class_type": "CLIPMergeSimple",
"_meta": {
"title": "CLIPMergeSimple"
}
},
"94": {
"inputs": {
"ckpt_name": "SDXL/dreamshaperXL_alpha2Xl10.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"95": {
"inputs": {
"width_ratio": 5,
"height_ratio": 7,
"side_length": 1025,
"rounding_value": 64
},
"class_type": "AnyAspectRatio",
"_meta": {
"title": "AnyAspectRatio"
}
},
"96": {
"inputs": {
"text": "API_StylePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"97": {
"inputs": {
"text": [
"96",
0
],
"clip": [
"15",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"98": {
"inputs": {
"conditioning_1": [
"6",
0
],
"conditioning_2": [
"97",
0
]
},
"class_type": "ConditioningCombine",
"_meta": {
"title": "Conditioning (Combine)"
}
}
}

View file

@ -0,0 +1,586 @@
{
"4": {
"inputs": {
"ckpt_name": "Other/dreamshaperXL_v21TurboDPMSDE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"6": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"9": {
"inputs": {
"filename_prefix": "API_",
"images": [
"27",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
},
"11": {
"inputs": {
"batch_size": 1,
"width": 1023,
"height": 1025,
"resampling": "bicubic",
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0.1,
"frame": 1,
"scale": 2,
"octaves": 8,
"persistence": 3,
"lacunarity": 4,
"exponent": 2,
"brightness": 0,
"contrast": 0,
"clamp_min": 0,
"clamp_max": 1,
"seed": 477685752000597,
"device": "cpu",
"optional_vae": [
"4",
2
]
},
"class_type": "Perlin Power Fractal Latent (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Noise 🦚"
}
},
"13": {
"inputs": {
"seed": 686880884118590,
"steps": 10,
"cfg": 1.8,
"sampler_name": "dpmpp_sde",
"scheduler": "karras",
"start_at_step": 0,
"end_at_step": 10000,
"enable_denoise": "false",
"denoise": 1,
"add_noise": "enable",
"return_with_leftover_noise": "disable",
"noise_type": "brownian_fractal",
"noise_blending": "cuberp",
"noise_mode": "additive",
"scale": 1,
"alpha_exponent": 1,
"modulator": 1.05,
"sigma_tolerance": 0.5,
"boost_leading_sigma": "true",
"guide_use_noise": "true",
"model": [
"4",
0
],
"positive": [
"20",
0
],
"negative": [
"7",
0
],
"latent_image": [
"11",
0
],
"ppf_settings": [
"14",
0
],
"ch_settings": [
"15",
0
]
},
"class_type": "Power KSampler Advanced (PPF Noise)",
"_meta": {
"title": "Power KSampler Advanced 🦚"
}
},
"14": {
"inputs": {
"X": 0,
"Y": 0,
"Z": 0,
"evolution": 0,
"frame": 0,
"scale": 2.5,
"octaves": 5,
"persistence": 4,
"lacunarity": 3,
"exponent": 2,
"brightness": -0.1,
"contrast": -0.1
},
"class_type": "Perlin Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Perlin Power Fractal Settings 🦚"
}
},
"15": {
"inputs": {
"frequency": 332.65500000000003,
"octaves": 32,
"persistence": 1.4000000000000001,
"num_colors": 128,
"color_tolerance": 0.05,
"angle_degrees": 45,
"brightness": -0.25,
"contrast": 0,
"blur": 1.3
},
"class_type": "Cross-Hatch Power Fractal Settings (PPF Noise)",
"_meta": {
"title": "Cross-Hatch Power Fractal Settings 🦚"
}
},
"20": {
"inputs": {
"conditioning_1": [
"6",
0
],
"conditioning_2": [
"21",
0
]
},
"class_type": "ConditioningCombine",
"_meta": {
"title": "Conditioning (Combine)"
}
},
"21": {
"inputs": {
"text": "API_StylePrompt",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"23": {
"inputs": {
"conditioning": [
"7",
0
]
},
"class_type": "ConditioningZeroOut",
"_meta": {
"title": "ConditioningZeroOut"
}
},
"26": {
"inputs": {
"upscale_model": [
"58",
0
],
"image": [
"39",
0
]
},
"class_type": "ImageUpscaleWithModel",
"_meta": {
"title": "Upscale Image (using Model)"
}
},
"27": {
"inputs": {
"factor": 0.25,
"interpolation_mode": "bicubic",
"image": [
"30",
0
]
},
"class_type": "JWImageResizeByFactor",
"_meta": {
"title": "Image Resize by Factor"
}
},
"30": {
"inputs": {
"blur_radius": 3,
"sigma": 1.5,
"image": [
"26",
0
]
},
"class_type": "ImageBlur",
"_meta": {
"title": "ImageBlur"
}
},
"38": {
"inputs": {
"samples": [
"13",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"39": {
"inputs": {
"noise_seed": 690275685743412,
"steps": 16,
"cfg": 7.5,
"base_ratio": 0.85,
"denoise": 0.25,
"scaled_width": [
"60",
0
],
"scaled_height": [
"64",
0
],
"noise_offset": 1,
"refiner_strength": 1,
"softness": 0,
"base_model": [
"40",
0
],
"base_positive": [
"45",
0
],
"base_negative": [
"46",
0
],
"refiner_model": [
"42",
0
],
"refiner_positive": [
"43",
0
],
"refiner_negative": [
"44",
0
],
"image": [
"38",
0
],
"vae": [
"41",
2
],
"sampler_name": [
"47",
0
],
"scheduler": [
"47",
1
],
"upscale_model": [
"58",
0
]
},
"class_type": "SeargeSDXLImage2ImageSampler2",
"_meta": {
"title": "Image2Image Sampler v2 (Searge)"
}
},
"40": {
"inputs": {
"lora_name": "SDXL/SDXLLandskaper_v1-000003.safetensors",
"strength_model": 1,
"strength_clip": 1,
"model": [
"48",
0
],
"clip": [
"48",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"41": {
"inputs": {
"ckpt_name": "SDXL/realismEngineSDXL_v20VAE.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"42": {
"inputs": {
"ckpt_name": "SDXL/sdxl_refiner_1.0.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"43": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"42",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"44": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"42",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"45": {
"inputs": {
"text": [
"50",
0
],
"clip": [
"40",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"46": {
"inputs": {
"text": [
"51",
0
],
"clip": [
"40",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"47": {
"inputs": {
"sampler_name": "dpmpp_2m_sde",
"scheduler": "karras"
},
"class_type": "SeargeSamplerInputs",
"_meta": {
"title": "Sampler Settings"
}
},
"48": {
"inputs": {
"lora_name": "SDXL/add-detail-xl.safetensors",
"strength_model": 1,
"strength_clip": 1,
"model": [
"41",
0
],
"clip": [
"41",
1
]
},
"class_type": "LoraLoader",
"_meta": {
"title": "Load LoRA"
}
},
"50": {
"inputs": {
"text": "API_PrePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"51": {
"inputs": {
"text": "API_NegativePrompt"
},
"class_type": "JWStringMultiline",
"_meta": {
"title": "String (Multiline)"
}
},
"52": {
"inputs": {
"image": [
"38",
0
]
},
"class_type": "Image Size to Number",
"_meta": {
"title": "Image Size to Number"
}
},
"55": {
"inputs": {
"op": "a * b",
"a": [
"52",
2
],
"b": [
"65",
0
],
"c": 0
},
"class_type": "SeargeFloatMath",
"_meta": {
"title": "Float Math"
}
},
"58": {
"inputs": {
"model_name": "4x_foolhardy_Remacri.pth"
},
"class_type": "UpscaleModelLoader",
"_meta": {
"title": "Load Upscale Model"
}
},
"59": {
"inputs": {
"op": "a * b",
"a": [
"52",
3
],
"b": [
"65",
0
],
"c": 0
},
"class_type": "SeargeFloatMath",
"_meta": {
"title": "Float Math"
}
},
"60": {
"inputs": {
"value": [
"55",
0
],
"mode": "round"
},
"class_type": "JWFloatToInteger",
"_meta": {
"title": "Float to Integer"
}
},
"64": {
"inputs": {
"value": [
"59",
0
],
"mode": "round"
},
"class_type": "JWFloatToInteger",
"_meta": {
"title": "Float to Integer"
}
},
"65": {
"inputs": {
"value": 2
},
"class_type": "JWFloat",
"_meta": {
"title": "Float"
}
}
}

View file

@ -21,7 +21,7 @@ import yaml
from typing import List, Dict, Optional, Set
from datetime import datetime as dt_datetime
from sijapi import L, PODCAST_DIR, DEFAULT_VOICE, EMAIL_CONFIG, EMAIL_LOGS
from sijapi.routers import loc, tts, llm, sd
from sijapi.routers import img, loc, tts, llm
from sijapi.utilities import clean_text, assemble_journal_path, extract_text, prefix_lines
from sijapi.classes import EmailAccount, IMAPConfig, SMTPConfig, IncomingEmail, EmailContact, AutoResponder
from sijapi.classes import EmailAccount
@ -302,7 +302,7 @@ async def autorespond_single_email(message, uid_str: str, account: EmailAccount,
if response_body:
subject = f"Re: {this_email.subject}"
# add back scene=profile.image_scene, to workflow call
jpg_path = await sd.workflow(profile.image_prompt, earlyout=False, downscale_to_fit=True) if profile.image_prompt else None
jpg_path = await img.workflow(profile.image_prompt, earlyout=False, downscale_to_fit=True) if profile.image_prompt else None
success = await send_response(this_email.sender, subject, response_body, profile, jpg_path)
if success:
L.WARN(f"Auto-responded to email: {this_email.subject}")

View file

@ -32,7 +32,7 @@ from urllib.parse import urlparse
from instagrapi.exceptions import LoginRequired as ClientLoginRequiredError
import json
from ollama import Client as oLlama
from sd import sd
from sijapi.routers.img import img
from dotenv import load_dotenv
from sijapi import L, COMFYUI_DIR

461
sijapi/routers/img.py Normal file
View file

@ -0,0 +1,461 @@
'''
Image generation module using StableDiffusion and similar models by way of ComfyUI.
DEPENDS ON:
LLM module
COMFYUI_URL, COMFYUI_DIR, COMFYUI_OUTPUT_DIR, TS_SUBNET, TS_ADDRESS, DATA_DIR, SD_CONFIG_PATH, SD_IMAGE_DIR, SD_WORKFLOWS_DIR, LOCAL_HOSTS, API.URL, PHOTOPRISM_USER*, PHOTOPRISM_URL*, PHOTOPRISM_PASS*
*unimplemented.
'''
from fastapi import APIRouter, Request, Response, Query
from starlette.datastructures import Address
from fastapi.responses import JSONResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from aiohttp import ClientSession, ClientTimeout
import aiofiles
from PIL import Image
from pathlib import Path
import uuid
import json
import yaml
import ipaddress
import socket
import subprocess
import os, re, io
import random
from io import BytesIO
import base64
import asyncio
import shutil
# from photoprism.Session import Session
# from photoprism.Photo import Photo
# from webdav3.client import Client
from sijapi.routers.llm import query_ollama
from sijapi import API, L, COMFYUI_URL, COMFYUI_OUTPUT_DIR, SD_CONFIG_PATH, SD_IMAGE_DIR, SD_WORKFLOWS_DIR
img = APIRouter()
CLIENT_ID = str(uuid.uuid4())
@img.post("/img")
@img.post("/v1/images/generations")
async def sd_endpoint(request: Request):
    """
    Generate an image from a JSON body: {prompt, model, size, earlyurl}.

    `model` selects the scene config; `earlyurl`, when truthy, asks workflow()
    to return the eventual path immediately and finish generation in the
    background. Returns JSON with the image path/URL.
    """
    request_data = await request.json()
    prompt = request_data.get("prompt")
    model = request_data.get("model")
    size = request_data.get("size")
    # earlyout="web" makes workflow() schedule generation as a background task.
    earlyout = "web" if request_data.get("earlyurl") else None
    image_path = await workflow(prompt=prompt, scene=model, size=size, earlyout=earlyout)
    # Both the early and the blocking path respond with the same JSON shape;
    # the original if/else returned identical responses in each branch.
    return JSONResponse({"image_url": image_path})
@img.get("/img")
@img.get("/v1/images/generations")
async def sd_endpoint_get(
    request: Request,
    prompt: str = Query(..., description="The prompt for image generation"),
    earlyout: str = Query("output", description="specify web for a redirect, or json for a json with the local path")
):
    """
    GET variant of image generation using the "wallpaper" scene.

    Renamed from `sd_endpoint` — the original redefined the POST handler's
    function name in the same module; FastAPI routes were unaffected, but the
    module-level name was silently shadowed.
    """
    image_path = await workflow(prompt=prompt, scene="wallpaper", earlyout=earlyout)
    web_path = get_web_path(image_path)
    if earlyout == "web":
        # Redirect the browser straight to the (eventual) public image URL.
        return RedirectResponse(url=web_path, status_code=303)
    return JSONResponse({"image_url": image_path})
async def workflow(prompt: str, scene: str = None, size: str = None, earlyout: str = None, destination_path: str = None, downscale_to_fit: bool = False):
    """
    Generate an image for *prompt* via a ComfyUI workflow and return the output path.

    Args:
        prompt: user prompt; expanded by the LLM before submission.
        scene: scene name from the img config; falls back to trigger matching.
        size: "WIDTHxHEIGHT" override; otherwise taken from the chosen workflow.
        earlyout: when truthy, return the destination path immediately and
            finish generation in a background task.
        destination_path: explicit output path (suffix forced to .jpg).
        downscale_to_fit: cap the saved JPEG at max(width, height).

    Returns:
        Path where the JPEG is (or will be) written.
    """
    # Pick an explicit scene, or fall back to trigger-word matching
    # (the first scene in the config acts as the default).
    scene_data = get_scene(scene)
    if not scene_data:
        scene_data = get_matching_scene(prompt)
    prompt = scene_data.get('llm_pre_prompt') + prompt
    prompt_model = scene_data.get('prompt_model')
    # Have the LLM expand the prompt into a richer image concept.
    image_concept = await query_ollama(usr=prompt, sys=scene_data.get('llm_sys_msg'), model=prompt_model, max_tokens=100)
    scene_workflow = random.choice(scene_data['workflows'])
    if size:
        L.DEBUG(f"Specified size: {size}")
    size = size if size else scene_workflow.get('size', '1024x1024')
    width, height = map(int, size.split('x'))
    L.DEBUG(f"Parsed width: {width}; parsed height: {height}")
    workflow_path = Path(SD_WORKFLOWS_DIR) / scene_workflow['workflow']
    workflow_data = json.loads(workflow_path.read_text())
    # Values substituted into the workflow template by update_prompt_and_get_key.
    post = {
        "API_PrePrompt": scene_data['API_PrePrompt'] + image_concept + ', '.join(f"; (({trigger}))" for trigger in scene_data['triggers']),
        "API_StylePrompt": scene_data['API_StylePrompt'],
        "API_NegativePrompt": scene_data['API_NegativePrompt'],
        "width": width,
        "height": height
    }
    # saved_file_key identifies the SaveImage node whose output we poll for.
    saved_file_key = await update_prompt_and_get_key(workflow=workflow_data, post=post, positive=image_concept)
    print(f"Saved file key: {saved_file_key}")
    prompt_id = await queue_prompt(workflow_data)
    print(f"Prompt ID: {prompt_id}")
    max_size = max(width, height) if downscale_to_fit else None
    destination_path = Path(destination_path).with_suffix(".jpg") if destination_path else SD_IMAGE_DIR / f"{prompt_id}.jpg"
    if earlyout:
        # Fire-and-forget: return the eventual path now, finish in the background.
        asyncio.create_task(generate_and_save_image(prompt_id, saved_file_key, max_size, destination_path))
        L.DEBUG(f"Returning {destination_path}")
        return destination_path
    else:
        await generate_and_save_image(prompt_id, saved_file_key, max_size, destination_path)
        L.DEBUG(f"Returning {destination_path}")
        return destination_path
async def generate_and_save_image(prompt_id, saved_file_key, max_size, destination_path):
    """
    Wait for ComfyUI job *prompt_id* to finish, fetch the image produced by the
    node *saved_file_key*, and save it as a JPEG at *destination_path*.

    Best-effort: errors are logged and None is returned rather than raised, so
    this is safe to run as a fire-and-forget background task.
    """
    try:
        status_data = await poll_status(prompt_id)
        image_data = await get_image(status_data, saved_file_key)
        jpg_file_path = await save_as_jpg(image_data, prompt_id, quality=90, max_size=max_size, destination_path=destination_path)
        if Path(jpg_file_path) != Path(destination_path):
            L.ERR(f"Mismatch between jpg_file_path, {jpg_file_path}, and destination_path, {destination_path}")
    except Exception as e:
        # Use the module logger (not print) for consistency with the mismatch branch.
        L.ERR(f"Error in generate_and_save_image: {e}")
        return None
def get_web_path(file_path: Path) -> str:
    """Map a local image path under SD_IMAGE_DIR to its public URL under API.URL."""
    relative = file_path.relative_to(SD_IMAGE_DIR)
    return f"{API.URL}/img/{relative}"
async def poll_status(prompt_id):
    """Asynchronously poll ComfyUI's history endpoint until the job reports
    completion, then return that job's status data."""
    started = asyncio.get_event_loop().time()
    async with ClientSession() as session:
        while True:
            async with session.get(f"{COMFYUI_URL}/history/{prompt_id}") as response:
                if response.status != 200:
                    raise Exception("Failed to get job status")
                payload = await response.json()
            job_data = payload.get(prompt_id, {})
            if job_data.get("status", {}).get("completed", False):
                elapsed = int(asyncio.get_event_loop().time() - started)
                print(f"{prompt_id} completed in {elapsed} seconds.")
                return job_data
            # Job still running; back off briefly before the next poll.
            await asyncio.sleep(1)
async def get_image(status_data, key):
    """Extract the first output image recorded under *key* in the ComfyUI job
    status and return its raw bytes from the output directory."""
    try:
        images_info = status_data.get("outputs", {}).get(key, {}).get("images", [])
        if not images_info:
            raise Exception("No images found in the job output.")
        first_image = images_info[0]
        file_path = os.path.join(
            COMFYUI_OUTPUT_DIR,
            first_image.get("subfolder", ""),
            first_image.get("filename"),
        )
        async with aiofiles.open(file_path, 'rb') as fh:
            return await fh.read()
    except Exception as e:
        # Re-wrap so callers get one uniform failure message.
        raise Exception(f"Failed to get image: {e}")
async def save_as_jpg(image_data, prompt_id, max_size = None, quality = 100, destination_path: Path = None):
    """
    Write raw PNG bytes to disk, convert to JPEG (optionally downscaling so the
    longest side is at most *max_size*), delete the intermediate PNG, and
    return the JPEG path as a string. Returns None on failure.
    """
    destination_path_png = (SD_IMAGE_DIR / prompt_id).with_suffix(".png")
    destination_path_jpg = destination_path.with_suffix(".jpg") if destination_path else (SD_IMAGE_DIR / prompt_id).with_suffix(".jpg")
    try:
        destination_path_png.parent.mkdir(parents=True, exist_ok=True)
        destination_path_jpg.parent.mkdir(parents=True, exist_ok=True)
        # Save the PNG
        async with aiofiles.open(destination_path_png, 'wb') as f:
            await f.write(image_data)
        # Open, possibly resize, and save as JPG. Local renamed from `img`:
        # that name shadowed the module-level APIRouter `img`.
        with Image.open(destination_path_png) as image:
            if max_size and max(image.size) > max_size:
                ratio = max_size / max(image.size)
                new_size = tuple(int(x * ratio) for x in image.size)
                image = image.resize(new_size, Image.Resampling.LANCZOS)
            image.convert('RGB').save(destination_path_jpg, format='JPEG', quality=quality)
        # Remove the intermediate PNG; only the JPEG is kept.
        os.remove(destination_path_png)
        return str(destination_path_jpg)
    except Exception as e:
        # Use the module logger (not print) for consistency with the rest of the file.
        L.ERR(f"Error processing image: {e}")
        return None
def set_presets(workflow_data, preset_values):
    """Pick one value at random from preset_values['values'] and write it into
    the targeted node's inputs; logs (at DEBUG) when anything is missing."""
    if not preset_values:
        L.DEBUG("No preset_values found")
        return
    node_id = preset_values.get('node')
    input_key = preset_values.get('key')
    choices = preset_values.get('values')
    if not (node_id and input_key and choices):
        L.DEBUG("Required data missing in preset_values")
        return
    chosen = random.choice(choices)
    if 'inputs' in workflow_data.get(node_id, {}):
        workflow_data[node_id]['inputs'][input_key] = chosen
    else:
        L.DEBUG("Node not found in workflow_data")
def get_return_path(destination_path):
    """Return just the filename for images that live directly in SD_IMAGE_DIR,
    otherwise the full path as a string."""
    image_dir = Path(SD_IMAGE_DIR)
    # NOTE(review): samefile raises if either path does not exist — assumes
    # both directories are present by the time this is called; confirm.
    return destination_path.name if destination_path.parent.samefile(image_dir) else str(destination_path)
def get_scene(scene):
    """Return the scene config whose 'scene' field equals *scene*, or None.

    The YAML config is re-read on every call, so edits take effect without a restart.
    """
    with open(SD_CONFIG_PATH, 'r') as cfg_file:
        config = yaml.safe_load(cfg_file)
    for candidate in config['scenes']:
        if candidate['scene'] == scene:
            L.DEBUG(f"Found scene for \"{scene}\".")
            return candidate
    return None
def get_matching_scene(prompt):
    """
    Return the scene whose trigger words occur most often in *prompt*.

    When no scene's triggers match at all, falls back to the first scene in
    the config array — so the first entry serves as the default scene.
    """
    prompt_lower = prompt.lower()
    with open(SD_CONFIG_PATH, 'r') as cfg_file:
        config = yaml.safe_load(cfg_file)
    best = None
    best_count = 0
    for candidate in config['scenes']:
        hits = sum(1 for trigger in candidate['triggers'] if trigger in prompt_lower)
        if hits > best_count:
            best_count = hits
            best = candidate
    if best:
        L.DEBUG(f"Found better-matching scene: the prompt contains {best_count} words that match triggers for {best.get('name')}!")
        return best
    L.DEBUG(f"No matching scenes found, falling back to default scene.")
    return config['scenes'][0]
import asyncio
import socket
import subprocess
from typing import Optional
async def ensure_comfy(retries: int = 4, timeout: float = 6.0):
    """
    Ensure that ComfyUI is running on 127.0.0.1:8188, starting it if necessary.

    Args:
        retries (int): Number of connection attempts. Defaults to 4.
        timeout (float): Seconds to wait between attempts. Defaults to 6.0.
        (The original docstring claimed 3 and 5.0, contradicting the signature.)

    Raises:
        RuntimeError: If ComfyUI couldn't be started or connected to after all retries.
    """
    for attempt in range(retries):
        try:
            # A successful TCP connect means ComfyUI is already listening.
            with socket.create_connection(("127.0.0.1", 8188), timeout=2):
                print("ComfyUI is already running.")
                return
        except (socket.timeout, ConnectionRefusedError):
            if attempt == 0:  # Only try to start ComfyUI on the first failed attempt
                print("ComfyUI is not running. Starting it now...")
                try:
                    # HACK: user-specific paths hard-coded here; consider using
                    # COMFYUI_DIR / COMFYUI_LAUNCH_CMD from config instead — TODO confirm.
                    tmux_command = (
                        "tmux split-window -h "
                        "\"source /Users/sij/.zshrc; cd /Users/sij/workshop/ComfyUI; "
                        "mamba activate comfyui && "
                        "python main.py; exec $SHELL\""
                    )
                    subprocess.Popen(tmux_command, shell=True)
                    print("ComfyUI started in a new tmux session.")
                except Exception as e:
                    raise RuntimeError(f"Error starting ComfyUI: {e}")
            print(f"Attempt {attempt + 1}/{retries} failed. Waiting {timeout} seconds before retrying...")
            await asyncio.sleep(timeout)
    raise RuntimeError(f"Failed to ensure ComfyUI is running after {retries} attempts with {timeout} second intervals.")
# async def upload_and_get_shareable_link(image_path):
# try:
# Set up the PhotoPrism session
# pp_session = Session(PHOTOPRISM_USER, PHOTOPRISM_PASS, PHOTOPRISM_URL, use_https=True)
# pp_session.create()
# Start import
# photo = Photo(pp_session)
# photo.start_import(path=os.path.dirname(image_path))
# Give PhotoPrism some time to process the upload
# await asyncio.sleep(5)
# Search for the uploaded photo
# photo_name = os.path.basename(image_path)
# search_results = photo.search(query=f"name:{photo_name}", count=1)
# if search_results['photos']:
# photo_uuid = search_results['photos'][0]['uuid']
# shareable_link = f"https://{PHOTOPRISM_URL}/p/{photo_uuid}"
# return shareable_link
# else:
# L.ERR("Could not find the uploaded photo details.")
# return None
# except Exception as e:
# L.ERR(f"Error in upload_and_get_shareable_link: {e}")
# return None
@img.get("/image/{prompt_id}")
async def get_image_status(prompt_id: str):
    """
    Return the public URL of the finished image for *prompt_id*, or a 202 with
    the raw status details while no output image is recorded yet.
    """
    status_data = await poll_status(prompt_id)
    # Find the first output node that actually produced images.
    save_image_key = None
    for key, value in status_data.get("outputs", {}).items():
        if "images" in value:
            save_image_key = key
            break
    if save_image_key:
        image_data = await get_image(status_data, save_image_key)
        await save_as_jpg(image_data, prompt_id)
        # Build the URL from API.URL, consistent with get_web_path, instead of
        # the previously hard-coded "https://api.lone.blue" host.
        external_url = f"{API.URL}/img/{prompt_id}.jpg"
        return JSONResponse({"image_url": external_url})
    else:
        return JSONResponse(content={"status": "Processing", "details": status_data}, status_code=202)
@img.get("/image-status/{prompt_id}")
async def get_image_processing_status(prompt_id: str):
    """Expose the raw ComfyUI status data for *prompt_id*; 500 with the error text on failure."""
    try:
        details = await poll_status(prompt_id)
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)
    return JSONResponse(content={"status": "Processing", "details": details}, status_code=200)
@img.options("/v1/images/generations", tags=["generations"])
async def get_generation_options():
    """Self-describing OPTIONS payload listing the parameters the generations endpoint accepts."""
    def opt(description, type_, **extra):
        # Build one parameter descriptor; extra carries required/default/example.
        return {"description": description, "type": type_, **extra}

    return {
        "model": opt("The model to use for image generation.", "string",
                     example="stable-diffusion"),
        "prompt": opt("The text prompt for the image generation.", "string",
                      required=True, example="A beautiful sunset over the ocean."),
        "n": opt("The number of images to generate.", "integer",
                 default=1, example=3),
        "size": opt("The size of the generated images in 'widthxheight' format.", "string",
                    default="1024x1024", example="512x512"),
        "raw": opt("Whether to return raw image data or not.", "boolean",
                   default=False),
        "earlyurl": opt("Whether to return the URL early or wait for the image to be ready.", "boolean",
                        default=False),
    }
async def load_workflow(workflow_path: str, workflow: str):
    """Load a workflow JSON file; when no explicit path is given, derive one
    from the workflow name under SD_WORKFLOWS_DIR (appending .json if needed)."""
    if not workflow_path:
        filename = workflow if workflow.endswith('.json') else f"{workflow}.json"
        workflow_path = os.path.join(SD_WORKFLOWS_DIR, filename)
    with open(workflow_path, 'r') as fh:
        return json.load(fh)
async def update_prompt_and_get_key(workflow: dict, post: dict, positive: str):
    '''
    Recurses through the workflow searching for and substituting the dynamic values for API_PrePrompt, API_StylePrompt, API_NegativePrompt, width, height, and seed (random integer).
    Even more important, it finds and returns the key to the filepath where the file is saved, which we need to decipher status when generation is complete.
    '''
    # One-element list so the nested closure can assign to it.
    found_key = [None]

    def update_recursive(workflow, path=None):
        if path is None:
            path = []
        if isinstance(workflow, dict):
            for key, value in workflow.items():
                current_path = path + [key]
                if isinstance(value, dict):
                    # The SaveImage node with filename_prefix 'API_' is the output
                    # node we later poll for; remember its node key.
                    if value.get('class_type') == 'SaveImage' and value.get('inputs', {}).get('filename_prefix') == 'API_':
                        found_key[0] = key
                    update_recursive(value, current_path)
                elif isinstance(value, list):
                    for index, item in enumerate(value):
                        update_recursive(item, current_path + [str(index)])
                # Placeholder substitution: replace template strings with the
                # values assembled in `post` (mutating in place is safe here —
                # only values change, not the dict's key set).
                if value == "API_PrePrompt":
                    workflow[key] = post.get(value, "") + positive
                elif value in ["API_StylePrompt", "API_NegativePrompt"]:
                    workflow[key] = post.get(value, "")
                elif key in ["seed", "noise_seed"]:
                    # Fresh random seed for every generation.
                    workflow[key] = random.randint(1000000000000, 9999999999999)
                elif key in ["width", "max_width", "scaled_width", "height", "max_height", "scaled_height", "side_length", "size", "value", "dimension", "dimensions", "long", "long_side", "short", "short_side", "length"]:
                    L.DEBUG(f"Got a hit for a dimension: {key} {value}")
                    # Sentinel values in the workflow templates: 1023 marks a
                    # width slot and 1025 a height slot (see the bundled
                    # workflow JSON, e.g. the 1023x1025 latent node).
                    if value == 1023:
                        workflow[key] = post.get("width", 1024)
                        L.DEBUG(f"Set {key} to {workflow[key]}.")
                    elif value == 1025:
                        workflow[key] = post.get("height", 1024)
                        L.DEBUG(f"Set {key} to {workflow[key]}.")

    update_recursive(workflow)
    return found_key[0]
async def queue_prompt(workflow_data):
    """Submit *workflow_data* to ComfyUI's /prompt endpoint (starting ComfyUI
    first if needed) and return the queued prompt id."""
    await ensure_comfy()
    async with ClientSession() as session:
        async with session.post(f"{COMFYUI_URL}/prompt", json={"prompt": workflow_data}) as response:
            if response.status != 200:
                raise Exception(f"Failed to queue prompt. Status code: {response.status}")
            payload = await response.json()
            return payload.get('prompt_id')

View file

@ -1,5 +1,5 @@
'''
Manages an Obsidian vault, in particular daily notes, using information and functionality drawn from the other routers, primarily calendar, email, ig, llm, rag, sd, serve, time, tts, and weather.
Manages an Obsidian vault, in particular daily notes, using information and functionality drawn from the other routers, primarily calendar, email, ig, llm, rag, img, serve, time, tts, and weather.
'''
from fastapi import APIRouter, BackgroundTasks, File, UploadFile, Form, HTTPException, Response, Query, Path as FastAPIPath
from fastapi.responses import JSONResponse, PlainTextResponse
@ -14,7 +14,7 @@ from fastapi import HTTPException, status
from pathlib import Path
from fastapi import APIRouter, Query, HTTPException
from sijapi import API, L, OBSIDIAN_VAULT_DIR, OBSIDIAN_RESOURCES_DIR, OBSIDIAN_BANNER_SCENE, DEFAULT_11L_VOICE, DEFAULT_VOICE, GEO
from sijapi.routers import cal, loc, tts, llm, time, sd, weather, asr
from sijapi.routers import cal, img, loc, tts, llm, time, weather, asr
from sijapi.utilities import assemble_journal_path, assemble_archive_path, convert_to_12_hour_format, sanitize_filename, convert_degrees_to_cardinal, check_file_name, HOURLY_COLUMNS_MAPPING
from sijapi.classes import Location
@ -270,7 +270,7 @@ async def generate_banner(dt, location: Location = None, forecast: str = None, m
prompt = await generate_context(date_time, location, forecast, mood, other_context)
L.DEBUG(f"Prompt: {prompt}")
final_path = await sd.workflow(prompt, scene=OBSIDIAN_BANNER_SCENE, destination_path=destination_path)
final_path = await img.workflow(prompt, scene=OBSIDIAN_BANNER_SCENE, destination_path=destination_path)
if not str(local_path) in str(final_path):
L.INFO(f"Apparent mismatch between local path, {local_path}, and final_path, {final_path}")
jpg_embed = f"\"![[{local_path}]]\""

View file

@ -1,5 +1,5 @@
'''
Web server module. Used by other modules when serving static content is required, e.g. the sd image generation module. Also used to serve PUBLIC_KEY.
Web server module. Used by other modules when serving static content is required, e.g. the img image generation module. Also used to serve PUBLIC_KEY.
'''
import os
import io