| import os, gc, random, re, inspect |
| from contextlib import nullcontext |
|
|
| import gradio as gr |
| import torch, spaces |
| from PIL import Image, ImageFilter, ImageOps |
| import numpy as np |
| import qrcode |
| from qrcode.constants import ERROR_CORRECT_H |
| from diffusers import ( |
| StableDiffusionControlNetPipeline, |
| StableDiffusionControlNetImg2ImgPipeline, |
| ControlNetModel, |
| DPMSolverMultistepScheduler, |
| ) |
|
|
| # Writable matplotlib config/cache directory |
| os.environ.setdefault("MPLCONFIGDIR", "/tmp/mpl") |
|
|
| # Hugging Face Hub download settings |
| os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1") |
| os.environ.setdefault("HF_HUB_DISABLE_PROGRESS_BARS", "1") |
|
|
| # Optional HF token (either env var name) for gated/private models |
| HF_TOKEN = os.environ.get("HUGGINGFACE_HUB_TOKEN") or os.environ.get("HUGGINGFACEHUB_API_TOKEN") |
|
|
| # Device / dtype selection |
| IS_CUDA = torch.cuda.is_available() |
| IS_MPS = bool(getattr(torch.backends, "mps", None) and torch.backends.mps.is_available()) |
| DTYPE = torch.float16 if (IS_CUDA or IS_MPS) else torch.float32 |
| DEV_TORCH = "cuda" if IS_CUDA else ("mps" if IS_MPS else "cpu") |
|
|
| def autocast_ctx(): |
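| # Mixed-precision context: fp16 autocast on CUDA/MPS, no-op on CPU |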
| if IS_CUDA: |
| return torch.autocast(device_type="cuda", dtype=torch.float16) |
| if IS_MPS: |
| # fp16 autocast is also available on Apple MPS with recent PyTorch |
| return torch.autocast(device_type="mps", dtype=torch.float16) |
| return nullcontext() |
|
|
| # Base SD 1.5-family checkpoints selectable in the UI |
| BASE_MODELS = { |
| "stable-diffusion-v1-5": "runwayml/stable-diffusion-v1-5", |
| "dream": "Lykon/dreamshaper-8", |
| } |
|
|
| # QR Code Monster ControlNet (keeps the code structure scannable) |
| CN_QRMON = "monster-labs/control_v1p_sd15_qrcode_monster" |
| # Brightness ControlNet (luminance guidance) |
| CN_BRIGHT = "latentcat/control_v1p_sd15_brightness" |
|
|
| # Small image helpers |
| def resize_like(im: Image.Image, width: int, height: int, method=Image.NEAREST) -> Image.Image: |
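| # Resize only when the size actually differs; the NEAREST default keeps QR modules sharp |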
| if im.size == (width, height): |
| return im |
| return im.resize((int(width), int(height)), method) |
|
|
| def ensure_rgb_img(x): |
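| # Coerce PIL / numpy / torch inputs to an RGB PIL image, compositing any alpha onto white |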
| if isinstance(x, Image.Image): |
| if x.mode in ("RGBA", "LA") or ("transparency" in x.info): |
| rgba = x.convert("RGBA") |
| white = Image.new("RGBA", rgba.size, (255, 255, 255, 255)) |
| return Image.alpha_composite(white, rgba).convert("RGB") |
| return x.convert("RGB") |
| if isinstance(x, np.ndarray): |
| a = x |
| if a.ndim == 3 and a.shape[2] == 4: |
| rgb = a[..., :3].astype(np.float32) |
| alpha = (a[..., 3:4].astype(np.float32)) / 255.0 |
| rgb = (rgb * alpha + 255.0 * (1.0 - alpha)).clip(0, 255).astype(np.uint8) |
| return Image.fromarray(rgb, "RGB") |
| if a.ndim == 3 and a.shape[2] == 1: |
| a = a[..., 0] |
| if a.ndim == 2: |
| a = np.stack([a, a, a], axis=-1) |
| return Image.fromarray(a[..., :3].astype(np.uint8), "RGB") |
| if torch.is_tensor(x): |
| t = x.detach().cpu() |
| if t.ndim == 3 and t.shape[0] in (1, 3, 4): |
| t = t.permute(1, 2, 0).numpy() |
| return ensure_rgb_img(t) |
| arr = t.numpy() |
| return ensure_rgb_img(arr) |
| raise ValueError(f"Unsupported image type for ensure_rgb_img: {type(x)}") |
|
|
| def snap8(x: int) -> int: |
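| # Clamp to 256-1024 px and round down to a multiple of 8 (required by the SD UNet) |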
| x = max(256, min(1024, int(x))) |
| return x - (x % 8) |
|
|
| def normalize_color(c): |
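| # Accept tuples, hex strings, or rgb()/rgba() strings; fall back to white |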
| if c is None: return "white" |
| if isinstance(c, (tuple, list)): |
| r, g, b = (int(max(0, min(255, round(float(x))))) for x in c[:3]); return (r, g, b) |
| if isinstance(c, str): |
| s = c.strip() |
| if s.startswith("#"): return s |
| m = re.match(r"rgba?\(\s*([0-9.]+)\s*,\s*([0-9.]+)\s*,\s*([0-9.]+)", s, re.IGNORECASE) |
| if m: |
| r = int(max(0, min(255, round(float(m.group(1)))))) |
| g = int(max(0, min(255, round(float(m.group(2)))))) |
| b = int(max(0, min(255, round(float(m.group(3)))))) |
| return (r, g, b) |
| return s |
| return "white" |
|
|
| def enforce_qr_contrast(stylized: Image.Image, qr_img: Image.Image, strength: float = 0.0, feather: float = 1.0) -> Image.Image: |
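| # Post-process: darken the QR's black modules and lighten its white ones so scanners still read the code |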
| if strength <= 0: return stylized |
| q = qr_img.convert("L") |
| if q.size != stylized.size: |
| q = q.resize(stylized.size, Image.NEAREST) |
| black_mask = q.point(lambda p: 255 if p < 128 else 0).filter(ImageFilter.GaussianBlur(radius=float(feather))) |
| black = np.asarray(black_mask, dtype=np.float32) / 255.0 |
| white = 1.0 - black |
| s = np.asarray(stylized.convert("RGB"), dtype=np.float32) / 255.0 |
| s = s * (1.0 - float(strength) * black[..., None]) |
| s = s + (1.0 - s) * (float(strength) * 0.85 * white[..., None]) |
| s = np.clip(s, 0.0, 1.0) |
| return Image.fromarray((s * 255.0).astype(np.uint8), mode="RGB") |
|
|
| def make_qr(url="https://example.com", size=768, border=12, back_color="#FFFFFF", blur_radius=0.0): |
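| # Render the URL/text as a high-error-correction (H) QR image at the requested pixel size |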
| qr = qrcode.QRCode(version=None, error_correction=ERROR_CORRECT_H, box_size=10, border=int(border)) |
| qr.add_data(url.strip()); qr.make(fit=True) |
| img = qr.make_image(fill_color="black", back_color=normalize_color(back_color)).convert("RGB") |
| img = img.resize((int(size), int(size)), Image.NEAREST) |
| if blur_radius and blur_radius > 0: |
| img = img.filter(ImageFilter.GaussianBlur(radius=float(blur_radius))) |
| return img |
|
|
| def prep_qr_upload_image(qr_upload: Image.Image, size: int = 768, blur_radius: float = 0.0) -> Image.Image: |
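| # Normalize a user-uploaded QR into a clean ControlNet conditioning image |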
| # Grayscale |
| im = qr_upload.convert("L") |
|
|
| # Hard threshold to pure black/white modules |
| threshold = 180 |
| im = im.point(lambda p: 255 if p > threshold else 0, mode="1") |
|
|
| # NEAREST resize keeps module edges crisp |
| im = im.resize((size, size), Image.NEAREST) |
|
|
| # ControlNet expects RGB |
| im = im.convert("RGB") |
|
|
| # Optional soften |
| if blur_radius > 0: |
| im = im.filter(ImageFilter.GaussianBlur(radius=float(blur_radius))) |
|
|
| return im |
|
|
| # Brightness conditioning-map helpers |
| def prep_brightness_map(img: Image.Image, size: int, source: str, |
| blur_px: float = 3.0, gamma: float = 0.9, autocontrast: bool = True) -> Image.Image: |
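| # Grayscale conditioning map; non-QR sources get optional autocontrast, blur and gamma shaping |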
| method = Image.NEAREST if source == "qr" else Image.LANCZOS |
| im = img.resize((size, size), method).convert("L") |
| if source != "qr": |
| if autocontrast: |
| im = ImageOps.autocontrast(im, cutoff=2) |
| if blur_px and blur_px > 0: |
| im = im.filter(ImageFilter.GaussianBlur(radius=float(blur_px))) |
| if gamma and gamma != 1.0: |
| arr = np.asarray(im, dtype=np.float32) / 255.0 |
| arr = np.clip(arr ** float(gamma), 0.0, 1.0) |
| im = Image.fromarray((arr * 255.0).astype(np.uint8), "L") |
| return im.convert("RGB") |
|
|
| def blend_brightness_maps(qr_img: Image.Image, |
| init_img: Image.Image, |
| size: int, |
| alpha: float, |
| blur_px: float = 2.5, |
| gamma: float = 0.9, |
| autocontrast: bool = True) -> Image.Image: |
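| # Alpha-blend the QR map with the init image's brightness map (alpha=1.0 means QR only) |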
| qr_map = prep_brightness_map(qr_img, size, "qr") |
| init_map = prep_brightness_map(init_img, size, "init", |
| blur_px=blur_px, gamma=gamma, autocontrast=autocontrast) |
| qa = np.asarray(qr_map, dtype=np.float32) |
| ia = np.asarray(init_map, dtype=np.float32) |
| a = float(alpha) |
| mix = np.clip((1.0 - a) * ia + a * qa, 0, 255).astype(np.uint8) |
| return Image.fromarray(mix, mode="RGB") |
|
|
| # Lazy global caches for ControlNets and pipelines |
| _CN_QR = None |
| _CN_BR = None |
|
|
| _CN_TXT2IMG = {} |
| _CN_IMG2IMG = {} |
|
|
| def _base_scheduler_for(pipe): |
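| # Shared pipeline setup: Karras DPM++ scheduler plus memory savers (attention/VAE slicing, CPU offload) |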
| pipe.scheduler = DPMSolverMultistepScheduler.from_config( |
| pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="dpmsolver++" |
| ) |
| pipe.enable_attention_slicing() |
| pipe.enable_vae_slicing() |
| pipe.enable_model_cpu_offload() |
| return pipe |
|
|
| def get_qr_cn(): |
| global _CN_QR |
| if _CN_QR is None: |
| _CN_QR = ControlNetModel.from_pretrained( |
| CN_QRMON, torch_dtype=DTYPE, use_safetensors=True, token=HF_TOKEN |
| ) |
| return _CN_QR |
|
|
| def get_bright_cn(): |
| global _CN_BR |
| if _CN_BR is None: |
| _CN_BR = ControlNetModel.from_pretrained( |
| CN_BRIGHT, torch_dtype=DTYPE, use_safetensors=True, token=HF_TOKEN |
| ) |
| return _CN_BR |
|
|
|
|
| def get_controlnets(use_brightness: bool): |
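| # Return the QR ControlNet alone, or [QR, brightness] when the second net is requested |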
| return [get_qr_cn(), get_bright_cn()] if use_brightness else get_qr_cn() |
|
|
| def get_txt2img_pipe(model_id: str, use_brightness: bool): |
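| # One cached txt2img pipeline per (base model, ControlNet layout) |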
| key = (model_id, "2cn" if use_brightness else "1cn") |
| if key not in _CN_TXT2IMG: |
| pipe = StableDiffusionControlNetPipeline.from_pretrained( |
| model_id, |
| controlnet=get_controlnets(use_brightness), |
| torch_dtype=DTYPE, |
| safety_checker=None, |
| use_safetensors=True, |
| low_cpu_mem_usage=True, |
| token=HF_TOKEN, |
| ) |
| _CN_TXT2IMG[key] = _base_scheduler_for(pipe) |
| return _CN_TXT2IMG[key] |
|
|
| def get_img2img_pipe(model_id: str, use_brightness: bool): |
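| # One cached img2img pipeline per (base model, ControlNet layout) |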
| key = (model_id, "2cn" if use_brightness else "1cn") |
| if key not in _CN_IMG2IMG: |
| pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( |
| model_id, |
| controlnet=get_controlnets(use_brightness), |
| torch_dtype=DTYPE, |
| safety_checker=None, |
| use_safetensors=True, |
| low_cpu_mem_usage=True, |
| token=HF_TOKEN, |
| ) |
| _CN_IMG2IMG[key] = _base_scheduler_for(pipe) |
| return _CN_IMG2IMG[key] |
|
|
| # Pick which image drives the brightness map |
| def _pick_brightness_image(mode: str, |
| qr_img: Image.Image, |
| init_img: Image.Image | None, |
| custom_img: Image.Image | None) -> Image.Image: |
| if mode == "init" and init_img is not None: |
| return init_img |
| if mode == "custom" and custom_img is not None: |
| return custom_img |
| return qr_img |
|
|
|
|
|
|
| # Shared txt2img core (called by both model tabs) |
| def _qr_txt2img_core(model_id: str, |
| url: str, style_prompt: str, negative: str, |
| steps: int, cfg: float, size: int, border: int, |
| qr_weight: float, seed: int, |
| use_hires: bool, hires_upscale: float, hires_strength: float, |
| repair_strength: float, feather: float, |
| control_start: float, control_end: float, |
| use_brightness: bool, bright_weight: float, |
| bright_start: float, bright_end: float, |
| bright_mode: str, bright_custom: Image.Image | None, |
| qr_upload: Image.Image | None = None): |
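| # Stage A: ControlNet txt2img at the snapped canvas size; optional Stage B: hi-res img2img pass; optional contrast repair |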
|
|
| s = snap8(size) |
|
|
| # Control QR: use the uploaded code if given, else generate one at canvas size |
| if qr_upload is not None: |
| qr_img = prep_qr_upload_image(qr_upload, size=s, blur_radius=0.0) |
| else: |
| qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF") |
|
|
|
|
| if use_brightness: |
| raw_bright_s = _pick_brightness_image(bright_mode, qr_img, None, bright_custom) |
| bright_img_s = prep_brightness_map(raw_bright_s, s, bright_mode) |
| control_images_s = [ensure_rgb_img(qr_img), ensure_rgb_img(bright_img_s)] |
| scales_s = [float(qr_weight), float(bright_weight)] |
| starts_s = [float(control_start), float(bright_start)] |
| ends_s = [float(control_end), float(bright_end)] |
| else: |
| control_images_s = ensure_rgb_img(qr_img) |
| scales_s = float(qr_weight) |
| starts_s = float(control_start) |
| ends_s = float(control_end) |
|
|
| # Seed: -1 draws a random seed |
| if int(seed) < 0: |
| seed = random.randint(0, 2**31 - 1) |
| gen = torch.Generator(device=DEV_TORCH).manual_seed(int(seed)) |
|
|
| # Load / reuse the cached pipeline and free GPU memory |
| pipe = get_txt2img_pipe(model_id, use_brightness) |
| if torch.cuda.is_available(): |
| torch.cuda.empty_cache() |
| gc.collect() |
|
|
| kwargs = dict( |
| prompt=str(style_prompt), |
| negative_prompt=str(negative or ""), |
| num_inference_steps=int(steps), |
| guidance_scale=float(cfg), |
| width=s, height=s, |
| generator=gen, |
| controlnet_conditioning_scale=scales_s, |
| control_guidance_start=starts_s, |
| control_guidance_end=ends_s, |
| ) |
|
|
| # diffusers versions name the control-image argument differently |
| sig = inspect.signature(pipe.__call__) |
| if "control_image" in sig.parameters: |
| kwargs["control_image"] = control_images_s |
| elif "image" in sig.parameters: |
| kwargs["image"] = control_images_s |
| else: |
| raise RuntimeError("Pipeline does not accept controlnet images") |
|
|
| with autocast_ctx(): |
| out = pipe(**kwargs) |
|
|
| lowres = out.images[0] |
|
|
| # Optional hi-res fix: img2img upscale with the same controls |
| final = lowres |
| qr_for_repair = qr_img |
| if use_hires: |
| up = max(1.0, min(2.0, float(hires_upscale))) |
| W = snap8(int(s * up)); H = W |
|
|
| qr_img_hi = resize_like(qr_img, W, H, method=Image.NEAREST) |
| if use_brightness: |
| raw_bright_hi = _pick_brightness_image(bright_mode, qr_img_hi, None, bright_custom) |
| bright_img_hi = prep_brightness_map(raw_bright_hi, W, bright_mode) |
| control_images_hi = [ensure_rgb_img(qr_img_hi), ensure_rgb_img(bright_img_hi)] |
| scales_hi = scales_s; starts_hi = starts_s; ends_hi = ends_s |
| else: |
| control_images_hi = ensure_rgb_img(qr_img_hi) |
| scales_hi = scales_s; starts_hi = starts_s; ends_hi = ends_s |
|
|
| pipe2 = get_img2img_pipe(model_id, use_brightness) |
| if torch.cuda.is_available(): |
| torch.cuda.empty_cache() |
| gc.collect() |
|
|
| kwargs2 = dict( |
| prompt=str(style_prompt), |
| negative_prompt=str(negative or ""), |
| image=lowres, |
| strength=float(hires_strength), |
| num_inference_steps=int(steps), |
| guidance_scale=float(cfg), |
| width=W, height=H, |
| generator=gen, |
| controlnet_conditioning_scale=scales_hi, |
| control_guidance_start=starts_hi, |
| control_guidance_end=ends_hi, |
| ) |
|
|
| sig2 = inspect.signature(pipe2.__call__) |
| if "control_image" in sig2.parameters: |
| kwargs2["control_image"] = control_images_hi |
| else: |
| # "image" already carries the low-res init here, so never overwrite it with the control |
| raise RuntimeError("Img2Img pipeline does not accept controlnet images") |
|
|
| with autocast_ctx(): |
| out2 = pipe2(**kwargs2) |
|
|
| final = out2.images[0] |
| qr_for_repair = qr_img_hi |
|
|
| final = enforce_qr_contrast(final, qr_for_repair, |
| strength=float(repair_strength), |
| feather=float(feather)) |
| return final, lowres, qr_img |
|
|
| # Init-image helpers for the img2img blend |
| def center_square(im: Image.Image) -> Image.Image: |
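| # Center-crop the longer side so the init image is square |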
| w, h = im.size |
| if w == h: |
| return im |
| if w > h: |
| off = (w - h) // 2 |
| return im.crop((off, 0, off + h, h)) |
| else: |
| off = (h - w) // 2 |
| return im.crop((0, off, w, off + w)) |
|
|
| def prep_init_image(init_img: Image.Image, target: int) -> Image.Image: |
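| # Square-crop and LANCZOS-resize the init image to the snapped canvas size |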
| s = snap8(target) |
| im = center_square(init_img.convert("RGB")) |
| return im.resize((s, s), Image.LANCZOS) |
|
|
| # Shared img2img core (image blend tab) |
| def _qr_img2img_core(model_id: str, |
| init_image: Image.Image, |
| url: str, |
| style_prompt: str, |
| negative: str, |
| steps: int, |
| cfg: float, |
| size: int, |
| border: int, |
| qr_weight: float, |
| seed: int, |
| strength: float, |
| repair_strength: float, |
| feather: float, |
| control_start: float, control_end: float, |
| use_brightness: bool, bright_weight: float, |
| bright_start: float, bright_end: float, |
| bright_mode: str, bright_custom: Image.Image | None, |
| bright_blur_px: float = 2.5, bright_gamma: float = 0.9, bright_autocontrast: bool = True, |
| bright_mix_alpha: float = 0.65, |
| qr_upload: Image.Image | None = None): |
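| # Blend the uploaded init image with the QR via ControlNet img2img, then apply optional contrast repair |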
|
|
| s = snap8(size) |
| init = ensure_rgb_img(prep_init_image(init_image, s)) |
| # Control QR: use the uploaded code if given, else generate one at canvas size |
| if qr_upload is not None: |
| qr_img = prep_qr_upload_image(qr_upload, size=s, blur_radius=0.0) |
| else: |
| qr_img = make_qr(url=url, size=s, border=int(border), back_color="#FFFFFF") |
|
|
| # Seed: -1 draws a random seed |
| if int(seed) < 0: |
| seed = random.randint(0, 2**31 - 1) |
| gen = torch.Generator(device=DEV_TORCH).manual_seed(int(seed)) |
|
|
| # Brightness conditioning (optional second ControlNet) |
| if use_brightness: |
| if bright_mode == "mix": |
| bright_img = blend_brightness_maps( |
| qr_img, init, s, |
| alpha=float(bright_mix_alpha), |
| blur_px=float(bright_blur_px), |
| gamma=float(bright_gamma), |
| autocontrast=bool(bright_autocontrast) |
| ) |
| else: |
| raw_bright = _pick_brightness_image(bright_mode, qr_img, init, bright_custom) |
| bright_img = prep_brightness_map( |
| raw_bright, s, bright_mode, |
| blur_px=float(bright_blur_px), |
| gamma=float(bright_gamma), |
| autocontrast=bool(bright_autocontrast) |
| ) |
|
|
| control_images = [ensure_rgb_img(qr_img), ensure_rgb_img(bright_img)] |
| scales = [float(qr_weight), float(bright_weight)] |
| starts = [float(control_start), float(bright_start)] |
| ends = [float(control_end), float(bright_end)] |
| else: |
| # QR ControlNet only |
| control_images = [ensure_rgb_img(qr_img)] |
| scales = [float(qr_weight)] |
| starts = [float(control_start)] |
| ends = [float(control_end)] |
|
|
| # Single-ControlNet pipelines take scalars rather than one-element lists |
| if len(control_images) == 1: |
| scales = scales[0] |
| starts = starts[0] |
| ends = ends[0] |
|
|
| # Load / reuse the cached pipeline and free GPU memory |
| pipe = get_img2img_pipe(model_id, use_brightness) |
| if torch.cuda.is_available(): |
| torch.cuda.empty_cache() |
|
|
| gc.collect() |
|
|
| kwargs = dict( |
| prompt=str(style_prompt), |
| negative_prompt=str(negative or ""), |
| image=init, |
| strength=float(strength), |
| num_inference_steps=int(steps), |
| guidance_scale=float(cfg), |
| width=s, height=s, |
| generator=gen, |
| controlnet_conditioning_scale=scales, |
| control_guidance_start=starts, |
| control_guidance_end=ends, |
| ) |
|
|
| # Pass the control image under whichever name this diffusers version accepts |
| sig = inspect.signature(pipe.__call__) |
| if "control_image" in sig.parameters: |
| kwargs["control_image"] = control_images |
| else: |
| kwargs["controlnet_conditioning_image"] = control_images |
|
|
| |
| with autocast_ctx(): |
| out = pipe(**kwargs) |
|
|
| final = out.images[0] |
| final = enforce_qr_contrast(final, qr_img, |
| strength=float(repair_strength), |
| feather=float(feather)) |
| return final, init, qr_img |
|
|
|
|
| # ZeroGPU entry point: the GPU is attached only while this call runs |
| @spaces.GPU(duration=120) |
| def qr_img2img_blend(model_key: str, |
| init_image: Image.Image, |
| url: str, style_prompt: str, negative: str, |
| steps: int, cfg: float, size: int, border: int, |
| qr_weight: float, seed: int, |
| strength: float, |
| repair_strength: float, feather: float, |
| control_start: float, control_end: float, |
| use_brightness: bool, bright_weight: float, |
| bright_start: float, bright_end: float, |
| bright_mode: str, bright_custom: Image.Image | None, |
| bright_blur_px: float, bright_gamma: float, bright_autocontrast: bool, |
| bright_mix_alpha: float, qr_upload: Image.Image | None = None): |
| model_id = BASE_MODELS.get(model_key, BASE_MODELS["stable-diffusion-v1-5"]) |
| return _qr_img2img_core(model_id, |
| init_image, |
| url, style_prompt, negative, |
| steps, cfg, size, border, |
| qr_weight, seed, |
| strength, |
| repair_strength, feather, |
| control_start, control_end, |
| use_brightness, bright_weight, |
| bright_start, bright_end, |
| bright_mode, bright_custom, |
| bright_blur_px, bright_gamma, bright_autocontrast, |
| bright_mix_alpha, qr_upload) |
|
|
|
|
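| # Thin ZeroGPU wrappers: each binds a base checkpoint and forwards all UI args to the shared core |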
| @spaces.GPU(duration=120) |
| def qr_txt2img_sd15(*args): |
| return _qr_txt2img_core(BASE_MODELS["stable-diffusion-v1-5"], *args) |
|
|
| @spaces.GPU(duration=120) |
| def qr_txt2img_dream(*args): |
| return _qr_txt2img_core(BASE_MODELS["dream"], *args) |
|
|
| # Gradio UI |
| with gr.Blocks() as demo: |
| gr.Markdown("# ZeroGPU • QR Control (with optional Brightness ControlNet)") |
|
|
| # Tab 1: SD 1.5 txt2img |
| with gr.Tab("stable-diffusion-v1-5"): |
| url1 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com") |
| qr_up = gr.Image(label="(Optional) Upload QR code", type="pil", value=None) |
| s_prompt1 = gr.Textbox(label="Style prompt", value="japanese painting, elegant shrine and torii, distant mount fuji, autumn maple trees, warm sunlight, 1girl in kimono, highly detailed, intricate patterns, anime key visual, dramatic composition") |
| s_negative1 = gr.Textbox(label="Negative prompt", value="ugly, low quality, blurry, nsfw, watermark, text, low contrast, deformed, extra digits") |
| size1 = gr.Slider(384, 1024, value=640, step=64, label="Canvas (px)") |
| steps1 = gr.Slider(10, 50, value=30, step=1, label="Steps") |
| cfg1 = gr.Slider(1.0, 12.0, value=6.0, step=0.1, label="CFG") |
| border1 = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)") |
| qr_w1 = gr.Slider(0.8, 1.8, value=1.6, step=0.05, label="QR control weight") |
| seed1 = gr.Number(value=-1, precision=0, label="Seed (-1 random)") |
|
|
| cstart1 = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start") |
| cend1 = gr.Slider(0.4, 1.0, value=1.0, step=0.05, label="QR control end") |
|
|
| use_hires1 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)") |
| hires_up1 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)") |
| hires_str1 = gr.Slider(0.30, 0.80, value=0.45, step=0.05, label="Hi-Res denoise strength") |
|
|
| repair1 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)") |
| feather1 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)") |
|
|
| use_bright1 = gr.Checkbox(value=True, visible=False) |
| bright_w1 = gr.Slider(0.0, 0.5, value=0.15, step=0.01, label="Brightness weight") |
| bright_s1 = gr.Slider(0.0, 0.8, value=0.10, step=0.05, label="Brightness start") |
| bright_e1 = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end") |
| bright_mode1 = gr.Radio(choices=["qr","custom"], value="qr", label="Brightness source") |
| bright_ref1 = gr.Image(label="(Optional) custom brightness ref", type="pil") |
|
|
| final_img1 = gr.Image(label="Final (or Hi-Res) image") |
| low_img1 = gr.Image(label="Low-res (Stage A) preview") |
| ctrl_img1 = gr.Image(label="Control QR used") |
|
|
| gr.Button("Generate with SD 1.5").click( |
| qr_txt2img_sd15, |
| [url1, s_prompt1, s_negative1, steps1, cfg1, size1, border1, qr_w1, seed1, |
| use_hires1, hires_up1, hires_str1, repair1, feather1, |
| cstart1, cend1, |
| use_bright1, bright_w1, bright_s1, bright_e1, bright_mode1, bright_ref1, qr_up], |
| [final_img1, low_img1, ctrl_img1], |
| api_name="qr_txt2img_sd15" |
| ) |
|
|
| # Tab 2: DreamShaper 8 txt2img |
| with gr.Tab("DreamShaper 8"): |
| url2 = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com") |
| qr_up2 = gr.Image(label="(Optional) Upload QR code", type="pil", value=None) |
| s_prompt2 = gr.Textbox(label="Style prompt", value="ornate baroque palace interior, gilded details, chandeliers, volumetric light, ultra detailed, cinematic") |
| s_negative2 = gr.Textbox(label="Negative prompt", value="lowres, low contrast, blurry, jpeg artifacts, watermark, text, bad anatomy") |
| size2 = gr.Slider(384, 1024, value=640, step=64, label="Canvas (px)") |
| steps2 = gr.Slider(10, 50, value=30, step=1, label="Steps") |
| cfg2 = gr.Slider(1.0, 12.0, value=6.5, step=0.1, label="CFG") |
| border2 = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)") |
| qr_w2 = gr.Slider(0.8, 1.8, value=1.6, step=0.05, label="QR control weight") |
| seed2 = gr.Number(value=-1, precision=0, label="Seed (-1 random)") |
|
|
| cstart2 = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start") |
| cend2 = gr.Slider(0.4, 1.0, value=1.0, step=0.05, label="QR control end") |
|
|
| use_hires2 = gr.Checkbox(value=True, label="Hi-Res Fix (img2img upscale)") |
| hires_up2 = gr.Slider(1.0, 2.0, value=2.0, step=0.25, label="Hi-Res upscale (×)") |
| hires_str2 = gr.Slider(0.30, 0.80, value=0.45, step=0.05, label="Hi-Res denoise strength") |
|
|
| repair2 = gr.Slider(0.0, 1.0, value=0.0, step=0.05, label="Post repair strength (optional)") |
| feather2 = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)") |
|
|
| use_bright2 = gr.Checkbox(value=True, visible=False) |
| bright_w2 = gr.Slider(0.0, 0.5, value=0.15, step=0.01, label="Brightness weight") |
| bright_s2 = gr.Slider(0.0, 0.8, value=0.10, step=0.05, label="Brightness start") |
| bright_e2 = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end") |
| bright_mode2 = gr.Radio(choices=["qr","custom"], value="qr", label="Brightness source") |
| bright_ref2 = gr.Image(label="(Optional) custom brightness ref", type="pil") |
|
|
| final_img2 = gr.Image(label="Final (or Hi-Res) image") |
| low_img2 = gr.Image(label="Low-res (Stage A) preview") |
| ctrl_img2 = gr.Image(label="Control QR used") |
|
|
| gr.Button("Generate with DreamShaper 8").click( |
| qr_txt2img_dream, |
| [url2, s_prompt2, s_negative2, steps2, cfg2, size2, border2, qr_w2, seed2, |
| use_hires2, hires_up2, hires_str2, repair2, feather2, |
| cstart2, cend2, |
| use_bright2, bright_w2, bright_s2, bright_e2, bright_mode2, bright_ref2, qr_up2], |
| [final_img2, low_img2, ctrl_img2], |
| api_name="qr_txt2img_dream" |
| ) |
|
|
| # Tab 3: image blend (img2img + QR) |
| with gr.Tab("Image Blend (img2img + QR)"): |
| m_key = gr.Dropdown(choices=list(BASE_MODELS.keys()), |
| value="stable-diffusion-v1-5", |
| label="Base model") |
|
|
| init_up = gr.Image(label="Upload base image", type="pil") |
|
|
| url_b = gr.Textbox(label="URL/Text", value="http://www.mybirdfire.com") |
| qr_up3 = gr.Image(label="(Optional) Upload QR code", type="pil", value=None) |
| s_prompt_b = gr.Textbox(label="Style prompt", value="highly detailed, cinematic lighting, rich textures") |
| s_negative_b = gr.Textbox(label="Negative prompt", value="ugly, low quality, blurry, watermark, text") |
|
|
| size_b = gr.Slider(384, 1024, value=768, step=64, label="Canvas (px, target)") |
| steps_b = gr.Slider(10, 50, value=30, step=1, label="Steps") |
| cfg_b = gr.Slider(1.0, 12.0, value=6.0, step=0.1, label="CFG") |
|
|
| border_b = gr.Slider(2, 20, value=12, step=1, label="QR border (quiet zone)") |
| qr_w_b = gr.Slider(0.8, 1.8, value=1.8, step=0.05, label="QR control weight") |
| seed_b = gr.Number(value=-1, precision=0, label="Seed (-1 random)") |
|
|
| strength_b = gr.Slider(0.2, 0.9, value=0.70, step=0.05, label="Img2Img denoise strength (blend amount)") |
|
|
| cstart_b = gr.Slider(0.0, 0.6, value=0.0, step=0.05, label="QR control start") |
| cend_b = gr.Slider(0.4, 1.0, value=0.95, step=0.05, label="QR control end") |
|
|
| repair_b = gr.Slider(0.0, 1.0, value=0.1, step=0.05, label="Post repair strength (optional)") |
| feather_b = gr.Slider(0.0, 3.0, value=1.0, step=0.1, label="Repair feather (px)") |
|
|
| use_bright_b = gr.Checkbox(value=False, label="Add Brightness ControlNet") |
| bright_w_b = gr.Slider(0.0, 0.5, value=0.25, step=0.01, label="Brightness weight") |
| bright_s_b = gr.Slider(0.0, 0.8, value=0.40, step=0.05, label="Brightness start") |
| bright_e_b = gr.Slider(0.2, 1.0, value=0.80, step=0.05, label="Brightness end") |
| bright_mode_b = gr.Radio(choices=["mix","qr","init","custom"], value="mix", label="Brightness source") |
| bright_ref_b = gr.Image(label="(Optional) custom brightness ref", type="pil") |
|
|
| bright_blur_b = gr.Slider(0.0, 6.0, value=2.5, step=0.1, label="Brightness blur (px)") |
| bright_gamma_b = gr.Slider(0.6, 1.2, value=0.9, step=0.01, label="Brightness gamma") |
| bright_auto_b = gr.Checkbox(value=True, label="Brightness auto-contrast") |
|
|
| bright_mix_b = gr.Slider(0.0, 1.0, value=0.65, step=0.01, label="Brightness source mix") |
|
|
| final_b = gr.Image(label="Final blended image") |
| init_b = gr.Image(label="(Resized) init image used") |
| ctrl_b = gr.Image(label="Control QR used") |
|
|
| gr.Button("Blend Uploaded Image with QR").click( |
| qr_img2img_blend, |
| [m_key, init_up, url_b, s_prompt_b, s_negative_b, steps_b, cfg_b, size_b, border_b, |
| qr_w_b, seed_b, strength_b, repair_b, feather_b, cstart_b, cend_b, |
| use_bright_b, bright_w_b, bright_s_b, bright_e_b, bright_mode_b, bright_ref_b, |
| bright_blur_b, bright_gamma_b, bright_auto_b, bright_mix_b, qr_up3], |
| [final_b, init_b, ctrl_b], |
| api_name="qr_img2img_blend" |
| ) |
|
|
|
|
| if __name__ == "__main__": |
| demo.queue(max_size=12).launch( |
| server_name="0.0.0.0", |
| server_port=7860, |
| show_api=False, |
| share=False, |
| ) |
|
|
|
|