# app.py
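"""
Gradio demo app for DiffQRCoder on a Hugging Face ZeroGPU Space.

The diffusion pipeline is built on CPU at startup (warmup) and moved to CUDA
inside the @spaces.GPU-decorated infer() call, which then renders an aesthetic,
scanning-robust QR code from a URL/text payload and a style prompt.
"""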
import gradio as gr
import spaces
from diffqrcoder_wrapper import generate_qr_art, load_pipeline
import torch
DEFAULT_PROMPT = (
    "whimsical biomimetic blueprint, iridescent inks swirling through "
    "mechanical petals, soft gears woven with luminescent filigree"
)
DEFAULT_NEG = "easynegative"
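# Note: DEFAULT_NEG ("easynegative") is defined but not currently passed to
# generate_qr_art() below; wiring it up would require the wrapper to accept a
# negative-prompt argument (an assumption about diffqrcoder_wrapper's API).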


def warmup():
    """
    Run once on Space startup, on CPU only.
    Downloads models & builds pipeline into cache.
    """
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
    torch.set_float32_matmul_precision("high")
    print("🔥 Warmup starting: downloading models & building pipeline on CPU...")
    pipe = load_pipeline()
    print("🔥 Warmup done. Pipeline ready on CPU.")
    # Optional: return a tiny status string for UI (doesn't have to be used)
    return "Warmup complete."


@spaces.GPU
def infer(
    url_or_text: str,
    prompt: str,
    num_inference_steps: int,
    controlnet_scale: float,
    scanning_robust_guidance_scale: float,
    perceptual_guidance_scale: float,
    srmpgd_iters: int,
    seed: int,
):
    try:
        print("🔧 infer() starting")
        print("CUDA available?", torch.cuda.is_available())
        if torch.cuda.is_available():
            print("CUDA device count:", torch.cuda.device_count())
            print("Current device:", torch.cuda.current_device())
            print("Device name:", torch.cuda.get_device_name(0))

        pipe = load_pipeline()
        print("✅ pipeline loaded on CPU")

        # Attach to GPU in ZeroGPU context
        pipe = pipe.to("cuda")
        print("✅ pipeline moved to CUDA")

        srmpgd_num_iteration = None if srmpgd_iters == 0 else srmpgd_iters
        print(
            f"Params → steps={num_inference_steps}, "
            f"ctrl={controlnet_scale}, srg={scanning_robust_guidance_scale}, "
            f"pg={perceptual_guidance_scale}, iters={srmpgd_num_iteration}"
        )

        img = generate_qr_art(
            pipe,
            url_or_text=url_or_text,
            prompt=prompt,
            num_inference_steps=num_inference_steps,
            controlnet_conditioning_scale=controlnet_scale,
            scanning_robust_guidance_scale=scanning_robust_guidance_scale,
            perceptual_guidance_scale=perceptual_guidance_scale,
            srmpgd_num_iteration=srmpgd_num_iteration,
            seed=seed,
        )
        print("✅ generation complete")
        return img
    except Exception as e:
        print("❌ Error in infer():", repr(e))
        raise
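# Minimal local smoke test (a sketch, assuming a machine with CUDA outside the
# ZeroGPU Space; values mirror the UI defaults below, and the returned image is
# a PIL image per gr.Image(type="pil")):
#
#   img = infer(
#       "https://example.com", DEFAULT_PROMPT,
#       num_inference_steps=40, controlnet_scale=1.0,
#       scanning_robust_guidance_scale=500, perceptual_guidance_scale=2.0,
#       srmpgd_iters=6, seed=42,
#   )
#   img.save("qr_art.png")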


with gr.Blocks() as demo:
    gr.Markdown(
        r"""
# DiffQRCoder – ZeroGPU demo
Generate aesthetic, scanning-robust QR codes using the **DiffQRCoder** pipeline
([WACV 2025](https://openaccess.thecvf.com/content/WACV2025/html/Liao_DiffQRCoder_Diffusion-Based_Aesthetic_QR_Code_Generation_with_Scanning_Robustness_Guided_WACV_2025_paper.html)) 🚀
"""
    )

    with gr.Row():
        url = gr.Textbox(
            label="QR contents (URL or text)",
            value="https://example.com",
        )
        prompt = gr.Textbox(
            label="Style prompt",
            value=DEFAULT_PROMPT,
            lines=3,
        )

    seed_input = gr.Slider(
        minimum=0,
        maximum=999999,
        value=42,
        step=1,
        label="Seed",
    )

    with gr.Accordion("Advanced parameters", open=False):
        steps = gr.Slider(
            minimum=10,
            maximum=60,
            value=40,
            step=1,
            label="Diffusion steps (num_inference_steps)",
        )
        control_scale = gr.Slider(
            minimum=0.5,
            maximum=2.0,
            value=1,
            step=0.05,
            label="ControlNet conditioning scale",
        )
        srg_scale = gr.Slider(
            minimum=0,
            maximum=800,
            value=500,
            step=10,
            label="Scanning-robust guidance scale (srg)",
        )
        pg_scale = gr.Slider(
            minimum=0,
            maximum=10,
            value=2,
            step=0.5,
            label="Perceptual guidance scale (pg)",
        )
        srmpgd_iters = gr.Slider(
            minimum=0,
            maximum=64,
            value=6,
            step=1,
            label="SR-MPGD iterations (0 = disabled)",
        )

    btn = gr.Button("Generate QR Art ✨", variant="primary")
    out = gr.Image(label="Output QR art", type="pil")

    btn.click(
        fn=infer,
        inputs=[
            url,
            prompt,
            steps,
            control_scale,
            srg_scale,
            pg_scale,
            srmpgd_iters,
            seed_input,
        ],
        outputs=[out],
    )

    demo.load(fn=warmup, inputs=None, outputs=None)

demo.launch()
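# Optional: demo.queue().launch() (standard Gradio API) would queue concurrent
# requests instead of running them in parallel.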