# Hugging Face Space: FLUX.1-dev image generator (page-status residue removed from scrape)
import gradio as gr
import spaces
import torch
from diffusers import FluxPipeline

from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.utils import get_precision
# Inference dtype for the whole pipeline; bfloat16 halves memory versus fp32.
dtype = torch.bfloat16
# Prefer the GPU when one is visible to torch, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
def gpu_precision():
    """Return the quantization precision string for this GPU, as reported by
    nunchaku's ``get_precision()`` (used to pick the matching checkpoint file)."""
    return get_precision()
# Load the SVDQuant Nunchaku transformer whose checkpoint matches the precision
# detected for this GPU, then build the full FLUX.1-dev pipeline around it.
checkpoint_path = (
    f"nunchaku-tech/nunchaku-flux.1-dev/svdq-{gpu_precision()}_r32-flux.1-dev.safetensors"
)
transformer = NunchakuFluxTransformer2dModel.from_pretrained(checkpoint_path)
pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    transformer=transformer,
    torch_dtype=dtype,
).to(device)
def generate_image(prompt: str, steps: int, guidance_scale: float):
    """Generate one 576x1024 image from *prompt* using the global FLUX pipeline.

    Args:
        prompt: Text description of the desired image; must be non-empty.
        steps: Number of denoising steps. Gradio sliders can deliver floats,
            so the value is coerced to ``int`` before reaching diffusers.
        guidance_scale: Classifier-free guidance strength.

    Returns:
        The first generated image (``result.images[0]``).

    Raises:
        gr.Error: If the prompt is empty or whitespace-only.
    """
    # NOTE(review): `import spaces` appears unused at module level — a
    # `@spaces.GPU` decorator here may be intended for ZeroGPU; confirm.
    if not prompt.strip():
        raise gr.Error("Prompt cannot be empty.")
    # inference_mode skips autograd bookkeeping; autocast matches the
    # pipeline's bfloat16 weights on the selected device.
    with torch.inference_mode(), torch.autocast(device, dtype=dtype):
        result = pipeline(
            prompt=prompt,
            width=576,
            height=1024,
            num_inference_steps=int(steps),  # diffusers expects an int here
            guidance_scale=guidance_scale,
        )
    return result.images[0]
# Minimal Gradio UI wiring the generator to a prompt box and two sliders.
demo = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Describe the scene..."),
        gr.Slider(label="Inference Steps", minimum=5, maximum=50, step=1, value=20),
        gr.Slider(label="Guidance Scale", minimum=0.1, maximum=10.0, step=0.1, value=3.5),
    ],
    outputs=gr.Image(label="Generated Image"),
    title="FLUX Image Generator",
    # Fixed: the previous description claimed "Flux + LoRA", but no LoRA is
    # loaded anywhere in this app — describe what the code actually runs.
    description="Prompt-based image generation using FLUX.1-dev (Nunchaku SVDQuant)",
)
demo.launch()