import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image
from huggingface_hub import hf_hub_download
import os
from utils import load_lora_weights, get_available_loras, generate_image

# Global model cache
pipeline = None      # lazily-initialised diffusers pipeline (see load_model)
current_lora = None  # name of the LoRA adapter currently loaded, or None


def load_model():
    """Load and cache the Magic-Wan-Image-V2 text-to-image pipeline.

    The pipeline is created once and stored in the module-level ``pipeline``
    global; subsequent calls return the cached instance. The model is moved
    to CUDA when a GPU is available.

    Returns:
        The cached ``AutoPipelineForText2Image`` instance.
    """
    global pipeline
    if pipeline is None:
        print("Loading Magic-Wan-Image-V2 model...")
        pipeline = AutoPipelineForText2Image.from_pretrained(
            "wikeeyang/Magic-Wan-Image-V2",
            torch_dtype=torch.float16,
            variant="fp16"
        )
        if torch.cuda.is_available():
            pipeline = pipeline.to("cuda")
        print("Model loaded successfully!")
    return pipeline


def generate(
    prompt,
    negative_prompt,
    lora_name,
    lora_scale,
    width,
    height,
    num_inference_steps,
    guidance_scale,
    seed,
    randomize_seed
):
    """Generate an image from a text prompt, optionally applying a LoRA.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Optional negative prompt ("" disables it).
        lora_name: Key into ``get_available_loras()`` or "None" for base model.
        lora_scale: Strength passed to ``set_adapters`` when a LoRA is active.
        width/height: Output resolution in pixels.
        num_inference_steps: Denoising step count.
        guidance_scale: Classifier-free guidance weight.
        seed: RNG seed; -1 (with randomize_seed False) means no fixed seed.
        randomize_seed: When True, draws a fresh random seed.

    Returns:
        Tuple of (PIL image, used seed as str).

    Raises:
        gr.Error: Wraps any underlying failure for display in the UI.
    """
    global current_lora

    try:
        # Load model if not already loaded (cached after first call).
        pipe = load_model()

        # --- LoRA handling -----------------------------------------------
        if lora_name and lora_name != "None":
            if current_lora != lora_name:
                # Unload previous LoRA if one is active.
                if current_lora:
                    pipe.unload_lora_weights()
                    # BUGFIX: clear the marker so we never claim an unloaded
                    # adapter is still active if the new one fails to load.
                    current_lora = None
                lora_path = get_available_loras().get(lora_name)
                if lora_path:
                    load_lora_weights(pipe, lora_path)
                    current_lora = lora_name
                    print(f"Loaded LoRA: {lora_name}")
            # Apply the requested strength even when the adapter was already
            # loaded, so scale-only changes in the UI take effect.
            if hasattr(pipe, 'set_adapters'):
                pipe.set_adapters(["default"], [lora_scale])
        else:
            # "None" selected: drop any active LoRA.
            if current_lora:
                pipe.unload_lora_weights()
                current_lora = None

        # --- Seed handling -----------------------------------------------
        # Gradio may deliver numeric inputs as floats; normalise to int.
        seed = int(seed)
        if randomize_seed:
            seed = torch.randint(0, 2**32 - 1, (1,)).item()
        # BUGFIX: was torch.Generator(device=pipeline.device) — use the local
        # `pipe` returned by load_model(), not the module global.
        generator = (
            torch.Generator(device=pipe.device).manual_seed(seed)
            if seed != -1
            else None
        )

        # --- Generation --------------------------------------------------
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt if negative_prompt else None,
            width=int(width),
            height=int(height),
            num_inference_steps=int(num_inference_steps),
            guidance_scale=guidance_scale,
            generator=generator,
        ).images[0]

        return image, str(seed)

    except Exception as e:
        # Chain the original exception so the full traceback reaches the logs.
        raise gr.Error(f"Generation failed: {str(e)}") from e

# Get available LoRAs
available_loras = get_available_loras()
# "None" sentinel lets the user run the base model with no adapter.
lora_choices = ["None"] + list(available_loras.keys())

# Create Gradio 6 app with modern theme
with gr.Blocks() as demo:
    # Header with anycoder link
    gr.HTML("""

🎨 Magic Wan Image V2 - Text to Image

Generate stunning images from text prompts with LoRA support

Built with anycoder 🚀

""")

    with gr.Row():
        # Left column - Controls
        with gr.Column(scale=1):
            gr.Markdown("### 📝 Prompt")
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe the image you want to generate...",
                lines=3,
                value="A beautiful sunset over mountains, highly detailed, 8k"
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt (Optional)",
                placeholder="What to avoid in the image...",
                lines=2,
                value="blurry, low quality, distorted"
            )

            gr.Markdown("### 🎭 LoRA Style")
            lora_dropdown = gr.Dropdown(
                choices=lora_choices,
                value="None",
                label="Select LoRA Adapter",
                info="Choose a style adapter or leave as None for base model"
            )
            lora_scale = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=1.0,
                step=0.1,
                label="LoRA Scale",
                info="Strength of LoRA effect (0.0 = no effect)"
            )

            gr.Markdown("### ⚙️ Generation Settings")
            # step=64 keeps dimensions on multiples the diffusion model expects.
            with gr.Row():
                width = gr.Slider(
                    minimum=256, maximum=1024, value=512, step=64, label="Width"
                )
                height = gr.Slider(
                    minimum=256, maximum=1024, value=512, step=64, label="Height"
                )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    minimum=10, maximum=100, value=30, step=1, label="Inference Steps"
                )
                guidance_scale = gr.Slider(
                    minimum=1.0, maximum=20.0, value=7.5, step=0.5, label="Guidance Scale"
                )

            gr.Markdown("### 🎲 Seed Settings")
            with gr.Row():
                # precision=0 forces an integer seed in the UI.
                seed = gr.Number(
                    value=-1, label="Seed (-1 for random)", precision=0
                )
                randomize_seed = gr.Checkbox(
                    value=True, label="Randomize Seed"
                )

            generate_btn = gr.Button(
                "🚀 Generate Image", variant="primary", size="lg"
            )

        # Right column - Output
        with gr.Column(scale=1):
            gr.Markdown("### 🖼️ Generated Image")
            output_image = gr.Image(
                label="Generated Image", type="pil", height=512
            )
            # Echoes the seed actually used, so results can be reproduced.
            seed_output = gr.Textbox(
                label="Used Seed", interactive=False
            )
            gr.Markdown("### 💡 Tips")
            gr.Markdown("""
            - **Prompt**: Be descriptive and specific
            - **LoRA**: Try different style adapters for unique looks
            - **Steps**: More steps = better quality but slower
            - **Guidance**: Higher = more prompt adherence
            - **Seed**: Use same seed for reproducible results
            """)

    # Examples section — values map 1:1 onto the `inputs` list below.
    gr.Markdown("### 📚 Examples")
    examples = gr.Examples(
        examples=[
            [
                "A cyberpunk city at night, neon lights, rain, highly detailed",
                "blurry, low quality",
                "None", 1.0, 512, 512, 30, 7.5, -1, True
            ],
            [
                "Portrait of a fantasy elf warrior, intricate armor, forest background",
                "deformed, ugly, bad anatomy",
                "None", 1.0, 512, 768, 30, 7.5, -1, True
            ],
            [
                "Magical library with floating books, mystical atmosphere, warm lighting",
                "dark, scary",
                "None", 1.0, 768, 512, 30, 7.5, -1, True
            ],
            [
                "Steampunk airship flying through clouds, detailed mechanical parts",
                "modern, electronic",
                "None", 1.0, 512, 512, 30, 7.5, -1, True
            ],
        ],
        inputs=[
            prompt, negative_prompt, lora_dropdown, lora_scale,
            width, height, num_inference_steps, guidance_scale,
            seed, randomize_seed
        ],
        label="Click an example to load settings"
    )

    # Connect generate button
    # NOTE(review): `api_visibility` looks like a Gradio 6 parameter — confirm
    # it exists in the pinned Gradio version before deploying.
    generate_btn.click(
        fn=generate,
        inputs=[
            prompt, negative_prompt, lora_dropdown, lora_scale,
            width, height, num_inference_steps, guidance_scale,
            seed, randomize_seed
        ],
        outputs=[output_image, seed_output],
        api_visibility="public"
    )

    # Footer
    gr.HTML("""

Model: Magic-Wan-Image-V2 | Powered by Gradio 6

""")

# Launch with Gradio 6 syntax - theme goes in launch(), not Blocks!
# NOTE(review): in Gradio <6, `theme` belongs to gr.Blocks(...), `footer_links`
# does not exist, and `allow_flagging` is an Interface parameter — verify all
# three against the actual installed Gradio version.
if __name__ == "__main__":
    demo.launch(
        theme=gr.themes.Soft(
            primary_hue="indigo",
            secondary_hue="purple",
            neutral_hue="slate",
            font=gr.themes.GoogleFont("Inter"),
            text_size="md",
            spacing_size="md",
            radius_size="md"
        ).set(
            button_primary_background_fill="*primary_600",
            button_primary_background_fill_hover="*primary_700",
            block_title_text_weight="600",
        ),
        footer_links=[
            {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
            {"label": "Model Card", "url": "https://huggingface.co/wikeeyang/Magic-Wan-Image-V2"},
            "api"
        ],
        allow_flagging="never"
    )