import os
import time

import gradio as gr
import torch
from diffusers import (
    AutoencoderKL,
    EulerAncestralDiscreteScheduler,
    StableDiffusionXLPipeline,
)

# ──────────────────────────────────────────────────────
# Load Pony Diffusion V6 XL directly from Hugging Face.
# (HF caches it automatically — on free Spaces the first start is slow
# because of the download; subsequent starts reuse the cache.)
# ──────────────────────────────────────────────────────
MODEL_REPO = "LyliaEngine/Pony_Diffusion_V6_XL"

print(f"Lade Pony Diffusion V6 XL von Hugging Face: {MODEL_REPO}")
print("(Erster Start kann 3–10 Minuten dauern wegen Download & Cache)")

pipe = StableDiffusionXLPipeline.from_pretrained(
    MODEL_REPO,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)

# Replacement VAE: the fp16-fix VAE avoids the NaN / washed-out-color
# artifacts the stock SDXL VAE produces in half precision.
pipe.vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)

# Euler Ancestral is the sampler this model family is tuned for.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

if torch.cuda.is_available():
    pipe.to("cuda")
    print("Modell auf GPU geladen")
else:
    print("Keine GPU → CPU-Modus (langsamer)")

# NOTE(review): StableDiffusionXLPipeline ships without a safety checker,
# so these two assignments are effectively no-ops; kept for compatibility
# with code that probes these attributes — confirm before removing.
pipe.safety_checker = None
pipe.requires_safety_checker = False


# ──────────────────────────────────────────────────────
# Generation function
# ──────────────────────────────────────────────────────
def generate(
    prompt,
    negative_prompt="",
    steps=28,
    cfg=6.5,
    seed=0,
    resolution="832×1216",
):
    """Run one SDXL generation pass and return the resulting PIL image.

    Parameters
    ----------
    prompt : str
        User prompt; Pony score tags are prepended and style tags appended.
    negative_prompt : str
        Extra negative terms, merged with a built-in default negative list.
    steps : int | float
        Inference steps (Gradio sliders may deliver floats; coerced to int).
    cfg : float
        Classifier-free guidance scale.
    seed : int
        0 means "random" (no explicit generator); anything else is used
        as a deterministic manual seed.
    resolution : str
        "<width>×<height>" string; falls back to 832×1216 if malformed.
    """
    start_time = time.time()

    # Pony prompt boost — score tags are essential for this model family.
    score_tags = "score_9, score_8_up, score_7_up, "
    style_tags = (
        ", cartoon style, anime style, vibrant colors, bold outlines, "
        "stylized nudity, detailed background, masterpiece, best quality"
    )
    full_prompt = f"{score_tags}{prompt}, nsfw, nude, naked, bare skin{style_tags}"

    default_neg = (
        "score_6, score_5, score_4, blurry, lowres, worst quality, low quality, "
        "bad anatomy, deformed, extra limbs, missing limbs, bad hands, text, watermark, "
        "signature, child, loli, underage, young, baby"
    )
    full_negative = (negative_prompt + ", " if negative_prompt else "") + default_neg
    full_negative = full_negative.strip(", ")

    # Seed 0 is treated as "random": no generator is passed to the pipeline.
    generator = (
        None
        if seed == 0
        else torch.Generator(device=pipe.device).manual_seed(int(seed))
    )

    # BUGFIX: the original used a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit; catch only parsing failures.
    try:
        width, height = map(int, resolution.split("×"))
    except (ValueError, AttributeError):
        width, height = 832, 1216

    image = pipe(
        prompt=full_prompt,
        negative_prompt=full_negative,
        num_inference_steps=int(steps),  # slider values can arrive as floats
        guidance_scale=cfg,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    duration = time.time() - start_time
    print(f"Fertig in {duration:.1f} Sekunden")
    return image


# ──────────────────────────────────────────────────────
# Gradio UI
# ──────────────────────────────────────────────────────
css = """
.gradio-container {max-width: 940px !important; margin: auto;}
"""

with gr.Blocks(title="Pony Cartoon Nude Generator @ HF Spaces", css=css) as demo:
    gr.Markdown("""
# Pony Diffusion V6 XL – Stylisierte Nackt/Cartoon-Bilder
Läuft komplett auf Hugging Face (kein Civitai mehr nötig)

**Prompt-Tipps:** Englisch · starte mit score_9, score_8_up · nsfw, nude · detailliert beschreiben
CFG 5.5–7.5 · Steps 24–40 · Euler a super
""")

    with gr.Row():
        with gr.Column(scale=6):
            prompt_input = gr.Textbox(
                label="Prompt (englisch am besten)",
                placeholder="beautiful curvy anime girl, long silver hair, seductive pose, beach sunset, detailed eyes, nsfw",
                lines=5,
                value="gorgeous woman, pink hair, big eyes, nude, tropical beach, golden hour lighting",
            )
            negative_input = gr.Textbox(
                label="Negative Prompt (optional)",
                lines=3,
                value="blurry, deformed, child, loli, text, watermark, realistic",
            )
        with gr.Column(scale=4):
            gr.Markdown("**Settings**")
            steps_slider = gr.Slider(15, 60, value=28, step=1, label="Steps")
            cfg_slider = gr.Slider(3.0, 12.0, value=6.5, step=0.5, label="CFG Scale")
            seed_number = gr.Number(value=0, precision=0, label="Seed (0 = random)")
            resolution_dropdown = gr.Dropdown(
                choices=["832×1216", "1216×832", "1024×1024", "896×1152", "1152×896"],
                value="832×1216",
                label="Resolution",
            )
            generate_button = gr.Button("Generate", variant="primary")

    output_image = gr.Image(label="Result", type="pil")

    generate_button.click(
        fn=generate,
        inputs=[
            prompt_input,
            negative_input,
            steps_slider,
            cfg_slider,
            seed_number,
            resolution_dropdown,
        ],
        outputs=output_image,
    )

    gr.Examples(
        examples=[
            [
                "sexy elf girl, green hair, fantasy forest, nsfw, detailed skin, seductive",
                "",
                30,
                7.0,
                4242,
                "832×1216",
            ],
            [
                "muscular anthro character, shower, steam, dramatic light, nude",
                "female, blurry",
                28,
                6.0,
                999,
                "1216×832",
            ],
            [
                "cute anime catgirl, bath, bubbles, nsfw",
                "",
                26,
                6.5,
                7777,
                "1024×1024",
            ],
        ],
        inputs=[
            prompt_input,
            negative_input,
            steps_slider,
            cfg_slider,
            seed_number,
            resolution_dropdown,
        ],
    )

    gr.Markdown("""
**HF Spaces Info:** Erster Start → Modell-Download von HF (\\~7 GB) → Geduld. Danach viel schneller.
Free Spaces schlafen irgendwann → dann evtl. wieder laden.
""")

demo.queue(max_size=8).launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True,
)