Galaxydude2 committed on
Commit
ff7f1a9
·
verified ·
1 Parent(s): 1a8e621

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +160 -0
app.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os  # NOTE(review): `os` appears unused in this file — confirm before removing
import time
import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler

# ──────────────────────────────────────────────────────
# Load Pony Diffusion V6 XL directly from Hugging Face.
# (HF caches the download automatically — on free Spaces the first start
# takes longer; subsequent starts reuse the cache and are faster.)
# ──────────────────────────────────────────────────────
MODEL_REPO = "LyliaEngine/Pony_Diffusion_V6_XL"  # ← best converted repo per the original author (2026)

print(f"Lade Pony Diffusion V6 XL von Hugging Face: {MODEL_REPO}")
print("(Erster Start kann 3–10 Minuten dauern wegen Download & Cache)")

# fp16 weights + safetensors variant: halves memory vs. fp32 and avoids pickle loading.
pipe = StableDiffusionXLPipeline.from_pretrained(
    MODEL_REPO,
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)

# Swap in the fp16-safe SDXL VAE (per the original comment: almost always
# needed with Pony for clean colors / decode quality).
pipe.vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16
)

# Euler Ancestral ("Euler a") — the sampler the original author recommends for Pony.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Move to GPU when available; otherwise stay on CPU (much slower).
if torch.cuda.is_available():
    pipe.to("cuda")
    print("Modell auf GPU geladen")
else:
    print("Keine GPU → CPU-Modus (langsamer)")

# Attempt to disable the safety checker.
# NOTE(review): StableDiffusionXLPipeline does not register a `safety_checker`
# component (that belongs to the SD 1.x pipeline), so these two assignments are
# most likely no-ops — verify against the installed diffusers version.
pipe.safety_checker = None
pipe.requires_safety_checker = False
# ──────────────────────────────────────────────────────
# Generate function (wired to the UI button below)
# ──────────────────────────────────────────────────────
def generate(
    prompt,
    negative_prompt="",
    steps=28,
    cfg=6.5,
    seed=0,
    resolution="832×1216"
):
    """Run one SDXL inference pass and return the resulting PIL image.

    Args:
        prompt: Free-text user prompt; Pony score tags are prepended and
            style tags appended before it reaches the pipeline.
        negative_prompt: Optional extra negative tags, merged in front of the
            built-in defaults.
        steps: Number of inference steps (coerced to int — Gradio sliders can
            deliver floats, and the scheduler requires an integer).
        cfg: Classifier-free guidance scale.
        seed: 0 → random seed each call; any other value seeds a
            torch.Generator deterministically.
        resolution: "width×height" string (note: the separator is the Unicode
            multiplication sign '×', matching the dropdown choices).

    Returns:
        PIL.Image.Image from the pipeline output.
    """
    start_time = time.time()

    # Pony-specific score-tag prompt boost (the model is trained on these tags).
    score_tags = "score_9, score_8_up, score_7_up, "
    style_tags = ", cartoon style, anime style, vibrant colors, bold outlines, stylized nudity, detailed background, masterpiece, best quality"

    full_prompt = f"{score_tags}{prompt}, nsfw, nude, naked, bare skin{style_tags}"

    # Default negatives: low-quality score tags plus hard exclusions for minors.
    default_neg = (
        "score_6, score_5, score_4, blurry, lowres, worst quality, low quality, "
        "bad anatomy, deformed, extra limbs, missing limbs, bad hands, text, watermark, "
        "signature, child, loli, underage, young, baby"
    )

    # User negatives go first, then defaults; strip any stray ", " at the edges.
    full_negative = (negative_prompt + ", " if negative_prompt else "") + default_neg
    full_negative = full_negative.strip(", ")

    # seed == 0 means "let the pipeline pick a random seed" (generator=None).
    generator = None if seed == 0 else torch.Generator(device=pipe.device).manual_seed(int(seed))

    # Parse "W×H"; fall back to the portrait default on malformed input.
    # Narrowed from a bare `except:` so genuine errors (KeyboardInterrupt,
    # SystemExit, programming bugs) are no longer silently swallowed.
    try:
        width, height = map(int, resolution.split("×"))
    except (AttributeError, ValueError):
        width, height = 832, 1216

    image = pipe(
        prompt=full_prompt,
        negative_prompt=full_negative,
        num_inference_steps=int(steps),  # int() guards against float slider values
        guidance_scale=cfg,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    duration = time.time() - start_time
    print(f"Fertig in {duration:.1f} Sekunden")

    return image
# ──────────────────────────────────────────────────────
# Gradio UI — layout is defined by statement order inside the
# Blocks/Row/Column context managers; do not reorder casually.
# ──────────────────────────────────────────────────────
css = """
.gradio-container {max-width: 940px !important; margin: auto;}
"""

with gr.Blocks(title="Pony Cartoon Nude Generator @ HF Spaces", css=css) as demo:
    # Header and usage tips (German, user-facing — deliberately untranslated).
    gr.Markdown("""
# Pony Diffusion V6 XL – Stylisierte Nackt/Cartoon-Bilder
Läuft komplett auf Hugging Face (kein Civitai mehr nötig)
**Prompt-Tipps:** Englisch · starte mit score_9, score_8_up · nsfw, nude · detailliert beschreiben
CFG 5.5–7.5 · Steps 24–40 · Euler a super
""")

    with gr.Row():
        # Left column: text inputs.
        with gr.Column(scale=6):
            prompt_input = gr.Textbox(
                label="Prompt (englisch am besten)",
                placeholder="beautiful curvy anime girl, long silver hair, seductive pose, beach sunset, detailed eyes, nsfw",
                lines=5,
                value="gorgeous woman, pink hair, big eyes, nude, tropical beach, golden hour lighting"
            )
            negative_input = gr.Textbox(
                label="Negative Prompt (optional)",
                lines=3,
                value="blurry, deformed, child, loli, text, watermark, realistic"
            )

        # Right column: sampler settings matching generate()'s parameters.
        with gr.Column(scale=4):
            gr.Markdown("**Settings**")
            steps_slider = gr.Slider(15, 60, value=28, step=1, label="Steps")
            cfg_slider = gr.Slider(3.0, 12.0, value=6.5, step=0.5, label="CFG Scale")
            seed_number = gr.Number(value=0, precision=0, label="Seed (0 = random)")
            # Choices use the Unicode '×' separator that generate() splits on.
            resolution_dropdown = gr.Dropdown(
                choices=["832×1216", "1216×832", "1024×1024", "896×1152", "1152×896"],
                value="832×1216",
                label="Resolution"
            )

    generate_button = gr.Button("Generate", variant="primary")

    output_image = gr.Image(label="Result", type="pil")

    # Wire the button to generate(); input order must match its signature.
    generate_button.click(
        fn=generate,
        inputs=[prompt_input, negative_input, steps_slider, cfg_slider, seed_number, resolution_dropdown],
        outputs=output_image
    )

    # Click-to-fill example rows (same column order as the click inputs).
    gr.Examples(
        examples=[
            ["sexy elf girl, green hair, fantasy forest, nsfw, detailed skin, seductive", "", 30, 7.0, 4242, "832×1216"],
            ["muscular anthro character, shower, steam, dramatic light, nude", "female, blurry", 28, 6.0, 999, "1216×832"],
            ["cute anime catgirl, bath, bubbles, nsfw", "", 26, 6.5, 7777, "1024×1024"],
        ],
        inputs=[prompt_input, negative_input, steps_slider, cfg_slider, seed_number, resolution_dropdown]
    )

    gr.Markdown("""
**HF Spaces Info:** Erster Start → Modell-Download von HF (\~7 GB) → Geduld. Danach viel schneller.
Free Spaces schlafen irgendwann → dann evtl. wieder laden.
""")

# queue() caps pending jobs at 8; 0.0.0.0:7860 is the standard HF Spaces
# binding. launch() blocks — nothing after this line runs until shutdown.
demo.queue(max_size=8).launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True
)