# NOTE: the four lines below are Hugging Face web-page artifacts from a
# copy/paste (repo path, author, commit message, commit hash). They are
# commented out here so the file is valid Python.
# dp / app.py
# Galaxydude2's picture
# Update app.py
# 9925016 verified
# app.py
# Text-to-image demo with Stable Diffusion, for Hugging Face Spaces / local use.
# NOTE(review): this app deliberately passes safety_checker=None, i.e. it
# disables the diffusers NSFW safety checker.
import gradio as gr
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
import torch

# Model choice. Alternatives: "Lykon/DreamShaper", "prompthero/openjourney-v4",
# "nitrosocke/Arcane-Diffusion", "runwayml/stable-diffusion-v1-5"
model_id = "Lykon/DreamShaper"
print(f"Lade Modell: {model_id}")

# Pick device and dtype up front: fp16 only makes sense on CUDA — half-precision
# ops are unsupported or extremely slow on CPU, so fall back to fp32 there.
_device = "cuda" if torch.cuda.is_available() else "cpu"
_dtype = torch.float16 if _device == "cuda" else torch.float32

_load_kwargs = dict(
    torch_dtype=_dtype,
    use_safetensors=True,
    safety_checker=None,            # disables the NSFW safety checker entirely
    requires_safety_checker=False,  # suppress the "checker missing" warning path
)
if _device == "cuda":
    # fp16 weight files are only useful (and only guaranteed loadable) on GPU.
    _load_kwargs["variant"] = "fp16"

pipe = StableDiffusionPipeline.from_pretrained(model_id, **_load_kwargs)

# Euler-Ancestral generally gives good results for SD 1.x checkpoints.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Memory optimizations.
pipe.enable_attention_slicing()
if _device == "cuda":
    # enable_model_cpu_offload() manages device placement itself; calling
    # pipe.to("cuda") afterwards (as the original code did) conflicts with the
    # offload hooks and undoes the VRAM savings — do exactly one of the two.
    pipe.enable_model_cpu_offload()
else:
    pipe = pipe.to(_device)
def generate(
    prompt,
    negative_prompt="",
    steps=35,
    guidance=7.0,
    width=512,
    height=512,
    seed=-1,
    progress=gr.Progress(track_tqdm=True)
):
    """Run one text-to-image generation and return a PIL image.

    Parameters mirror the UI controls: ``prompt``/``negative_prompt`` (str),
    ``steps`` (inference steps), ``guidance`` (CFG scale), ``width``/``height``
    (pixels; SD expects multiples of 8), ``seed`` (-1 = random).

    Raises:
        gr.Error: on any pipeline failure, so Gradio shows the message in the
        UI. (The original code returned an error *string* to a gr.Image
        output, which itself crashes the frontend.)
    """
    # Gradio sliders / Number fields may deliver floats; the pipeline wants ints.
    steps, width, height = int(steps), int(width), int(height)

    if int(seed) == -1:
        generator = None  # let the pipeline draw a random seed
    else:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        # manual_seed() returns the generator, so chaining is safe.
        generator = torch.Generator(device=device).manual_seed(int(seed))

    progress(0, desc="Starte Generierung...")
    try:
        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=steps,
            guidance_scale=guidance,
            width=width,
            height=height,
            generator=generator,
        ).images[0]
    except Exception as e:
        # Surface the failure in the UI instead of returning a string where a
        # PIL image is expected.
        raise gr.Error(f"Fehler: {str(e)}")
    progress(1.0, desc="Fertig")
    return image
# Dark-themed CSS plus the Blocks UI that wires the controls to generate().
css = """
body { background: #0d0015; color: #e0d0ff; font-family: system-ui; }
.gradio-container { max-width: 1200px !important; }
h1 { color: #ff4d94; text-shadow: 0 0 20px #ff4d9480; }
button { background: linear-gradient(45deg, #ff4d94, #c71585) !important; }
"""

with gr.Blocks(css=css, title="Uncensored Image Generator") as demo:
    gr.Markdown("""
# Uncensored Text-to-Image Generator
Kein Safety-Checker · Kein NSFW-Filter · Keine Zensur
""")
    with gr.Row():
        with gr.Column(scale=6):
            prompt = gr.Textbox(
                label="Prompt (beliebig explizit möglich)",
                placeholder="very detailed, nsfw, explicit, nude woman, cinematic lighting, masterpiece",
                lines=4,
                max_lines=8
            )
            neg_prompt = gr.Textbox(
                label="Negative Prompt (optional)",
                placeholder="blurry, low quality, deformed, extra limbs, watermark, text",
                lines=2
            )
        with gr.Column(scale=4):
            with gr.Group():
                gr.Markdown("**Einstellungen**")
                steps = gr.Slider(15, 80, value=35, step=1, label="Steps")
                guidance = gr.Slider(1.0, 20.0, value=7.0, step=0.5, label="CFG / Guidance Scale")
                seed = gr.Number(value=-1, label="Seed (-1 = random)")
            with gr.Group():
                gr.Markdown("**Auflösung**")
                width = gr.Slider(256, 1024, value=512, step=64, label="Breite")
                height = gr.Slider(256, 1024, value=512, step=64, label="Höhe")
    btn = gr.Button("Generieren", variant="primary", scale=0)
    output_image = gr.Image(label="Ergebnis", type="pil", show_label=False)

    # Bug fix: the original passed _js="() => {return []}". In Gradio 3.x a
    # _js function's return value REPLACES the inputs — so generate() would be
    # called with no arguments — and in Gradio 4+ the _js kwarg was removed
    # entirely (TypeError). Plain Python wiring is correct in both versions.
    btn.click(
        fn=generate,
        inputs=[prompt, neg_prompt, steps, guidance, width, height, seed],
        outputs=output_image,
    )
# Start the Gradio server. share=True additionally requests a public
# gradio.live tunnel; binding 0.0.0.0 exposes the server on all interfaces
# (required inside containers such as HF Spaces).
launch_kwargs = {
    "share": True,
    "server_name": "0.0.0.0",
    "server_port": 7860,
}
demo.launch(**launch_kwargs)