tomiconic committed on
Commit
0f2cc0f
Β·
verified Β·
1 Parent(s): 7b7bfd6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +128 -0
app.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
from diffusers import StableDiffusionXLPipeline, DPMSolverMultistepScheduler
import random

# ── Device detection ───────────────────────────────────────────────────────────
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# fp16 halves VRAM on GPU; CPU inference needs fp32 (fp16 CPU kernels are slow/missing).
DTYPE = torch.float16 if DEVICE == "cuda" else torch.float32

print(f"Running on: {DEVICE.upper()} | dtype: {DTYPE}")

# ── Model ──────────────────────────────────────────────────────────────────────
MODEL_REPO = "cyberdelia/CyberRealisticPony"
MODEL_FILE = "CyberRealisticPony_V16.0_FP16.safetensors"
MODEL_URL = f"https://huggingface.co/{MODEL_REPO}/resolve/main/{MODEL_FILE}"

# ── Pony quality tags (added automatically — do not type these in your prompt) ─
PONY_POS_PREFIX = "score_9, score_8_up, score_7_up, "
PONY_NEG_PREFIX = "score_6, score_5, score_4, "

# ── Load model ─────────────────────────────────────────────────────────────────
print("Loading model — this may take several minutes on first start...")

# Single-file checkpoint (.safetensors) fetched straight from the Hub.
pipe = StableDiffusionXLPipeline.from_single_file(
    MODEL_URL,
    torch_dtype=DTYPE,
    use_safetensors=True,
)

# DPM++ multistep with Karras sigmas: good quality at the low step counts used here.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    use_karras_sigmas=True,
)

# Trade a little speed for lower peak memory (helps both CPU and small GPUs).
pipe.enable_attention_slicing()

if DEVICE == "cuda":
    # xformers is an OPTIONAL dependency; the original unconditional call would
    # crash the whole app at startup if the package is not installed.
    try:
        pipe.enable_xformers_memory_efficient_attention()
    except (ImportError, ModuleNotFoundError) as err:
        print(f"xformers unavailable ({err}); continuing without it.")

pipe = pipe.to(DEVICE)

print("Model ready.")
43
+
# ── Generation function ────────────────────────────────────────────────────────
def generate_image(prompt, negative_prompt, width, height, steps, guidance, seed, randomize_seed):
    """Run one SDXL generation and return (PIL image, seed actually used).

    Pony quality tags are prepended to both prompts automatically, so the
    user never types them. Returning the seed lets the UI display which seed
    produced the image, even when it was randomised.

    Raises:
        gr.Error: when the prompt is empty, so Gradio shows a friendly message.
    """
    if not prompt or prompt.strip() == "":
        raise gr.Error("Please enter a prompt before generating.")

    if randomize_seed:
        seed = random.randint(0, 2**32 - 1)

    # Gradio number inputs may deliver floats; the generator needs an exact int.
    seed = int(seed)
    generator = torch.Generator(device=DEVICE).manual_seed(seed)

    full_prompt = PONY_POS_PREFIX + prompt.strip()
    # Guard against a cleared textbox delivering None instead of "" —
    # the original `negative_prompt.strip()` would raise AttributeError.
    full_negative = PONY_NEG_PREFIX + (negative_prompt or "").strip()

    output = pipe(
        prompt=full_prompt,
        negative_prompt=full_negative,
        width=int(width),
        height=int(height),
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        generator=generator,
        clip_skip=2,  # Pony-family checkpoints expect CLIP skip 2
    )

    return output.images[0], seed
70
+
71
+
# ── UI ─────────────────────────────────────────────────────────────────────────
# Two-column layout: all generation controls on the left, the rendered image
# and the seed that produced it on the right.
with gr.Blocks(title="CyberRealistic Pony", theme=gr.themes.Soft()) as demo:

    gr.Markdown("# 🐴 CyberRealistic Pony Generator")
    gr.Markdown(
        "> **Model:** CyberRealistic Pony v16.0 · SDXL / Pony architecture \n"
        "> ⚠️ **Running on CPU** — generation takes 30–90+ minutes per image. "
        "Upgrade to ZeroGPU in Space Settings for practical speeds.\n\n"
        "_Quality tags (`score_9, score_8_up, score_7_up`) are prepended to your prompt automatically._"
    )

    with gr.Row():
        # Left column: prompt inputs and sampling parameters.
        with gr.Column(scale=1):

            prompt = gr.Textbox(
                label="Prompt",
                placeholder="a woman in a futuristic city, cinematic lighting, highly detailed, photorealistic",
                lines=4,
            )
            # Pre-filled with a common quality/anatomy negative; user can edit.
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value=(
                    "(worst quality:1.2), (low quality:1.2), (normal quality:1.2), "
                    "lowres, bad anatomy, bad hands, signature, watermarks, "
                    "ugly, imperfect eyes, skewed eyes, unnatural face, "
                    "unnatural body, error, extra limb, missing limbs"
                ),
                lines=3,
            )

            gr.Markdown("### Image Size")
            with gr.Row():
                # step=64 keeps dimensions at SDXL-friendly multiples of 64.
                width = gr.Slider(minimum=512, maximum=896, value=768, step=64, label="Width")
                height = gr.Slider(minimum=512, maximum=1152, value=896, step=64, label="Height")

            gr.Markdown("### Sampling")
            steps = gr.Slider(minimum=10, maximum=30, value=20, step=1, label="Steps — keep low on CPU")
            guidance = gr.Slider(minimum=1.0, maximum=12.0, value=5.0, step=0.5, label="Guidance Scale (CFG) — recommended: 5")

            gr.Markdown("### Seed")
            with gr.Row():
                # precision=0 forces integer input; range matches the 32-bit
                # seed drawn in generate_image when "Randomise Seed" is on.
                seed = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=2**32 - 1)
                randomize_seed = gr.Checkbox(label="Randomise Seed", value=True)

            generate_btn = gr.Button("Generate Image 🖼️", variant="primary", size="lg")

        # Right column: output image plus the seed actually used.
        with gr.Column(scale=1):
            output_image = gr.Image(label="Generated Image", type="pil", height=500)
            used_seed = gr.Number(label="Seed Used", interactive=False)

    # Wire the button to generate_image; input order must match its signature.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, negative_prompt, width, height, steps, guidance, seed, randomize_seed],
        outputs=[output_image, used_seed],
    )

demo.launch()