CryptoCreeper committed on
Commit
ffc28ff
·
verified ·
1 Parent(s): acdb9d2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -113
app.py CHANGED
@@ -2,171 +2,161 @@ import gradio as gr
2
  import torch
3
  import random
4
  import time
 
5
  from transformers import AutoTokenizer, AutoModelForCausalLM
6
  from diffusers import DiffusionPipeline, LCMScheduler
7
  from PIL import Image, ImageFilter
 
8
 
 
 
 
9
  TEXT_MODEL_ID = "HuggingFaceTB/SmolLM-135M-Instruct"
10
-
11
  tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL_ID)
12
  text_model = AutoModelForCausalLM.from_pretrained(TEXT_MODEL_ID)
13
 
14
  def enhance_prompt(user_prompt: str) -> str:
15
  if not user_prompt.strip():
16
  return "A beautiful digital painting of a fantasy landscape"
17
-
18
  instruction = (
19
- f"<|im_start|>system\nYou are a prompt engineer. Expand the user's prompt into a detailed, descriptive visual prompt for SD 1.5. Output only the enhanced prompt.<|im_end|>\n"
20
  f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
21
  f"<|im_start|>assistant\n"
22
  )
23
-
24
  inputs = tokenizer(instruction, return_tensors="pt")
25
  with torch.no_grad():
26
- outputs = text_model.generate(
27
- **inputs,
28
- max_new_tokens=500,
29
- temperature=0.7,
30
- do_sample=True,
31
- pad_token_id=tokenizer.eos_token_id
32
- )
33
-
34
  decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
35
- if "assistant" in decoded:
36
- decoded = decoded.split("assistant")[-1]
37
-
38
- return decoded.strip()
39
 
40
  IMG_MODEL = "runwayml/stable-diffusion-v1-5"
41
  LCM_LORA = "latent-consistency/lcm-lora-sdv1-5"
42
 
43
- pipe = DiffusionPipeline.from_pretrained(
44
- IMG_MODEL,
45
- torch_dtype=torch.float32,
46
- safety_checker=None
47
- )
48
-
49
  pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
50
  pipe.load_lora_weights(LCM_LORA)
51
  pipe.to("cpu")
52
-
53
  pipe.enable_attention_slicing()
54
  pipe.enable_vae_slicing()
55
  pipe.set_progress_bar_config(disable=True)
56
 
57
- def estimate_time(steps, res):
58
- res = int(res)
59
- steps = int(steps)
60
- base_sec = {512: 15, 768: 35, 1024: 75}[res]
61
- total = (steps * base_sec) + 10
62
- return f"⏱️ **Estimate**: ~{total//60}m {total%60}s"
63
-
64
- def generate(prompt, negative, resolution, steps):
65
- size = int(resolution)
66
-
67
- yield (
68
- None,
69
- "🧠 Analysing Prompt",
70
- gr.update(interactive=False)
71
- )
72
-
73
- enhanced = enhance_prompt(prompt)
74
-
75
- yield (
76
- None,
77
- "🎨 Generating Image...",
78
- gr.update(interactive=False)
79
- )
80
-
81
- seed = random.randint(0, 2**32 - 1)
82
- generator = torch.Generator("cpu").manual_seed(seed)
83
-
84
- start = time.time()
85
- image = pipe(
86
- prompt=enhanced,
87
- negative_prompt=negative,
88
- num_inference_steps=int(steps),
89
- guidance_scale=1.0,
90
- width=size,
91
- height=size,
92
- generator=generator
93
- ).images[0]
94
- elapsed = int(time.time() - start)
95
-
96
- for i in range(5):
97
- blur_radius = (5 - i) * 2
98
- blur = image.filter(ImageFilter.GaussianBlur(radius=blur_radius))
99
- yield (
100
- blur,
101
- "🎨 Generating Image...",
102
- gr.update(interactive=False)
103
  )
104
- time.sleep(0.2)
105
-
106
- yield (
107
- image,
108
- f"✅ Done in {elapsed}s.",
109
- gr.update(interactive=True)
110
- )
111
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
  custom_css = """
113
  #container { max-width: 1000px; margin: auto; }
114
  .generate-btn { background: linear-gradient(90deg, #2ecc71, #27ae60) !important; color: white !important; }
115
- .status-box {
116
- font-size: 1.1em;
117
- padding: 10px;
118
- border-radius: 8px;
119
- background: #ffffff !important;
120
- border: 1px solid #ddd;
121
- }
122
  .status-box * { color: black !important; }
123
  """
124
 
125
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="green", secondary_hue="gray"), css=custom_css) as demo:
126
  with gr.Column(elem_id="container"):
127
- gr.Markdown("# 👾 Creeper AI - v3.3")
128
  gr.Markdown("Generate images using Creeper AI, running on CPU using fast models.")
129
 
130
  with gr.Row():
131
  with gr.Column(scale=1):
132
- prompt = gr.Textbox(
133
- label="What do you want to see?",
134
- placeholder="e.g. A futuristic city in a forest",
135
- lines=3
136
- )
137
- negative = gr.Textbox(
138
- label="Negative Prompt",
139
- value="blurry, low quality, distorted, watermark",
140
- placeholder="Things to avoid..."
141
- )
142
 
143
  with gr.Accordion("Settings ⚙️", open=True):
144
- resolution = gr.Radio(
145
- [512, 768, 1024],
146
- value=512,
147
- label="Resolution"
148
- )
149
- steps = gr.Slider(
150
- 6, 10,
151
- value=6,
152
- step=1,
153
- label="Inference Steps"
154
- )
155
-
156
- eta = gr.Markdown("⏱️ **Estimate**: ~1m 40s")
157
  generate_btn = gr.Button("🚀 Generate Image", variant="primary", elem_classes="generate-btn")
158
 
159
  with gr.Column(scale=1):
160
  output_img = gr.Image(label="Result", interactive=False)
161
  status = gr.Markdown("🟢 Ready", elem_classes="status-box")
162
 
163
- resolution.change(estimate_time, [steps, resolution], eta)
164
- steps.change(estimate_time, [steps, resolution], eta)
 
 
 
 
165
 
166
  generate_btn.click(
167
  generate,
168
- inputs=[prompt, negative, resolution, steps],
169
  outputs=[output_img, status, generate_btn]
170
  )
171
 
172
- demo.launch()
 
2
  import torch
3
  import random
4
  import time
5
+ import io
6
  from transformers import AutoTokenizer, AutoModelForCausalLM
7
  from diffusers import DiffusionPipeline, LCMScheduler
8
  from PIL import Image, ImageFilter
9
+ from gradio_client import Client
10
 
11
# ===============================
# LOCAL MODELS (CPU MODE)
# ===============================
# Small instruction-tuned LM, used only to expand the user's prompt.
TEXT_MODEL_ID = "HuggingFaceTB/SmolLM-135M-Instruct"

tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL_ID)
text_model = AutoModelForCausalLM.from_pretrained(TEXT_MODEL_ID)
17
 
18
def enhance_prompt(user_prompt: str) -> str:
    """Expand *user_prompt* into a detailed visual prompt via the local LM.

    Falls back to a generic fantasy-landscape prompt when the input is blank.
    Returns only the assistant's newly generated text, whitespace-stripped.
    """
    if not user_prompt.strip():
        return "A beautiful digital painting of a fantasy landscape"

    # Manual ChatML-style template matching the SmolLM instruct format.
    instruction = (
        f"<|im_start|>system\nYou are a prompt engineer. Expand the user's prompt into a detailed visual prompt. Output only the enhanced prompt.<|im_end|>\n"
        f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

    inputs = tokenizer(instruction, return_tensors="pt")
    with torch.no_grad():
        outputs = text_model.generate(
            **inputs,
            max_new_tokens=500,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode only the tokens generated AFTER the prompt. Slicing by input
    # length is robust even when the word "assistant" occurs in the user's
    # prompt or in the model's reply — the previous split-on-"assistant"
    # parsing broke in both of those cases.
    prompt_len = inputs["input_ids"].shape[1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
 
 
 
31
 
32
IMG_MODEL = "runwayml/stable-diffusion-v1-5"
LCM_LORA = "latent-consistency/lcm-lora-sdv1-5"

# SD 1.5 plus the LCM LoRA so that few-step (6-10) inference stays usable
# on a CPU-only host; the safety checker is disabled to save memory.
pipe = DiffusionPipeline.from_pretrained(IMG_MODEL, torch_dtype=torch.float32, safety_checker=None)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(LCM_LORA)
pipe.to("cpu")

# Memory savers for constrained hosts; the terminal progress bar is
# disabled because progress is surfaced through the UI status box instead.
pipe.enable_attention_slicing()
pipe.enable_vae_slicing()
pipe.set_progress_bar_config(disable=True)
42
 
43
# ===============================
# ULTRA MODE (REMOTE API)
# ===============================
def call_ultra_api(prompt, negative, steps, seed):
    """Render an image through the remote Z-Image-Turbo Space.

    Best-effort: returns a PIL image on success, or None on any failure
    (connection issues, quota limits, unreadable result). Errors are
    printed rather than raised so the caller can degrade gracefully.
    """
    try:
        remote = Client("mrfakename/Z-Image-Turbo")
        # The Space replies with a filesystem path to the rendered image.
        image_path = remote.predict(
            prompt=prompt,
            negative_prompt=negative,
            num_inference_steps=steps,
            seed=seed,
            api_name="/predict",
        )
        return Image.open(image_path)
    except Exception as e:
        print(f"API Error: {e}")
        return None
61
+
62
+ # ===============================
63
+ # UI LOGIC
64
+ # ===============================
65
def toggle_ultra(is_ultra):
    """Reconfigure the input controls when the Ultra checkbox flips.

    Ultra mode reveals the negative-prompt box, narrows resolution to the
    remote API's supported sizes, and widens the step slider to 6-20;
    local mode hides the box and restores the CPU-friendly ranges.
    """
    return {
        negative_field: gr.update(visible=is_ultra),
        resolution: gr.update(choices=[512, 1024] if is_ultra else [512, 768, 1024], value=512),
        steps: gr.update(minimum=6, maximum=20 if is_ultra else 10, value=6),
    }
78
+
79
def generate(prompt, user_neg, res, step_val, is_ultra):
    """Streaming generator behind the Generate button.

    Yields (image, status_markdown, button_update) triples so the UI can
    show progress. Ultra mode delegates to the remote API using the
    user-visible negative prompt; local mode enhances the prompt first,
    renders with the LCM pipeline, then plays a blur-to-sharp reveal.
    """
    size = int(res)
    seed = random.randint(0, 2**32 - 1)
    default_neg = "blurry, low quality, distorted, watermark"

    if is_ultra:
        yield None, "🎨 Generating Image...", gr.update(interactive=False)
        # Remote path: user-supplied negative prompt, no local enhancement.
        image = call_ultra_api(prompt, user_neg, step_val, seed)
        if image:
            yield image, "✅ Done in Ultra Mode.", gr.update(interactive=True)
        else:
            yield None, "❌ API Error or Limit Reached", gr.update(interactive=True)
        return

    # Local CPU path.
    yield None, "🧠 Analysing Prompt", gr.update(interactive=False)
    enhanced = enhance_prompt(prompt)

    yield None, "🎨 Generating Image...", gr.update(interactive=False)
    generator = torch.Generator("cpu").manual_seed(seed)
    start = time.time()

    # Local mode always uses the built-in negative prompt (field is hidden).
    image = pipe(
        prompt=enhanced,
        negative_prompt=default_neg,
        num_inference_steps=int(step_val),
        guidance_scale=1.0,
        width=size,
        height=size,
        generator=generator,
    ).images[0]

    elapsed = int(time.time() - start)
    # Reveal animation: five frames of progressively weaker Gaussian blur,
    # then the final sharp image.
    for frame in range(5):
        preview = image.filter(ImageFilter.GaussianBlur(radius=(5 - frame) * 2))
        yield preview, "🎨 Generating Image...", gr.update(interactive=False)
        time.sleep(0.2)
    yield image, f"✅ Done in {elapsed}s.", gr.update(interactive=True)
117
+
118
# ===============================
# INTERFACE
# ===============================
# Inline CSS: centered container, green gradient button, readable status box.
custom_css = """
#container { max-width: 1000px; margin: auto; }
.generate-btn { background: linear-gradient(90deg, #2ecc71, #27ae60) !important; color: white !important; }
.status-box { font-size: 1.1em; padding: 10px; border-radius: 8px; background: #ffffff !important; border: 1px solid #ddd; }
.status-box * { color: black !important; }
"""
127
 
128
with gr.Blocks(theme=gr.themes.Soft(primary_hue="green"), css=custom_css) as demo:
    with gr.Column(elem_id="container"):
        gr.Markdown("# 👾 Creeper AI - v4.0")
        gr.Markdown("Generate images using Creeper AI, running on CPU using fast models.")

        with gr.Row():
            # Left column: prompt inputs and generation settings.
            with gr.Column(scale=1):
                prompt_field = gr.Textbox(label="What do you want to see?", placeholder="e.g. A futuristic city", lines=3)
                # Hidden by default; toggle_ultra reveals it in Ultra mode.
                negative_field = gr.Textbox(label="Negative Prompt", value="blurry, low quality, distorted, watermark", visible=False)

                with gr.Accordion("Settings ⚙️", open=True):
                    resolution = gr.Radio([512, 768, 1024], value=512, label="Resolution")
                    steps = gr.Slider(6, 10, value=6, step=1, label="Inference Steps")
                    ultra_check = gr.Checkbox(label="Ultra-Fast Gen (A few IMGs per day)")

                generate_btn = gr.Button("🚀 Generate Image", variant="primary", elem_classes="generate-btn")

            # Right column: result image and live status readout.
            with gr.Column(scale=1):
                output_img = gr.Image(label="Result", interactive=False)
                status = gr.Markdown("🟢 Ready", elem_classes="status-box")

    # Ultra toggle reshapes the controls it lists as outputs.
    ultra_check.change(
        toggle_ultra,
        inputs=[ultra_check],
        outputs=[negative_field, resolution, steps],
    )

    # generate is a Python generator, so the outputs stream as it yields.
    generate_btn.click(
        generate,
        inputs=[prompt_field, negative_field, resolution, steps, ultra_check],
        outputs=[output_img, status, generate_btn],
    )

demo.launch()