CryptoCreeper committed on
Commit
e4b37f8
·
verified ·
1 Parent(s): 92d864d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -61
app.py CHANGED
@@ -2,113 +2,120 @@ import gradio as gr
2
  import torch
3
  import random
4
  import time
 
5
  from diffusers import DiffusionPipeline, LCMScheduler
6
  from PIL import Image, ImageFilter
7
 
8
# -------------------------------
# MODEL SETUP (CPU SAFE)
# -------------------------------
# Base SD 1.5 checkpoint plus the LCM LoRA adapter; the adapter is what
# makes the very low step counts offered by the UI (6-8) viable on CPU.
MODEL_ID = "runwayml/stable-diffusion-v1-5"
ADAPTER_ID = "latent-consistency/lcm-lora-sdv1-5"

pipe = DiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,  # fp32 — this pipeline is pinned to CPU below
    safety_checker=None,
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(ADAPTER_ID)
pipe.to("cpu")

# Reduce peak memory and silence the per-step progress bar.
pipe.enable_attention_slicing()
pipe.enable_vae_slicing()
pipe.set_progress_bar_config(disable=True)
26
 
27
# -------------------------------
# PROMPT REFINEMENT
# -------------------------------
def refine_prompt(user_prompt: str):
    """Expand a raw subject into a (positive, negative) prompt pair.

    The positive prompt pins the generation to one centered subject; the
    negative prompt suppresses common low-step artifacts. CPU-fast: pure
    string templating, no model call.
    """
    subject = user_prompt.strip()
    positive = f"a single {subject}, centered, isolated, high quality, clean background"
    negative = (
        "multiple objects, duplicate, blurry, low quality, cropped, "
        "out of frame, horror, grotesque, weird colors, artifacts"
    )
    return positive, negative
39
-
40
# -------------------------------
# ETA ESTIMATION
# -------------------------------
def estimate_time(steps, resolution):
    """Return a rough wall-clock estimate for CPU generation as Markdown."""
    # Empirical seconds-per-step for each square resolution.
    cost_table = {256: 6, 512: 12, 768: 25, 1024: 45}
    total = 2 + int(steps) * cost_table[int(resolution)]  # 2s fixed overhead
    minutes, seconds = divmod(total, 60)
    return f"⏱️ Estimated time: ~{int(minutes)}m {int(seconds)}s"
52
 
53
# -------------------------------
# IMAGE GENERATION WITH PROGRESSIVE BLUR
# -------------------------------
def generate(prompt, resolution, steps):
    """Gradio generator handler.

    Yields (gallery_images, status_markdown) tuples: first a white
    placeholder, then ten progressively sharper blurred previews, finally
    the finished image with the elapsed time.
    """
    n_steps = int(steps)
    size = int(resolution)

    # 1️⃣ Refine prompt
    refined_prompt, neg_prompt = refine_prompt(prompt)

    # 2️⃣ Blank canvas so the gallery reacts immediately
    started = time.time()
    yield [Image.new("RGB", (size, size), (255, 255, 255))], "🟡 Generating..."

    # 3️⃣ CPU image generation with a fresh, reproducible seed
    seed = random.randint(0, 10**9)
    rng = torch.Generator("cpu").manual_seed(seed)
    pipe.scheduler.set_timesteps(n_steps)

    img = pipe(
        prompt=refined_prompt,
        negative_prompt=neg_prompt,
        num_inference_steps=n_steps,
        guidance_scale=1.2,
        width=size,
        height=size,
        generator=rng,
    ).images[0]

    # 4️⃣ Reveal: blur radius shrinks from 20px down to 2px over ten frames
    for frame in range(10):
        radius = 20 * (100 - frame * 10) / 100
        yield [img.filter(ImageFilter.GaussianBlur(radius=radius))], "🟢 Generating..."
        time.sleep(1)

    # 5️⃣ Final image with total time
    yield [img], f"✅ Done in {round(time.time() - started, 2)}s"
96
 
97
# -------------------------------
# GRADIO UI
# -------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 👾 CREEPER AI — CPU SMART IMAGE GENERATION")

    with gr.Row():
        with gr.Column():
            prompt_in = gr.Textbox(label="Prompt", placeholder="cute snake", lines=2)
            resolution = gr.Radio([256, 512, 768, 1024], value=512, label="Resolution")
            steps = gr.Slider(6, 8, value=6, step=1, label="Steps")  # Minimum 6
            eta = gr.Markdown("⏱️ Estimated time: ~1m 0s")
            gen_btn = gr.Button("Generate")
            status = gr.Markdown("🟢 Ready")

        with gr.Column():
            gallery = gr.Gallery(columns=1)

    # Keep the ETA line in sync with both controls.
    steps.change(estimate_time, [steps, resolution], eta)
    resolution.change(estimate_time, [steps, resolution], eta)

    # Button click streams generator updates into the gallery + status.
    gen_btn.click(
        generate,
        inputs=[prompt_in, resolution, steps],
        outputs=[gallery, status],
    )

demo.launch()
 
2
  import torch
3
  import random
4
  import time
5
+ from transformers import AutoTokenizer, AutoModelForCausalLM
6
  from diffusers import DiffusionPipeline, LCMScheduler
7
  from PIL import Image, ImageFilter
8
 
9
# -------------------------------
# SMALL PROMPT ENHANCER (CPU)
# -------------------------------
# Tiny instruction-tuned LLM used purely for prompt rewriting; small enough
# to run alongside the diffusion pipeline on CPU.
ENHANCER_MODEL = "HuggingFaceTB/SmolLM-135M-Instruct"
tokenizer_enhancer = AutoTokenizer.from_pretrained(ENHANCER_MODEL)
model_enhancer = AutoModelForCausalLM.from_pretrained(ENHANCER_MODEL)

def enhance_text(user_prompt, prefix):
    """Rewrite *user_prompt* into a more detailed instruction via the tiny LLM.

    Args:
        user_prompt: Raw text typed by the user.
        prefix: Optional instruction override; when truthy it replaces the
            default "rewrite with detail" instruction.

    Returns:
        The model's generated continuation, stripped of whitespace.
        Non-deterministic by design (do_sample=True).
    """
    if prefix:
        instruction = f"{prefix} {user_prompt}"
    else:
        instruction = f"Rewrite this for an image generator with detail: {user_prompt}"

    inputs = tokenizer_enhancer(instruction, return_tensors="pt")
    outputs = model_enhancer.generate(
        **inputs,
        max_new_tokens=50,
        temperature=0.7,
        do_sample=True,
    )
    # BUG FIX: for causal LMs, generate() returns prompt + continuation.
    # Decoding outputs[0] whole leaked the literal meta-instruction
    # ("Rewrite this for an image generator...") into the prompt handed to
    # Stable Diffusion. Decode only the newly generated tokens.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    text = tokenizer_enhancer.decode(new_tokens, skip_special_tokens=True)
    return text.strip()
33
+
34
# -------------------------------
# IMAGE MODEL SETUP (CPU SAFE)
# -------------------------------
IMG_MODEL_ID = "runwayml/stable-diffusion-v1-5"
IMG_ADAPTER_ID = "latent-consistency/lcm-lora-sdv1-5"

# SD 1.5 with the LCM LoRA adapter so the 6-8 step range exposed in the UI
# produces usable images; fp32 because the pipeline is pinned to CPU.
pipe = DiffusionPipeline.from_pretrained(
    IMG_MODEL_ID,
    torch_dtype=torch.float32,
    safety_checker=None,
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(IMG_ADAPTER_ID)
pipe.to("cpu")

# Trim peak memory and silence the per-step progress bar.
pipe.enable_attention_slicing()
pipe.enable_vae_slicing()
pipe.set_progress_bar_config(disable=True)
51
 
52
  # -------------------------------
53
+ # ETA
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  # -------------------------------
55
  def estimate_time(steps, resolution):
56
  steps = int(steps)
57
  resolution = int(resolution)
58
+ per_step = {512:12, 768:25, 1024:45}[resolution]
59
  overhead = 2
60
  est = overhead + steps * per_step
61
+ mins = est // 60
62
+ secs = est % 60
63
+ return f"⏱️ Estimated: ~{mins}m {secs}s"
64
 
65
# -------------------------------
# GENERATE WITH PROGRESSIVE BLUR
# -------------------------------
def generate(prompt, neg_prompt, resolution, steps):
    """Gradio generator handler: yields (gallery, status) updates.

    Sequence: white placeholder -> LLM prompt enhancement -> diffusion on
    CPU -> ten progressively sharper blurred previews -> final image with
    the seed that produced it.
    """
    size = int(resolution)
    n_steps = int(steps)

    # 1️⃣ AI PROMPT ENHANCEMENT
    enhanced_prompt = enhance_text(prompt, "")
    # BUG FIX: only enhance a non-empty negative prompt. Running the LLM on
    # an empty string hallucinated an arbitrary negative prompt, silently
    # steering generation when the user asked for no constraints.
    enhanced_negative = (
        enhance_text(neg_prompt, "Rewrite negative prompt:")
        if neg_prompt and neg_prompt.strip()
        else ""
    )

    # 2️⃣ White placeholder so the gallery shows feedback immediately
    placeholder = Image.new("RGB", (size, size), (255, 255, 255))
    yield [placeholder], "🟡 Generating..."

    # 3️⃣ CPU IMAGE GENERATION (seeded so a run can be reproduced)
    seed = random.randint(0, 10**9)
    rng = torch.Generator("cpu").manual_seed(seed)
    # NOTE(review): the pipeline sets scheduler timesteps itself from
    # num_inference_steps; this explicit call is kept for parity but looks
    # redundant — confirm before removing.
    pipe.scheduler.set_timesteps(n_steps)

    img = pipe(
        prompt=enhanced_prompt,
        negative_prompt=enhanced_negative,
        num_inference_steps=n_steps,
        guidance_scale=1.2,
        width=size,
        height=size,
        generator=rng,
    ).images[0]

    # 4️⃣ BLUR REVEAL: radius shrinks from 20px to 2px over ten frames
    max_blur = 20
    for i in range(10):
        blur_pct = 100 - i * 10
        blurred = img.filter(ImageFilter.GaussianBlur(radius=max_blur * blur_pct / 100))
        yield [blurred], "🟢 Revealing..."
        time.sleep(1)

    # 5️⃣ FINAL
    yield [img], f"✅ Done | Seed: {seed}"
 
102
 
103
# -------------------------------
# GRADIO UI
# -------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 👾 CREEPER AI — SMART IMAGE GENERATOR")
    gr.Markdown("1: The higher the resolution & steps, the longer the image takes to make.\n2: The more detailed the prompt and negative prompt, the better the result.")

    with gr.Row():
        with gr.Column():
            prompt_in = gr.Textbox(label="Prompt")
            neg_in = gr.Textbox(label="Negative Prompt")

            resolution = gr.Radio([512, 768, 1024], value=512, label="Resolution")
            steps = gr.Slider(6, 8, value=6, step=1, label="Steps")

            eta = gr.Markdown("⏱️ Estimated: ~1m 0s")
            gen_btn = gr.Button("Generate")
            status = gr.Markdown("🟢 Ready")

        with gr.Column():
            gallery = gr.Gallery(columns=1)

    # Keep the ETA line in sync with both controls.
    steps.change(estimate_time, [steps, resolution], eta)
    resolution.change(estimate_time, [steps, resolution], eta)

    # Button click streams the generator's updates to gallery + status.
    gen_btn.click(
        generate,
        inputs=[prompt_in, neg_in, resolution, steps],
        outputs=[gallery, status],
    )

demo.launch()