Spaces:
Paused
Paused
File size: 7,373 Bytes
da4c8ed dcfaebf ffc28ff dcfaebf ffc28ff dcfaebf 2f0c5b1 dcfaebf ffc28ff 2f0c5b1 551486d dcfaebf 2f0c5b1 ffc28ff dcfaebf ffc28ff dcfaebf ffc28ff dcfaebf ffc28ff 6338865 ffc28ff 6338865 ffc28ff 3e372bc 6338865 ffc28ff 6338865 3e372bc 6338865 3e372bc 6338865 df77b93 3e372bc 6338865 3e372bc ffc28ff 6338865 ffc28ff 6338865 ffc28ff 6338865 ffc28ff 6338865 ffc28ff 6338865 ffc28ff 3e372bc 6338865 3e372bc ffc28ff 6338865 ffc28ff 6338865 ffc28ff 6338865 ffc28ff 2f0c5b1 ffc28ff 758e184 2f0c5b1 ffc28ff 2f0c5b1 6338865 2f0c5b1 6338865 2f0c5b1 0cccd4d ffc28ff 2f0c5b1 acdb9d2 dcfaebf 6338865 ffc28ff df77b93 ffc28ff 2f0c5b1 df77b93 1b0b55c ffc28ff |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 |
import gradio as gr
import torch
import random
import time
from transformers import AutoTokenizer, AutoModelForCausalLM
from diffusers import DiffusionPipeline, LCMScheduler
from PIL import Image, ImageFilter
from gradio_client import Client
# ===============================
# LOCAL MODELS (CPU MODE)
# ===============================
# Small instruct LLM used only to expand user prompts into richer visual
# prompts; tiny enough (135M params) to run on CPU at import time.
TEXT_MODEL_ID = "HuggingFaceTB/SmolLM-135M-Instruct"
tokenizer = AutoTokenizer.from_pretrained(TEXT_MODEL_ID)
text_model = AutoModelForCausalLM.from_pretrained(TEXT_MODEL_ID)
def enhance_prompt(user_prompt: str) -> str:
    """Expand a short user prompt into a detailed visual prompt via the local LLM.

    Returns a generic fantasy-landscape prompt when the input is blank.
    """
    if not user_prompt.strip():
        return "A beautiful digital painting of a fantasy landscape"
    # ChatML-style conversation; the assistant turn is left open for generation.
    chat = (
        "<|im_start|>system\n"
        "You are a prompt engineer. Expand the user's prompt into a detailed visual prompt. Output only the enhanced prompt.<|im_end|>\n"
        f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
        "<|im_start|>assistant\n"
    )
    encoded = tokenizer(chat, return_tensors="pt")
    with torch.no_grad():
        generated = text_model.generate(
            **encoded,
            max_new_tokens=500,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    text = tokenizer.decode(generated[0], skip_special_tokens=True)
    # Keep only the assistant's reply; the role word survives special-token stripping.
    if "assistant" in text:
        return text.split("assistant")[-1].strip()
    return text.strip()
# Stable Diffusion 1.5 with the LCM-LoRA adapter: allows usable images in
# very few inference steps, which is what makes CPU generation tolerable.
IMG_MODEL = "runwayml/stable-diffusion-v1-5"
LCM_LORA = "latent-consistency/lcm-lora-sdv1-5"
# float32 + no safety checker to keep the CPU pipeline light.
pipe = DiffusionPipeline.from_pretrained(IMG_MODEL, torch_dtype=torch.float32, safety_checker=None)
# LCM requires its own scheduler in place of the model's default.
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(LCM_LORA)
pipe.to("cpu")
# Memory savers: trade a little speed for a much smaller peak footprint.
pipe.enable_attention_slicing()
pipe.enable_vae_slicing()
# Progress bars would spam the Space logs; the UI reports status instead.
pipe.set_progress_bar_config(disable=True)
# ===============================
# ULTRA MODE (Z-IMAGE-TURBO)
# ===============================
def call_ultra_api(prompt, negative, steps, resolution, seed):
    """Generate an image through the hosted Z-Image-Turbo Space.

    Returns a PIL image on success, or None when the remote call fails
    (errors are logged, not raised, so the UI can report "busy" gracefully).
    """
    try:
        remote = Client("mrfakename/Z-Image-Turbo")
        # The endpoint exposes no separate negative-prompt argument, so the
        # negative terms are folded into the positive prompt instead.
        merged = f"{prompt} . {negative}" if negative else prompt
        response = remote.predict(
            prompt=merged,
            width=int(resolution),
            height=int(resolution),
            num_inference_steps=int(steps),
            seed=int(seed),
            randomize_seed=False,  # the seed is controlled on our side
            api_name="/generate_image",
        )
        # The endpoint replies with [image_filepath, seed_used]; unwrap if so.
        path = response[0] if isinstance(response, (list, tuple)) else response
        return Image.open(path)
    except Exception as e:
        print(f"Ultra Mode Error: {e}")
        return None
# ===============================
# UI LOGIC
# ===============================
def toggle_ultra(is_ultra):
    """Reconfigure the settings widgets when the Ultra checkbox flips."""
    if not is_ultra:
        # Local CPU mode: negative prompt hidden, short step range, 768 allowed.
        return {
            negative_field: gr.update(visible=False),
            resolution: gr.update(choices=[512, 768, 1024], value=512),
            steps: gr.update(minimum=6, maximum=10, value=6),
        }
    # Ultra mode: show the negative prompt, drop 768, allow up to 20 steps.
    return {
        negative_field: gr.update(visible=True),
        resolution: gr.update(choices=[512, 1024], value=512),
        steps: gr.update(minimum=1, maximum=20, value=9),
    }
def generate(prompt, user_neg, res, step_val, is_ultra):
    """Streaming handler for the Generate button.

    Yields (image, status_markdown, button_update) tuples so the UI can show
    progress; the button is disabled while work is in flight.
    """
    size = int(res)
    seed = random.randint(0, 2**32 - 1)
    default_neg = "blurry, low quality, distorted, watermark"
    if is_ultra:
        yield (None, "🎨 Generating Image...", gr.update(interactive=False))
        # Prefer the user's negative prompt; fall back to the default terms.
        final_neg = user_neg if user_neg.strip() else default_neg
        image = call_ultra_api(prompt, final_neg, step_val, size, seed)
        if image:
            yield (image, f"✅ Done (Ultra Mode). Seed: {seed}", gr.update(interactive=True))
        else:
            yield (None, "❌ API Busy or Error. Try again.", gr.update(interactive=True))
        return
    # Local CPU pipeline: enhance the prompt first, then run diffusion.
    yield (None, "🧠 Analysing Prompt", gr.update(interactive=False))
    enhanced = enhance_prompt(prompt)
    yield (None, "🎨 Generating Image...", gr.update(interactive=False))
    rng = torch.Generator("cpu").manual_seed(seed)
    t0 = time.time()
    image = pipe(
        prompt=enhanced,
        negative_prompt=default_neg,
        num_inference_steps=int(step_val),
        guidance_scale=1.0,  # LCM works best with little/no CFG
        width=size,
        height=size,
        generator=rng,
    ).images[0]
    elapsed = int(time.time() - t0)
    # Reveal animation: stream progressively sharper frames (blur 10..2).
    for radius in (10, 8, 6, 4, 2):
        frame = image.filter(ImageFilter.GaussianBlur(radius=radius))
        yield (frame, "🎨 Generating Image...", gr.update(interactive=False))
        time.sleep(0.2)
    yield (image, f"✅ Done in {elapsed}s.", gr.update(interactive=True))
# ===============================
# INTERFACE
# ===============================
# CSS overrides: centered layout, green gradient on the generate button, and a
# forced light status box so the text stays readable in dark themes.
custom_css = """
#container { max-width: 1000px; margin: auto; }
.generate-btn { background: linear-gradient(90deg, #2ecc71, #27ae60) !important; color: white !important; }
.status-box { font-size: 1.1em; padding: 10px; border-radius: 8px; background: #ffffff !important; border: 1px solid #ddd; }
.status-box * { color: black !important; }
"""
# Two-column layout: inputs/settings on the left, result + status on the right.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="green"), css=custom_css) as demo:
    with gr.Column(elem_id="container"):
        gr.Markdown("# 🎨 Creeper AI - v4.0")
        gr.Markdown("Generate images using Creeper AI. Use **Ultra-Fast** for GPU speeds.")
        with gr.Row():
            with gr.Column(scale=1):
                prompt_field = gr.Textbox(
                    label="What do you want to see?",
                    placeholder="e.g. A futuristic city",
                    lines=3
                )
                # Hidden by default; toggle_ultra reveals it in Ultra mode.
                negative_field = gr.Textbox(
                    label="Negative Prompt",
                    value="blurry, low quality, distorted",
                    visible=False
                )
                with gr.Accordion("Settings ⚙️", open=True):
                    resolution = gr.Radio([512, 768, 1024], value=512, label="Resolution")
                    steps = gr.Slider(6, 10, value=6, step=1, label="Inference Steps")
                    ultra_check = gr.Checkbox(label="Ultra-Fast Gen (A few IMGs per day)")
                generate_btn = gr.Button("🚀 Generate Image", variant="primary", elem_classes="generate-btn")
            with gr.Column(scale=1):
                output_img = gr.Image(label="Result", interactive=False)
                status = gr.Markdown("🟢 Ready", elem_classes="status-box")
        # Connect the UI logic
        # Checkbox drives the widget reconfiguration (visibility/ranges).
        ultra_check.change(
            toggle_ultra,
            inputs=[ultra_check],
            outputs=[negative_field, resolution, steps]
        )
        # generate is a generator, so each yield streams a UI update.
        generate_btn.click(
            generate,
            inputs=[prompt_field, negative_field, resolution, steps, ultra_check],
            outputs=[output_img, status, generate_btn]
        )
# Start the Gradio server. (Removed a stray trailing "|" — a table artifact
# from the page the file was extracted from — which made this line a
# SyntaxError: `demo.launch() |` is an incomplete binary-or expression.)
demo.launch()