# Gradio demo: fast text-to-image generation with the LCM Dreamshaper pipeline.
import gradio as gr
import torch
import re
import time
from diffusers import DiffusionPipeline
# from transformers import pipeline, GPT2Tokenizer, GPT2LMHeadModel
# Prefer the CUDA accelerator when available; otherwise run on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Latent Consistency Model checkpoint — designed to produce usable images
# in very few inference steps (see the 4-12 step slider in the UI below).
image_model_id = "SimianLuo/LCM_Dreamshaper_v7"
image_pipe = DiffusionPipeline.from_pretrained(image_model_id)
image_pipe.to(device)
def generate_workflow(prompt, width, height, steps):
    """Generate an image from *prompt* using the module-level LCM pipeline.

    A generator so Gradio can stream progress: first yields an in-progress
    status with no image, then the final status with the generated image.

    Args:
        prompt: Free-form text description from the user.
        width: Target image width in pixels (coerced to int).
        height: Target image height in pixels (coerced to int).
        steps: Number of LCM inference steps (coerced to int).

    Yields:
        Tuples of (status_message: str, image: PIL.Image | None).
    """
    start_time = time.time()
    # The GPT-2 based prompt enhancer is disabled; just append a generic
    # quality hint to the raw user prompt.
    final_prompt = f"{prompt}, centered and realistic (if applicable)"
    yield "🎨 Generating (Image generator AI)...", None
    image = image_pipe(
        prompt=final_prompt,
        width=int(width),
        height=int(height),
        num_inference_steps=int(steps),
        guidance_scale=8.0,   # classifier-free guidance strength
        lcm_origin_steps=50,  # LCM distillation schedule length
        output_type="pil"
    ).images[0]
    duration = round(time.time() - start_time, 2)
    yield f"✅ Done in {duration}s", image
# ---- UI layout -----------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(), title="AI Image Lab") as demo:
    gr.Markdown("# ⚡️ AI Image PRO")
    with gr.Row():
        # Left column: user inputs.
        with gr.Column(scale=1):
            idea_box = gr.Textbox(
                label="💡 Your Idea",
                placeholder="e.g., A cute dragon",
                lines=3
            )
            with gr.Row():
                w_ctrl = gr.Slider(256, 768, 512, step=64, label="📏 Width")
                h_ctrl = gr.Slider(256, 768, 512, step=64, label="📐 Height")
            steps_ctrl = gr.Slider(4, 12, 5, step=1, label="🏃 Steps")
            run_btn = gr.Button("🚀 Generate", variant="primary")
        # Right column: live status plus the resulting image.
        with gr.Column(scale=1):
            status_md = gr.Markdown("### Status: ✅**Ready**")
            result_img = gr.Image(label="🖼️ Result")
    # generate_workflow is a generator, so each yielded (status, image)
    # pair streams straight into the two output components.
    run_btn.click(
        fn=generate_workflow,
        inputs=[idea_box, w_ctrl, h_ctrl, steps_ctrl],
        outputs=[status_md, result_img]
    )
demo.launch()