# Pasted from a Hugging Face Spaces page; the Space status showed "Runtime error" at capture time.
import torch
from PIL import Image
from transformers import CLIPTokenizer

import gradio as gr

import model_loader
import pipeline
# Select GPU when available; all weights are loaded onto this device.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {DEVICE}")

# Load the CLIP tokenizer and the Stable Diffusion v1.5 checkpoint.
# NOTE(review): paths are hard-coded relative to the working directory —
# the app must be launched from the project root for these files to resolve.
model_file = "./data/v1-5-pruned-emaonly.ckpt"
tokenizer = CLIPTokenizer("./data/vocab.json", merges_file="./data/merges.txt")
models = model_loader.preload_models_from_standard_weights(model_file, DEVICE)
def generate_image(prompt, uncond_prompt, cfg_scale, sampler, num_steps, seed, image, strength, width=512, height=512):
    """Run the diffusion pipeline and return the result as a PIL image.

    Args:
        prompt: Text prompt describing the desired image.
        uncond_prompt: Negative/unconditional prompt used for CFG.
        cfg_scale: Classifier-free guidance scale passed to the pipeline.
        sampler: Sampler name (e.g. "ddpm") passed as ``sampler_name``.
        num_steps: Number of inference steps.
        seed: RNG seed; falsy values (empty/0) are treated as "no seed".
        image: Optional uploaded file for image-to-image; None for txt2img.
        strength: Denoising strength for image-to-image.
        width, height: Accepted for caller compatibility; defaulted to 512 so
            event bindings that omit them do not raise TypeError.
            NOTE(review): they are not forwarded to pipeline.generate —
            confirm whether the pipeline supports variable output sizes.

    Returns:
        PIL.Image.Image built from the pipeline's output array.
    """
    # `image` is a gr.File upload; open it only when one was provided.
    input_image = Image.open(image) if image else None
    output_image = pipeline.generate(
        prompt=prompt,
        uncond_prompt=uncond_prompt,
        input_image=input_image,
        strength=strength,
        do_cfg=True,
        cfg_scale=cfg_scale,
        sampler_name=sampler,
        n_inference_steps=num_steps,
        # NOTE(review): seed == 0 collapses to None (unseeded) — confirm intended.
        seed=int(seed) if seed else None,
        models=models,
        device=DEVICE,
        idle_device="cpu",  # offload idle submodels to CPU to save VRAM
        tokenizer=tokenizer,
    )
    # pipeline.generate returns an array-like image; wrap it as a PIL image.
    return Image.fromarray(output_image)
# Build the Gradio UI. The click binding below supplies every parameter of
# generate_image() — the original passed only 8 of its 10 parameters, which
# raised TypeError on every generation attempt.
with gr.Blocks() as demo:
    gr.Markdown("# Text-to-Image Gradio Interface")
    prompt = gr.Textbox(label="Prompt", value="Astronaut in a jungle, cold color palette, muted colors, detailed, 8k")
    generate_btn = gr.Button("Run")
    output_image = gr.Image(label="Generated Image")

    with gr.Accordion("Advanced Settings", open=False):
        seed = gr.Number(value=42, label="Seed", interactive=True)
        # NOTE(review): this checkbox is not wired to any handler yet —
        # toggling it has no effect on the generation.
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        cfg_scale = gr.Slider(1, 14, value=8, step=0.5, label="CFG Scale")
        sampler = gr.Dropdown(["ddpm", "ddim", "plms"], value="ddpm", label="Sampler")
        num_steps = gr.Slider(1, 100, value=50, step=1, label="Number of inference steps")
        image = gr.File(label="Upload Image (Optional)")
        strength = gr.Slider(0, 1, value=0.75, step=0.05, label="Strength (for Image-to-Image)")

    # Hidden components that supply the remaining generate_image() arguments.
    uncond_prompt = gr.Textbox(value="", visible=False)
    width = gr.Number(value=512, visible=False)
    height = gr.Number(value=512, visible=False)

    generate_btn.click(
        generate_image,
        inputs=[prompt, uncond_prompt, cfg_scale, sampler, num_steps, seed, image, strength, width, height],
        outputs=output_image,
    )

demo.launch()