import random

import gradio as gr
import torch
from diffusers import LDMTextToImagePipeline

# Load the latent diffusion text-to-image pipeline once at startup.
ldm_pipeline = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")


def predict(prompt, steps=100, seed=42, guidance_scale=6.0):
    # Free cached GPU memory between requests (no-op on CPU-only machines).
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    # A dedicated generator keeps results reproducible for a given seed
    # without touching the global RNG state.
    generator = torch.Generator().manual_seed(int(seed))
    # Recent diffusers releases return images via the `.images` attribute;
    # the old `["sample"]` key was removed.
    images = ldm_pipeline(
        [prompt],
        generator=generator,
        num_inference_steps=int(steps),
        eta=0.3,
        guidance_scale=guidance_scale,
    ).images
    return images[0]


random_seed = random.randint(0, 2147483647)

gr.Interface(
    predict,
    inputs=[
        gr.Textbox(label="Prompt", value="a chalk pastel drawing of a llama wearing a wizard hat"),
        gr.Slider(1, 100, value=50, step=1, label="Inference Steps"),
        gr.Slider(0, 2147483647, value=random_seed, step=1, label="Seed"),
        gr.Slider(1.0, 20.0, value=6.0, step=0.1, label="Guidance Scale (how much the prompt influences the result)"),
    ],
    outputs=gr.Image(type="pil", elem_id="output_image"),
    css="#output_image{width: 256px}",
).launch()
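
# A minimal requirements.txt sketch for running this Space; the original does
# not pin versions, so this package list is an assumption inferred from the
# imports above (transformers is pulled in for the LDM pipeline's tokenizer):
#
#   diffusers
#   transformers
#   torch
#   gradio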