| import gradio as gr |
| from diffusers import DiffusionPipeline |
| import torch |
|
|
| |
# Pick the device first so the dtype can match it: float16 is only reliably
# supported on CUDA — running fp16 on CPU breaks (or silently degrades) many
# PyTorch ops, which was a latent bug in the original fp16+CPU combination.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

pipeline = DiffusionPipeline.from_pretrained(
    "John6666/t-ponynai3-v6-sdxl",
    torch_dtype=dtype,
    # NOTE(review): `safety_checker` is an SD 1.x kwarg; SDXL pipelines have no
    # safety checker component and ignore it, so it is dropped here.
).to(device)

# Trade a little speed for a much smaller attention memory footprint —
# important for SDXL, especially when falling back to CPU.
pipeline.enable_attention_slicing()
|
|
def generate_image(prompt, negative_prompt, progress=gr.Progress()):
    """Generate one image from *prompt* and report real per-step progress.

    Args:
        prompt: Positive text prompt describing the desired image.
        negative_prompt: Text describing content to steer away from.
        progress: Gradio progress tracker (injected by Gradio at call time).

    Returns:
        A PIL.Image produced by the diffusion pipeline.
    """
    num_inference_steps = 20

    # BUG FIX: the original code ran a bare `for` loop that pushed the progress
    # bar to 100% *before* generation started — a fake progress indicator.
    # Instead, hook the pipeline's per-step callback so the bar tracks the
    # actual denoising loop.
    def _report_step(pipe, step, timestep, callback_kwargs):
        progress((step + 1) / num_inference_steps)
        return callback_kwargs

    image = pipeline(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        callback_on_step_end=_report_step,
    ).images[0]

    return image
|
|
| |
# Assemble the two-column UI: prompt inputs + trigger on the left,
# rendered image on the right.
with gr.Blocks() as demo:
    gr.Markdown("# Text-to-Image Generator with John6666/t-ponynai3-v6-sdxl models")

    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(
                label="Enter your prompt",
                placeholder="Describe the image you want to generate",
            )
            negative_box = gr.Textbox(
                label="Enter negative prompt",
                placeholder="Describe what you want to avoid",
            )
            run_button = gr.Button("Generate")

        with gr.Column():
            result_image = gr.Image(label="Generated Image")

    # Clicking the button feeds both textboxes to the generator and shows
    # the returned image in the output component.
    run_button.click(
        fn=generate_image,
        inputs=[prompt_box, negative_box],
        outputs=result_image,
    )

demo.launch()
|
|