Hugging Face Spaces — running on a T4 GPU.
| import gradio as gr | |
| import torch | |
| import numpy as np | |
| import modin.pandas as pd | |
| from PIL import Image | |
| from diffusers import DiffusionPipeline | |
# Prefer the GPU when CUDA is available; otherwise run on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load OpenJourney v4 in half precision. NOTE(review): the safety checker is
# explicitly disabled here (safety_checker=None) — intentional per the author.
pipe = DiffusionPipeline.from_pretrained(
    "prompthero/openjourney-v4",
    torch_dtype=torch.float16,
    safety_checker=None,
)
pipe = pipe.to(device)
def genie(prompt, scale, steps, Seed):
    """Generate a single image from *prompt* using the module-level pipeline.

    Args:
        prompt: Text prompt (the UI notes a 77-token limit from the model's
            text encoder).
        scale: Classifier-free guidance scale passed to the pipeline.
        steps: Number of denoising iterations.
        Seed: RNG seed so results are reproducible.

    Returns:
        The first (and only) generated PIL image.
    """
    # gr.Slider delivers floats, but torch.Generator.manual_seed requires an
    # int — cast defensively.
    generator = torch.Generator(device=device).manual_seed(int(Seed))
    # num_inference_steps must likewise be an int.
    images = pipe(
        prompt,
        num_inference_steps=int(steps),
        guidance_scale=scale,
        generator=generator,
    ).images[0]
    return images
# Build and launch the Gradio UI. Inputs map positionally onto genie's
# (prompt, scale, steps, Seed) parameters.
gr.Interface(
    fn=genie,
    inputs=[
        gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
        gr.Slider(1, maximum=15, value=10, step=.25,
                  label='Prompt Guidance Scale:', interactive=True),
        gr.Slider(1, maximum=100, value=50, step=1,
                  label='Number of Iterations: 50 is typically fine.'),
        # FIX: label added — this slider previously rendered unlabeled, so
        # users could not tell it was the RNG seed.
        gr.Slider(minimum=1, step=1, maximum=987654321, randomize=True,
                  interactive=True, label='Seed'),
    ],
    outputs=gr.Image(label='512x512 Generated Image'),
    title="OpenJourney V4 GPU",
    description="OJ V4 GPU. Ultra Fast, now running on a T4",
    article="Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>",
    # FIX: launch(max_threads=...) expects an int; the original passed True,
    # and since bool is an int subtype (True == 1) that silently capped the
    # app at one worker thread. 40 is Gradio's documented default.
).launch(debug=True, max_threads=40)