# WiNE-iNEFF's picture
# Update app.py
# c8fe760
# raw
# history blame
# 3.85 kB
import gradio as gr
import torch, torchvision
from torchvision import transforms
import torch.nn.functional as F
import numpy as np
from time import time, ctime
from PIL import Image, ImageColor
from diffusers import DDPMPipeline
from diffusers import DDIMScheduler, PNDMScheduler
from tqdm import tqdm
# Pick the best available accelerator: Apple MPS first, then CUDA, else CPU.
if torch.backends.mps.is_available():
    device = "mps"
elif torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
# Hugging Face Hub repo id of the pretrained unconditional diffusion pipeline.
pipeline_name = 'WiNE-iNEFF/Minecraft-Skin-Diffusion'
# Download (or load from local cache) the DDPM pipeline at import time and
# move it to the selected device; only its UNet is used in the sampling loop.
image_pipe = DDPMPipeline.from_pretrained(pipeline_name).to(device)
class Model:
    """Lightweight wrapper holding a scheduler name shown in the UI dropdown.

    Only the ``name`` attribute is read elsewhere (dropdown choices and the
    default value), so the interface is just ``Model(name).name``.
    """

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        # Debug-friendly representation, e.g. Model('DDIMScheduler').
        return f"{type(self).__name__}({self.name!r})"


# Schedulers the user can choose between; the first entry is the default.
model = [
    Model("DDIMScheduler"),
    Model("PNDMScheduler"),
]
current_model = model[0]
def show_images_save(x):
"""Given a batch of images x, make a grid and convert to PIL"""
x = x * 0.5 + 0.5 # Map from (-1, 1) back to (0, 1)
grid = torchvision.utils.make_grid(x, nrow=4)
grid_im = grid.detach().cpu().permute(1, 2, 0).clip(0, 1) * 255
grid_im = Image.fromarray(np.array(grid_im).astype(np.uint8))
return grid_im
def generate(schedul):
    """Sample one image from the pretrained pipeline.

    Parameters
    ----------
    schedul : str
        "DDIMScheduler" selects DDIM; any other value falls back to PNDM.

    Returns
    -------
    PIL.Image.Image
        The generated sample rendered via ``show_images_save``.
    """
    if schedul == "DDIMScheduler":
        scheduler = DDIMScheduler.from_pretrained(pipeline_name)
    else:
        scheduler = PNDMScheduler.from_pretrained(pipeline_name)
    scheduler.set_timesteps(num_inference_steps=20)
    # Start from pure Gaussian noise. 4 channels presumably matches the
    # UNet's in_channels (RGBA skins) — TODO confirm against the model config.
    x = torch.randn(1, 4, 64, 64).to(device)
    # Minimal sampling loop: denoise step by step along the scheduler's
    # timesteps. (Was tqdm(enumerate(...)) with an unused index, which also
    # hid the total from tqdm's progress display.)
    for t in tqdm(scheduler.timesteps, total=len(scheduler.timesteps)):
        model_input = scheduler.scale_model_input(x, t)
        with torch.no_grad():
            noise_pred = image_pipe.unet(model_input, t)["sample"]
        x = scheduler.step(noise_pred, t, x).prev_sample
    # View the results
    return show_images_save(x)
def ex(scheduler):
    """Log the current wall-clock time, then generate two images with the chosen scheduler."""
    print(ctime(time()))
    first = generate(scheduler)
    second = generate(scheduler)
    return first, second
# Build the Gradio UI. The CSS caps preview image height, centres the page,
# and shrinks Gradio's default min-height on the image containers.
demo = gr.Blocks(css="#img_size {max-height: 128px} .container {max-width: 730px; margin: auto;} .min-h-\[15rem\]{min-height: 5rem !important;}")
with demo:
    # Page header and short description.
    gr.HTML(
        """
        <div style="text-align: center; margin: 0 auto;">
        <div style="display: inline-flex;align-items: center;gap: 0.8rem;font-size: 1.75rem;">
        <h1 style="font-weight: 900; margin-bottom: 7px;margin-top:5px">
        Minecraft Skin Diffusion
        </h1>
        </div>
        <p style="margin-bottom: 10px; font-size: 94%; line-height: 23px;">
        Gradio demo for Minecraft Skin Diffusion. This is simple Unconditional Diffusion Model that will help you generate skins for game Minecraft.
        </p>
        </div>
        """
    )
    with gr.Column():
        # NOTE(review): Row().style(...) and gr.Image(shape=...) are pre-4.x
        # Gradio APIs — presumably the Space pins an older gradio version.
        with gr.Row().style(equal_height=True):
            # Scheduler picker; choices come from the module-level `model` list.
            model_name = gr.Dropdown(label="Base Scheduler", choices=[m.name for m in model], value=current_model.name)
            #number = gr.Number(value="40", label="number of generation steps (Standard value 40, MAX 1000; The larger the number, the better the quality, but the longer it takes)", show_label=True)
        with gr.Row().style(equal_height=True):
            # Two output slots — ex() returns two generated images per click.
            out = gr.Image(shape=(64,64), image_mode='RGBA', type='pil', elem_id='img_size')
            out2 = gr.Image(shape=(64,64), image_mode='RGBA', type='pil', elem_id='img_size')
    greet_btn = gr.Button("Generate")
    # Clicking runs ex(scheduler_name) -> (image, image).
    greet_btn.click(fn=ex, inputs=[model_name], outputs=[out, out2])
    # Footer with author credit and a visitor-counter badge.
    gr.HTML(
        """
        <div class="footer">
        <div style='text-align: center;'>Minecraft Skin Diffusion by <a href='https://twitter.com/wine_ineff' target='_blank'>Artsem Holub (WiNE-iNEFF)</a> |
        <center>
        <img src='https://visitor-badge.glitch.me/badge?page_id=WiNE-iNEFF_MinecraftSkin-Diffusion' alt='visitor badge'>
        </center>
        </div>
        </div>
        """
    )
demo.launch()