| |
| |
|
|
| import os |
| os.environ["CUDA_VISIBLE_DEVICES"] = "" |
|
|
| import spaces |
| import warnings |
| warnings.filterwarnings("ignore") |
|
|
| import argparse |
| import numpy as np |
| import gradio as gr |
| from pathlib import Path |
| from omegaconf import OmegaConf |
| from sampler_invsr import InvSamplerSR |
|
|
| from utils import util_common |
| from utils import util_image |
| from basicsr.utils.download_util import load_file_from_url |
|
|
| |
# Best-effort pin of torch to CPU. `set_default_device` requires torch >= 2.0;
# if torch is missing or too old, continue — CUDA is already hidden via the env var.
try:
    import torch
    torch.set_default_device("cpu")
except Exception:  # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
    pass
|
|
|
|
def get_configs(num_steps=1, chopping_size=128, seed=12345):
    """Build the InvSR sampling configuration.

    Args:
        num_steps: Number of diffusion inversion steps (1-250).
        chopping_size: Patch size used to chop large inputs (128/256/512).
        seed: Random seed for reproducible sampling.

    Returns:
        An OmegaConf config object ready to pass to ``InvSamplerSR``.

    Raises:
        ValueError: If ``num_steps`` is outside the supported range.
    """
    configs = OmegaConf.load("./configs/sample-sd-turbo.yaml")

    # Hand-tuned schedules for few-step inference; larger step counts fall
    # back to an even spacing over (0, 250].
    preset_timesteps = {
        1: [200],
        2: [200, 100],
        3: [200, 100, 50],
        4: [200, 150, 100, 50],
        5: [250, 200, 150, 100, 50],
    }
    if num_steps in preset_timesteps:
        configs.timesteps = preset_timesteps[num_steps]
    else:
        # `assert` is stripped under `python -O`; validate explicitly instead.
        if not 1 <= num_steps <= 250:
            raise ValueError(f"num_steps must be in [1, 250], got {num_steps}")
        # dtype must be the type object `np.int64`, not `np.int64()` — calling
        # it creates a scalar instance, which np.linspace rejects as a dtype.
        configs.timesteps = np.linspace(
            start=250, stop=0, num=num_steps, endpoint=False, dtype=np.int64
        ).tolist()

    print(f'Setting timesteps for inference: {configs.timesteps}')

    # Download the noise-predictor checkpoint on first run, then cache it.
    started_ckpt_name = "noise_predictor_sd_turbo_v5.pth"
    started_ckpt_dir = "./weights"
    util_common.mkdir(started_ckpt_dir, delete=False, parents=True)

    started_ckpt_path = Path(started_ckpt_dir) / started_ckpt_name
    if not started_ckpt_path.exists():
        load_file_from_url(
            url="https://huggingface.co/OAOA/InvSR/resolve/main/noise_predictor_sd_turbo_v5.pth",
            model_dir=started_ckpt_dir,
            progress=True,
            file_name=started_ckpt_name,
        )

    configs.model_start.ckpt_path = str(started_ckpt_path)
    configs.bs = 1
    configs.seed = seed
    configs.basesr.chopping.pch_size = chopping_size

    # Smaller patches can be batched more aggressively per forward pass.
    if chopping_size == 128:
        configs.basesr.chopping.extra_bs = 8
    elif chopping_size == 256:
        configs.basesr.chopping.extra_bs = 4
    else:
        configs.basesr.chopping.extra_bs = 1

    return configs
|
|
|
|
| |
def predict(in_path, num_steps=1, chopping_size=128, seed=12345):
    """Run InvSR super-resolution on a single image file.

    Args:
        in_path: Path to the input image.
        num_steps: Number of diffusion inversion steps.
        chopping_size: Patch size for chopping large inputs.
        seed: Random seed.

    Returns:
        Tuple of (uint8 RGB numpy array of the result, path to the saved PNG).

    Raises:
        RuntimeError: If the sampler did not produce an output file.
    """
    configs = get_configs(num_steps=num_steps, chopping_size=chopping_size, seed=seed)

    sampler = InvSamplerSR(configs)

    out_dir = Path('invsr_output')
    out_dir.mkdir(exist_ok=True)

    sampler.inference(in_path, out_path=out_dir, bs=1)

    # The sampler writes `<input stem>.png` into out_dir.
    out_path = out_dir / f"{Path(in_path).stem}.png"
    # `assert` is stripped under `python -O`; fail loudly instead.
    if not out_path.exists():
        raise RuntimeError('Super-resolution failed!')

    im_sr = util_image.imread(out_path, chn="rgb", dtype="uint8")

    return im_sr, str(out_path)
|
|
|
|
# UI copy for the Gradio interface (description is rendered as HTML).
title = "Arbitrary-steps Image Super-resolution via Diffusion Inversion"

description = """
<b>CPU version</b> of InvSR demo.<br>
⚠️ Note: Running on CPU will be significantly slower than GPU.
"""
|
|
# Wire the predictor into a simple Gradio form: one image in, the
# super-resolved image plus a downloadable file out.
demo = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(type="filepath", label="Input Image"),
        gr.Dropdown([1,2,3,4,5], value=1, label="Steps"),
        gr.Dropdown([128, 256, 512], value=128, label="Chopping size"),
        gr.Number(value=12345, precision=0, label="Seed")
    ],
    outputs=[
        gr.Image(type="numpy", label="Output Image"),
        gr.File(label="Download")
    ],
    title=title,
    description=description
)

# Queue requests (CPU inference is slow) and cap the backlog at 5.
demo.queue(max_size=5)
demo.launch()