#!/usr/bin/env python
# -*- coding:utf-8 -*-

import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # 🔥 Force CPU: hide all CUDA devices before torch is imported

import warnings
warnings.filterwarnings("ignore")

import numpy as np
import gradio as gr
from pathlib import Path
from omegaconf import OmegaConf
from sampler_invsr import InvSamplerSR

from utils import util_common
from utils import util_image
from basicsr.utils.download_util import load_file_from_url

# Optional: enforce CPU in torch if it is used internally
try:
    import torch
    torch.set_default_device("cpu")  # available in torch >= 2.0
except (ImportError, AttributeError):
    pass


def get_configs(num_steps=1, chopping_size=128, seed=12345):
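    """Build the sampling config for SD-Turbo inversion.

    Maps `num_steps` to a hand-tuned timestep schedule (falling back to a
    linear schedule for larger step counts), downloads the noise-predictor
    checkpoint on first use, and sets the chopping (tiling) parameters.
    """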
    configs = OmegaConf.load("./configs/sample-sd-turbo.yaml")

    if num_steps == 1:
        configs.timesteps = [200,]
    elif num_steps == 2:
        configs.timesteps = [200, 100]
    elif num_steps == 3:
        configs.timesteps = [200, 100, 50]
    elif num_steps == 4:
        configs.timesteps = [200, 150, 100, 50]
    elif num_steps == 5:
        configs.timesteps = [250, 200, 150, 100, 50]
    else:
        assert num_steps <= 250
        configs.timesteps = np.linspace(
            start=250, stop=0, num=num_steps, endpoint=False, dtype=np.int64
        ).tolist()

    print(f'Setting timesteps for inference: {configs.timesteps}')

    started_ckpt_name = "noise_predictor_sd_turbo_v5.pth"
    started_ckpt_dir = "./weights"
    util_common.mkdir(started_ckpt_dir, delete=False, parents=True)

    started_ckpt_path = Path(started_ckpt_dir) / started_ckpt_name
    if not started_ckpt_path.exists():
        load_file_from_url(
            url="https://huggingface.co/OAOA/InvSR/resolve/main/noise_predictor_sd_turbo_v5.pth",
            model_dir=started_ckpt_dir,
            progress=True,
            file_name=started_ckpt_name,
        )

    configs.model_start.ckpt_path = str(started_ckpt_path)
    configs.bs = 1
    configs.seed = seed
    configs.basesr.chopping.pch_size = chopping_size

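    # extra_bs is the batch size used when processing image tiles: smaller
    # tiles allow more items per batch (assumed memory trade-off, following
    # the values used upstream).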
    if chopping_size == 128:
        configs.basesr.chopping.extra_bs = 8
    elif chopping_size == 256:
        configs.basesr.chopping.extra_bs = 4
    else:
        configs.basesr.chopping.extra_bs = 1

    return configs


# ❌ Removed @spaces.GPU: this build runs inference entirely on CPU
def predict(in_path, num_steps=1, chopping_size=128, seed=12345):
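    """Super-resolve a single input image on CPU.

    Returns the result both as a numpy array (for the Gradio image output)
    and as a file path (for the download link).
    """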
    configs = get_configs(num_steps=num_steps, chopping_size=chopping_size, seed=seed)

    sampler = InvSamplerSR(configs)

    out_dir = Path('invsr_output')
    out_dir.mkdir(exist_ok=True)

    sampler.inference(in_path, out_path=out_dir, bs=1)

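    # InvSamplerSR is expected to write the result into out_dir as "<input stem>.png".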
    out_path = out_dir / f"{Path(in_path).stem}.png"
    assert out_path.exists(), 'Super-resolution failed!'

    im_sr = util_image.imread(out_path, chn="rgb", dtype="uint8")

    return im_sr, str(out_path)


title = "Arbitrary-steps Image Super-resolution via Diffusion Inversion"

description = """
<b>CPU version</b> of the InvSR demo.<br>
⚠️ Note: Running on CPU is significantly slower than on a GPU.
"""

demo = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(type="filepath", label="Input Image"),
        gr.Dropdown([1, 2, 3, 4, 5], value=1, label="Steps"),
        gr.Dropdown([128, 256, 512], value=128, label="Chopping size"),
        gr.Number(value=12345, precision=0, label="Seed")
    ],
    outputs=[
        gr.Image(type="numpy", label="Output Image"),
        gr.File(label="Download")
    ],
    title=title,
    description=description
)

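# Queue requests so slow CPU inference doesn't block concurrent users; cap the backlog at 5.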
demo.queue(max_size=5)
demo.launch()