File size: 3,817 Bytes
181f56f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
#!/usr/bin/env python

from __future__ import annotations

import os
import shlex
import subprocess

import gradio as gr
import numpy as np
import torch
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# On Hugging Face Spaces, install a patched modelscope build from an open PR
# branch (the released package presumably lacks support for this pipeline —
# TODO confirm once the PR is merged). NOTE(review): no check=True, so a
# failed install is silently ignored and surfaces later at pipeline load.
if os.getenv('SYSTEM') == 'spaces':
    subprocess.run(
        shlex.split(
            'pip install git+https://github.com/modelscope/modelscope.git@refs/pull/173/head'
        ))

# Markdown heading rendered at the top of the demo page.
DESCRIPTION = '# [ModelScope Chinese text2image (tiny)](https://www.modelscope.cn/models/damo/cv_diffusion_text-to-image-synthesis_tiny/summary)'

# When running inside a Space, append a "Duplicate Space" badge so users can
# clone the app and attach their own GPU for faster inference.
SPACE_ID = os.getenv('SPACE_ID')
if SPACE_ID is not None:
    DESCRIPTION += f'<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'

# Load the text-to-image diffusion pipeline once at import time; reused by
# every call to run().
pipe = pipeline(Tasks.text_to_image_synthesis,
                'damo/cv_diffusion_text-to-image-synthesis_tiny')


def run(
    text: str,
    seed: int,
    num_steps_generator: int,
    num_steps_upscaler1: int,
    num_steps_upscaler2: int,
    guidance_scale: float,
) -> np.ndarray:
    """Synthesize one image for *text* with the module-level ModelScope pipeline.

    Seeds torch's global RNG for reproducibility, then runs the three-stage
    DDIM pipeline (64px generator, 64=>256 and 256=>1024 upscalers) with the
    given step counts and guidance scale. Returns the first output image as
    an array.
    """
    torch.manual_seed(seed)
    request = {
        'text': text,
        'solver': 'ddim',
        'generator_ddim_timesteps': num_steps_generator,
        'upsampler_256_ddim_timesteps': num_steps_upscaler1,
        'upsampler_1024_ddim_timesteps': num_steps_upscaler2,
        'generator_guide_scale': guidance_scale,
    }
    output = pipe(request)
    return output['output_imgs'][0]


# One cached example row: (prompt, seed, generator steps, upscaler-1 steps,
# upscaler-2 steps, guidance scale) — mirrors the `inputs` list below.
examples = [
    ['中国山水画', 0, 250, 50, 20, 5.0],
]

# Build the Gradio UI: prompt + seed + advanced sliders on the left,
# generated image on the right.
with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column():
            text = gr.Text(label='Prompt')
            # randomize=True picks a fresh seed on each page load.
            seed = gr.Slider(label='Seed',
                             minimum=0,
                             maximum=100000,
                             value=0,
                             step=1,
                             randomize=True)
            run_button = gr.Button('Run')
            with gr.Accordion('Advanced options', open=False):
                num_steps_generator = gr.Slider(label='Steps (Generator)',
                                                minimum=1,
                                                maximum=1000,
                                                value=250,
                                                step=1)
                num_steps_upscaler1 = gr.Slider(
                    label='Steps (Upscaler 64=>256)',
                    minimum=1,
                    maximum=50,
                    value=50,
                    step=1)
                num_steps_upscaler2 = gr.Slider(
                    label='Steps (Upscaler 256=>1024)',
                    minimum=1,
                    maximum=20,
                    value=20,
                    step=1)
                guidance_scale = gr.Slider(label='Guidance scale',
                                           minimum=0,
                                           maximum=100,
                                           value=5.0,
                                           step=0.1)
        with gr.Column():
            result = gr.Image(label='Output')

    # Component order must match run()'s parameter order.
    inputs = [
        text,
        seed,
        num_steps_generator,
        num_steps_upscaler1,
        num_steps_upscaler2,
        guidance_scale,
    ]
    with gr.Row():
        # cache_examples=True runs the example once at build time and serves
        # the stored result instead of re-running inference per click.
        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    fn=run,
                    cache_examples=True)

    # Trigger inference on Enter in the prompt box or on the Run button.
    text.submit(fn=run, inputs=inputs, outputs=result)
    run_button.click(fn=run, inputs=inputs, outputs=result)

# api_open=False disables the public API endpoint; the queue serializes
# inference requests.
demo.queue(api_open=False).launch()