Xhaheen committed
Commit d2328c7 · 1 Parent(s): 55088db

Create app.py

Files changed (1)
  1. app.py +147 -0
app.py ADDED
@@ -0,0 +1,147 @@
+ from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
+ import gradio as gr
+ import torch
+ from PIL import Image
+
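+ # DreamBooth fine-tune to load, and the instance prompt it was trained on.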
+ model_id = 'Xhaheen/srkay-man_6-1-2022'
+ prefix = 'A portrait of srkay man'
+
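+ # Load the scheduler config from the model repo and sample with multistep DPM-Solver,
+ # which gives good results in ~25 steps.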
+ scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
+
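+ # Build both pipelines once at startup; fp16 halves memory use when a GPU is available.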
+ pipe = StableDiffusionPipeline.from_pretrained(
+     model_id,
+     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+     scheduler=scheduler)
+
+ pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
+     model_id,
+     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+     scheduler=scheduler)
+
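+ # Move the pipelines to the GPU when one is available.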
+ if torch.cuda.is_available():
+     pipe = pipe.to("cuda")
+     pipe_i2i = pipe_i2i.to("cuda")
+
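+ # Detect Google Colab so the launch step at the bottom can behave accordingly.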
+ def is_google_colab():
+     try:
+         import google.colab
+         return True
+     except ImportError:
+         return False
+
+ def error_str(error, title="Error"):
+     return f"""#### {title}
+ {error}""" if error else ""
+
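+ # Entry point wired to the UI: routes to img2img when an input image is given, txt2img otherwise.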
+ def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
+     # Seed 0 means "random"; otherwise seed a generator on the active device
+     # (the original always used 'cuda', which fails on CPU-only hardware).
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     generator = torch.Generator(device).manual_seed(seed) if seed != 0 else None
+     prompt = f"{prefix} {prompt}" if auto_prefix else prompt
+
+     try:
+         if img is not None:
+             return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
+         else:
+             return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
+     except Exception as e:
+         return None, error_str(e)
+
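+ # Plain text-to-image generation with the fine-tuned pipeline.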
+ def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
+     result = pipe(
+         prompt,
+         negative_prompt=neg_prompt,
+         num_inference_steps=int(steps),
+         guidance_scale=guidance,
+         width=width,
+         height=height,
+         generator=generator)
+
+     return result.images[0]
+
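+ # Image-to-image: the input picture is scaled to fit the requested size (aspect ratio preserved)
+ # before being used as the starting point for denoising.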
+ def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
+     ratio = min(height / img.height, width / img.width)
+     img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
+     result = pipe_i2i(
+         prompt,
+         negative_prompt=neg_prompt,
+         init_image=img,
+         num_inference_steps=int(steps),
+         strength=strength,
+         guidance_scale=guidance,
+         width=width,
+         height=height,
+         generator=generator)
+
+     return result.images[0]
+
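+ # Gradio UI: header, prompt box, generation options, and an image-to-image tab.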
+ css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}"""
+ with gr.Blocks(css=css) as demo:
+     gr.HTML(
+         f"""
+             <div class="main-div">
+               <div>
+                 <h1>Srkay Dreambooth</h1>
+               </div>
+               <p>
+                 Demo for the <a href="https://huggingface.co/Xhaheen/srkay-man_6-1-2022">Srkay</a> Stable Diffusion model.<br>
+                 {f"Add the following tokens to your prompts for the model to work properly: <b>{prefix}</b>" if prefix else ""}
+               </p>
+               Running on {"<b>GPU 🔥</b>" if torch.cuda.is_available() else f"<b>CPU 🥶</b>. For faster inference it is recommended to <b>upgrade to GPU in <a href='https://huggingface.co/spaces/carlosabadia/hasbulla/settings'>Settings</a></b> after duplicating the space"}<br><br>
+               <a style="display:inline-block" href="https://huggingface.co/Xhaheen/srkay-man_6-1-2022?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+               <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/drive/1ZB1_Z89BnjW_P76OLoQdcqVgPZfN8HEG?usp=sharing"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
+             </div>
+         """
+     )
+     with gr.Row():
+         with gr.Column(scale=55):
+             with gr.Group():
+                 with gr.Row():
+                     prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder=f"{prefix} [your prompt]").style(container=False)
+                     generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
+
+                 image_out = gr.Image(height=512)
+             error_output = gr.Markdown()
+
+         with gr.Column(scale=45):
+             with gr.Tab("Options"):
+                 with gr.Group():
+                     neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
+                     auto_prefix = gr.Checkbox(label=f"Prefix styling tokens automatically ({prefix})", value=bool(prefix), visible=bool(prefix))
+
+                     with gr.Row():
+                         guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
+                         steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
+
+                     with gr.Row():
+                         width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
+                         height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
+
+                     seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
+
+             with gr.Tab("Image to image"):
+                 with gr.Group():
+                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
+                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
+
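+     # Wire up events: the checkbox swaps the prompt placeholder, and pressing Enter
+     # or clicking Generate both run the same inference function.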
+     auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)
+
+     inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
+     outputs = [image_out, error_output]
+     prompt.submit(inference, inputs=inputs, outputs=outputs)
+     generate.click(inference, inputs=inputs, outputs=outputs)
+
+     gr.HTML("""
+     <div style="border-top: 1px solid #303030;">
+       <br>
+       <p>This space was created using <a href="https://huggingface.co/spaces/anzorq/sd-space-creator">SD Space Creator</a>.</p>
+     </div>
+     """)
+
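+ # Process one request at a time; in Colab, debug=True keeps the notebook cell attached to the running app.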
+ demo.queue(concurrency_count=1)
+ if is_google_colab():
+     demo.launch(debug=True)
+ else:
+     demo.launch()