yongqiang committed on
Commit 64616a9 · 1 Parent(s): a039fe8

add ax620e_320x320_models & gradio_demo.py

ax620e_320x320_models/img2img-init.png ADDED

Git LFS Details

  • SHA256: 42f0ee242d8caaee1aea5506c8318c6a920d559a63c6db8d79f993eebaf7d790
  • Pointer size: 131 Bytes
  • Size of remote file: 253 kB
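
The binaries in this commit are stored through Git LFS, so the repository tracks only a pointer with the object's SHA-256 and size. As a quick integrity check after pulling, you can hash the downloaded file and compare it against the pointer — a minimal Python sketch (path as listed in this commit):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large model blobs fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

# Expected digest taken from the LFS pointer above.
expected = "42f0ee242d8caaee1aea5506c8318c6a920d559a63c6db8d79f993eebaf7d790"
print(sha256_of("ax620e_320x320_models/img2img-init.png") == expected)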
ax620e_320x320_models/text_encoder/config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "_name_or_path": "/home/patrick/.cache/huggingface/hub/models--lykon-models--dreamshaper-7/snapshots/c4c9f9bec821e1862a78cbf45685cfb35b93638d/text_encoder",
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.0.dev0",
+   "vocab_size": 49408
+ }
ax620e_320x320_models/text_encoder/sd15_text_encoder_sim.axmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d348ba3a0f0c70552b92215a8f78496f1c2072364e393510e0101af382fbcf4
+ size 240153225
ax620e_320x320_models/time_input_img2img.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95d015256308e1be1af00793c77fa2ba8c934163beaa8015dec54d20048838cf
+ size 20608
ax620e_320x320_models/time_input_txt2img.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a48a430879c6a81a907889a6bb2b73f48cc9dc45b52f047ad2ee5c2dddcd2d10
+ size 20608
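
Both time_input_*.npy files hold the precomputed per-step time embeddings that gradio_demo.py (below) feeds to the UNet under the input named by TIME_EMBED_KEY, one row per denoising step. A minimal sketch for inspecting them, assuming the files have been pulled from LFS:

import numpy as np

# One row per denoising step; _denoise_loop indexes rows as time_inputs[i].
time_input = np.load("ax620e_320x320_models/time_input_txt2img.npy")
print(time_input.shape, time_input.dtype)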
ax620e_320x320_models/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
ax620e_320x320_models/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
ax620e_320x320_models/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
ax620e_320x320_models/tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
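
With merges.txt, vocab.json, special_tokens_map.json, and tokenizer_config.json in place, the tokenizer directory is a complete CLIPTokenizer checkpoint and can be loaded directly with transformers — a minimal sketch:

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("ax620e_320x320_models/tokenizer")
# model_max_length is 77, matching max_position_embeddings in the text encoder config.
batch = tokenizer("a photo of an astronaut", padding="max_length",
                  max_length=tokenizer.model_max_length, return_tensors="np")
print(batch["input_ids"].shape)  # (1, 77)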
 
ax620e_320x320_models/unet.axmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb7c119d421cac390ffc4c03305d865d020cacb3aed528a4b06bf8007dfaf78d
+ size 969190063
ax620e_320x320_models/vae_decoder.axmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36c5f95c7ea5f94cdd5d76814e6b098fcff8e78c82faecd1e78723429f275e23
+ size 94370744
ax620e_320x320_models/vae_decoder.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4bd0815ff6220d79e12aaaa526a269280e86918020e6fc576050163984f57a4
+ size 198057245
ax620e_320x320_models/vae_encoder.axmodel ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9124d11155d4ff6470a416264bae5412240e1ebe8c2b53e79935f8bcdfde0b8
+ size 60221332
ax620e_320x320_models/vae_encoder.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:386ca46adcf4b612b1ffa8cff88dd06e5c53c6292e6da161b27c088734815d03
+ size 136728111
gradio_demo.py ADDED
@@ -0,0 +1,420 @@
+ from typing import Optional, Tuple, Union
+ from functools import lru_cache
+ import argparse
+ import os
+ import warnings
+ import socket
+
+ import numpy as np
+ import torch
+ import gradio as gr
+
+ REQUIRED_GRADIO_VERSION = "5.42.0"
+ from PIL import Image
+
+ from diffusers.utils import make_image_grid, load_image
+ from diffusers.utils.torch_utils import randn_tensor
+
+ from launcher import (
+     resolve_dimensions,
+     compute_latent_shape,
+     get_embeds,
+     get_alphas_cumprod,
+     create_session,
+     prepare_init_image,
+     ensure_parent,
+     resolve_with_base,
+     add_noise,
+     retrieve_latents,
+     denoise_loop,
+     DiagonalGaussianDistribution,
+     AutoencoderKLOutput,
+     IMG2IMG_TIMESTEPS,
+     IMG2IMG_SELF_TIMESTEPS,
+     IMG2IMG_STEP_INDEX,
+     TXT2IMG_TIMESTEPS,
+     TIME_EMBED_KEY,
+ )
+
+
+ @lru_cache(maxsize=8)
+ def _cached_session(model_path: str, backend: str):
+     return create_session(model_path, backend)
+
+
+ def _check_gradio_version():
+     import gradio
+     ver = getattr(gradio, "__version__", None)
+     if ver is None or ver.split("+")[0] != REQUIRED_GRADIO_VERSION:
+         warnings.warn(
+             f"Detected gradio version {ver}; {REQUIRED_GRADIO_VERSION} is recommended to avoid compatibility issues."
+         )
+
+
+ def _preload_models(model_dir: str, backend: str, isize: Union[int, str]):
+     """Load the key models/inputs before the frontend starts, surfacing load errors early."""
+     backend = backend.lower()
+     model_suffix = ".axmodel" if backend == "axe" else ".onnx"
+     text_encoder_path = os.path.join(model_dir, "text_encoder", f"sd15_text_encoder_sim{model_suffix}")
+     unet_model = os.path.join(model_dir, f"unet{model_suffix}")
+     vae_decoder_model = os.path.join(model_dir, f"vae_decoder{model_suffix}")
+     vae_encoder_model = os.path.join(model_dir, f"vae_encoder{model_suffix}")
+
+     # Warm up the inference sessions ahead of time
+     _cached_session(text_encoder_path, backend)
+     _cached_session(unet_model, backend)
+     _cached_session(vae_decoder_model, backend)
+     # The VAE encoder is only used for img2img; skip it if absent
+     if os.path.exists(vae_encoder_model):
+         _cached_session(vae_encoder_model, backend)
+
+     # Preload the time-input files for txt2img & img2img
+     txt2img_time = os.path.join(model_dir, "time_input_txt2img.npy")
+     img2img_time = os.path.join(model_dir, "time_input_img2img.npy")
+     for path in (txt2img_time, img2img_time):
+         if os.path.exists(path):
+             np.load(path)
+         else:
+             warnings.warn(f"Missing time-input file: {path}")
+
+     # Validate the resolution, mainly to surface isize argument errors early
+     resolve_dimensions(isize, None, None)
+
+
+ def _list_host_ips() -> list:
+     ips = set()
+     try:
+         hostname = socket.gethostname()
+         infos = socket.getaddrinfo(hostname, None, family=socket.AF_INET)
+         for info in infos:
+             ip = info[4][0]
+             if ip and not ip.startswith("127."):
+                 ips.add(ip)
+     except Exception:
+         pass
+     if not ips:
+         ips.add("127.0.0.1")
+     return sorted(ips)
+
+
+ def _prepare_init_image_any(image_source: Union[str, Image.Image], height: int, width: int) -> Tuple[Image.Image, np.ndarray]:
+     if isinstance(image_source, Image.Image):
+         image = image_source.resize((width, height)).convert("RGB")
+         image_show = image.copy()
+         # HWC uint8 -> NCHW float32 in [-1, 1]
+         np_img = (np.array(image).astype(np.float32) / 255.0)[None, ...]
+         np_img = torch.from_numpy(np_img.transpose(0, 3, 1, 2)).numpy()
+         np_img = 2.0 * np_img - 1.0
+         return image_show, np_img
+     image_show, processed = prepare_init_image(str(image_source), height, width)
+     return image_show, processed
+
+
+ def _denoise_loop(latent: np.ndarray,
+                   prompt_embeds: np.ndarray,
+                   time_inputs: np.ndarray,
+                   timesteps: np.ndarray,
+                   unet_session,
+                   alphas_cumprod: np.ndarray,
+                   final_alphas_cumprod: float,
+                   generator: Optional[torch.Generator],
+                   noise_dtype: torch.dtype,
+                   self_timesteps: Optional[np.ndarray] = None,
+                   step_index: Optional[list] = None) -> np.ndarray:
+     # NOTE: run_pipeline below uses launcher.denoise_loop; this local variant
+     # is kept as a reference implementation of the scheduler step.
+     if time_inputs.shape[0] < len(timesteps):
+         raise ValueError("time_input has fewer steps than the number of inference timesteps")
+
+     device = torch.device("cpu")
+     for i, timestep in enumerate(timesteps):
+         latent = latent.astype(np.float32)
+         feeds = {
+             "sample": latent,
+             TIME_EMBED_KEY: np.expand_dims(time_inputs[i], axis=0),
+             "encoder_hidden_states": prompt_embeds,
+         }
+         noise_pred = unet_session.run(None, feeds)[0]
+
+         sample = latent
+         model_output = noise_pred
+         if self_timesteps is not None and step_index is not None:
+             prev_idx = step_index[i] + 1
+             if prev_idx < len(self_timesteps):
+                 prev_timestep = int(self_timesteps[prev_idx])
+             else:
+                 prev_timestep = int(timestep)
+         elif i + 1 < len(timesteps):
+             prev_timestep = int(timesteps[i + 1])
+         else:
+             prev_timestep = int(timestep)
+
+         alpha_prod_t = alphas_cumprod[int(timestep)]
+         alpha_prod_t_prev = alphas_cumprod[prev_timestep] if prev_timestep >= 0 else final_alphas_cumprod
+         beta_prod_t = 1 - alpha_prod_t
+         beta_prod_t_prev = 1 - alpha_prod_t_prev
+
+         # LCM boundary conditions: blend the predicted clean sample with the
+         # current sample via c_skip/c_out (timestep scaling factor 10).
+         scaled_timestep = int(timestep) * 10
+         c_skip = 0.5 ** 2 / (scaled_timestep ** 2 + 0.5 ** 2)
+         c_out = scaled_timestep / (scaled_timestep ** 2 + 0.5 ** 2) ** 0.5
+         predicted_original_sample = (sample - (beta_prod_t ** 0.5) * model_output) / (alpha_prod_t ** 0.5)
+
+         denoised = c_out * predicted_original_sample + c_skip * sample
+         if i != len(timesteps) - 1:
+             if noise_dtype == torch.float32 and generator is None:
+                 noise = torch.randn(model_output.shape, device=device, dtype=noise_dtype).cpu().numpy()
+             else:
+                 noise_tensor = randn_tensor(model_output.shape, generator=generator, device=device, dtype=noise_dtype)
+                 noise = noise_tensor.cpu().numpy()
+             prev_sample = (alpha_prod_t_prev ** 0.5) * denoised + (beta_prod_t_prev ** 0.5) * noise
+         else:
+             prev_sample = denoised
+
+         latent = prev_sample.astype(np.float32)
+
+     return latent
+
+
+ def run_pipeline(prompt: str,
+                  model_dir: str = "./models",
+                  backend: str = "axe",
+                  isize: Union[int, str] = "512",
+                  height: Optional[int] = None,
+                  width: Optional[int] = None,
+                  seed: Optional[int] = None,
+                  time_input_override: Optional[str] = None,
+                  init_image: Optional[Union[str, Image.Image]] = None,
+                  save_path: Optional[str] = None):
+     backend = backend.lower()
+     is_img2img = init_image is not None
+
+     tokenizer_dir = os.path.join(model_dir, "tokenizer")
+     text_encoder_dir = os.path.join(model_dir, "text_encoder")
+
+     model_suffix = ".axmodel" if backend == "axe" else ".onnx"
+     text_encoder_path = os.path.join(text_encoder_dir, f"sd15_text_encoder_sim{model_suffix}")
+     unet_model = os.path.join(model_dir, f"unet{model_suffix}")
+     vae_decoder_model = os.path.join(model_dir, f"vae_decoder{model_suffix}")
+     vae_encoder_model = os.path.join(model_dir, f"vae_encoder{model_suffix}")
+     time_input_default = "time_input_img2img.npy" if is_img2img else "time_input_txt2img.npy"
+     if time_input_override:
+         time_input_path = resolve_with_base(time_input_override, model_dir)
+     else:
+         time_input_path = os.path.join(model_dir, time_input_default)
+
+     if isinstance(init_image, str):
+         init_image_source = resolve_with_base(init_image, model_dir)
+     else:
+         init_image_source = init_image
+
+     height, width = resolve_dimensions(isize, height, width)
+
+     device = torch.device("cpu")
+     if seed is None or int(seed) < 0:
+         seed_used = int(torch.seed())
+     else:
+         seed_used = int(seed)
+     generator: Optional[torch.Generator] = torch.manual_seed(seed_used)
+     noise_dtype = torch.float16 if is_img2img else torch.float32
+
+     prompt_embeds_npy = get_embeds(prompt, tokenizer_dir, text_encoder_path, backend)
+     alphas_cumprod, final_alphas_cumprod, _ = get_alphas_cumprod()
+
+     vae_encoder_session = _cached_session(vae_encoder_model, backend) if is_img2img else None
+     unet_session = _cached_session(unet_model, backend)
+     vae_decoder_session = _cached_session(vae_decoder_model, backend)
+
+     time_input = np.load(time_input_path)
+
+     if is_img2img:
+         init_image_show, init_image_np = _prepare_init_image_any(init_image_source, height, width)
+
+         vae_encoder_inp_name = vae_encoder_session.get_inputs()[0].name
+         vae_encoder_out = vae_encoder_session.run(None, {vae_encoder_inp_name: init_image_np})[0]
+
+         posterior = DiagonalGaussianDistribution(torch.from_numpy(vae_encoder_out).to(torch.float32))
+         vae_encode_info = AutoencoderKLOutput(latent_dist=posterior)
+         init_latents = retrieve_latents(vae_encode_info, generator=generator)
+         # 0.18215 is the SD v1.x VAE latent scaling factor
+         init_latents = init_latents * 0.18215
+         init_latents = torch.cat([init_latents], dim=0)
+         noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=noise_dtype)
+         timestep_tensor = torch.tensor([int(IMG2IMG_TIMESTEPS[0])], device=device)
+         init_latents = add_noise(init_latents.to(device), noise, timestep_tensor)
+         latent = init_latents.detach().cpu().numpy()
+
+         timesteps = IMG2IMG_TIMESTEPS
+         self_timesteps = IMG2IMG_SELF_TIMESTEPS
+         step_index = IMG2IMG_STEP_INDEX
+     else:
+         batch, channels, latent_h, latent_w = compute_latent_shape(height, width)
+         if generator is None:
+             latents = torch.randn((batch, channels, latent_h, latent_w), device=device, dtype=torch.float32)
+         else:
+             latents = randn_tensor((batch, channels, latent_h, latent_w), generator=generator, device=device, dtype=torch.float32)
+         latent = latents.cpu().numpy()
+         init_image_show = None
+         timesteps = TXT2IMG_TIMESTEPS
+         self_timesteps = None
+         step_index = None
+
+     latent = denoise_loop(
+         latent=latent,
+         prompt_embeds=prompt_embeds_npy,
+         time_inputs=time_input,
+         timesteps=timesteps,
+         unet_session=unet_session,
+         alphas_cumprod=alphas_cumprod,
+         final_alphas_cumprod=final_alphas_cumprod,
+         generator=generator,
+         noise_dtype=noise_dtype,
+         self_timesteps=self_timesteps,
+         step_index=step_index,
+     )
+
+     latent = latent / 0.18215
+     vae_decoder_inp_name = vae_decoder_session.get_inputs()[0].name
+     image = vae_decoder_session.run(None, {vae_decoder_inp_name: latent.astype(np.float32)})[0]
+
+     # NCHW [-1, 1] -> HWC uint8
+     image = np.transpose(image, (0, 2, 3, 1)).squeeze(axis=0)
+     image_denorm = np.clip(image / 2 + 0.5, 0, 1)
+     image_uint8 = (image_denorm * 255).round().astype("uint8")
+     pil_image = Image.fromarray(image_uint8[:, :, :3])
+
+     grid_img = None
+     if is_img2img:
+         grid_img = make_image_grid([init_image_show, pil_image], rows=1, cols=2)
+
+     if save_path:
+         ensure_parent(save_path)
+         pil_image.save(save_path)
+         if grid_img is not None:
+             grid_path = os.path.splitext(save_path)[0] + "_grid.png"
+             ensure_parent(grid_path)
+             grid_img.save(grid_path)
+
+     return pil_image, grid_img, seed_used
+
+
+ def gradio_generate(prompt: str,
+                     init_image: Optional[Image.Image],
+                     backend: str,
+                     isize: str,
+                     seed: Optional[float],
+                     model_dir: str):
+     try:
+         image, grid_img, seed_used = run_pipeline(
+             prompt=prompt,
+             model_dir=model_dir,
+             backend=backend,
+             isize=isize,
+             seed=int(seed) if seed not in (None, "") else None,
+             init_image=init_image,
+         )
+         return image, grid_img, f"{seed_used}"
+     except Exception as exc:  # pragma: no cover
+         warnings.warn(f"Generation failed: {exc}")
+         return None, None, "Generation failed"
+
+
+ def launch_gradio(
+     default_model_dir: str = "./models",
+     default_backend: str = "axe",
+     default_isize: str = "512",
+     server_name: Optional[str] = None,
+     server_port: Optional[int] = None,
+     share: bool = False,
+ ):
+     # Load the models first and raise immediately on failure, so users don't
+     # discover errors only after the page is already open
+     _check_gradio_version()
+     print("[INIT] Preloading models and time inputs...")
+     _preload_models(default_model_dir, default_backend, default_isize)
+     print("[INIT] Model preloading complete")
+     title_text = "Stable Diffusion LCM Demo"
+     subtitle_text = f"Resolution {default_isize}"
+     with gr.Blocks(title=title_text) as demo:
+         gr.Markdown(f"### {title_text}")
+         gr.Markdown(f"**{subtitle_text}**")
+         gr.HTML(
+             """
+             <style>
+             .fixed-img-container {height: 320px; display:flex; align-items:center; justify-content:center; overflow:hidden;}
+             .fixed-img-container img {max-height: 100%; max-width: 100%; object-fit: contain;}
+             .gradio-fullscreen img {max-height: none !important; width: auto !important; height: auto !important; object-fit: contain;}
+             </style>
+             """
+         )
+         with gr.Row():
+             with gr.Column(scale=1):
+                 prompt = gr.Textbox(
+                     label="Prompt",
+                     lines=4,
+                     value="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
+                     placeholder="Enter a prompt"
+                 )
+                 init_image = gr.Image(
+                     label="Init Image (optional)",
+                     type="pil",
+                     image_mode="RGB",
+                     elem_classes=["fixed-img-container"],
+                     show_fullscreen_button=True,
+                 )
+                 seed = gr.Number(label="Random seed (-1 for random)", value=-1, precision=0)
+                 seed_info = gr.Textbox(label="Actual seed", value="-", interactive=False)
+                 run_btn = gr.Button("Generate", variant="primary")
+             with gr.Column(scale=1):
+                 output_image = gr.Image(
+                     label="Output image",
+                     elem_classes=["fixed-img-container"],
+                     show_fullscreen_button=True,
+                 )
+                 grid_image = gr.Image(
+                     label="Comparison grid (img2img)",
+                     elem_classes=["fixed-img-container"],
+                     show_fullscreen_button=True,
+                 )
+
+         run_btn.click(
+             fn=gradio_generate,
+             inputs=[prompt, init_image, gr.State(default_backend), gr.State(default_isize), seed, gr.State(default_model_dir)],
+             outputs=[output_image, grid_image, seed_info],
+         )
+
+     app = demo.queue(max_size=4)
+
+     target_port = server_port or 7860
+     host_candidates = []
+     if server_name:
+         host_candidates.append(server_name)
+     host_candidates.extend(_list_host_ips())
+     printed = set()
+     print("Accessible addresses (pick any one):")
+     for ip in host_candidates:
+         if ip and ip not in printed:
+             printed.add(ip)
+             print(f"  http://{ip}:{target_port}")
+
+     app.launch(server_name=server_name, server_port=server_port, share=share)
+
+
+ def get_args():
+     parser = argparse.ArgumentParser(description="Gradio demo for Stable Diffusion LCM")
+     parser.add_argument("--model_dir", type=str, default="./models", help="Path to the model directory")
+     parser.add_argument("--backend", choices=["axe", "onnx"], default="axe", help="Inference backend")
+     parser.add_argument("--isize", type=str, default="512x512", help="Output resolution, a single value or HxW; must be a multiple of 8")
+     parser.add_argument("--server_name", type=str, default="0.0.0.0", help="Gradio server_name, e.g. 0.0.0.0")
+     parser.add_argument("--server_port", type=int, default=7860, help="Gradio server_port, e.g. 7860")
+     parser.add_argument("--share", action="store_true", help="Enable a Gradio share link")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     """
+     pip3 install gradio==5.42.0
+     python3 gradio_demo.py --model_dir models_1024x768 --isize 1024x768
+     """
+     args = get_args()
+     launch_gradio(
+         default_model_dir=args.model_dir,
+         default_backend=args.backend,
+         default_isize=args.isize,
+         server_name=args.server_name,
+         server_port=args.server_port,
+         share=args.share,
+     )
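
run_pipeline can also be driven without the UI. A minimal sketch against the models added in this commit (assumes the launcher module is importable, the LFS files have been pulled, and isize is chosen to match the 320x320 build; output path is illustrative):

from gradio_demo import run_pipeline

image, grid_img, seed_used = run_pipeline(
    prompt="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k",
    model_dir="./ax620e_320x320_models",
    backend="axe",
    isize="320",
    seed=42,
    save_path="./output/result.png",
)
print("seed:", seed_used)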