stpete2 committed
Commit 71ae97e · verified · 1 Parent(s): 2301fe1

Upload app.py

Files changed (1)
  1. app.py +944 -0
app.py ADDED
@@ -0,0 +1,944 @@
1
+ import os
2
+ import sys
3
+ import warnings
4
+ import logging
5
+ import argparse
6
+ import json
7
+ import random
8
+ from datetime import datetime
9
+
10
+ import torch
11
+ import numpy as np
12
+ import cv2
13
+ from PIL import Image
14
+ from tqdm import tqdm
15
+ from natsort import natsorted, ns
16
+ from einops import rearrange
17
+ from omegaconf import OmegaConf
18
+ from huggingface_hub import snapshot_download
19
+ import spaces
20
+ import gradio as gr
21
+ import base64
22
+ import imageio_ffmpeg as ffmpeg
23
+ import subprocess
24
+ from different_domain_imge_gen.landmark_generation import generate_annotation
25
+
26
+ from transformers import (
27
+ Dinov2Model, CLIPImageProcessor, CLIPVisionModelWithProjection, AutoImageProcessor
28
+ )
29
+ from Next3d.training_avatar_texture.camera_utils import LookAtPoseSampler, FOV_to_intrinsics
30
+
31
+ import recon.dnnlib as dnnlib
32
+ import recon.legacy as legacy
33
+
34
+ from DiT_VAE.diffusion.utils.misc import read_config
35
+ from DiT_VAE.vae.triplane_vae import AutoencoderKL as AutoencoderKLTriplane
36
+ from DiT_VAE.diffusion import IDDPM, DPMS
37
+ from DiT_VAE.diffusion.model.nets import TriDitCLIPDINO_XL_2
38
+ from DiT_VAE.diffusion.data.datasets import get_chunks
39
+
40
+ # Get the directory of the current script
41
+ father_path = os.path.dirname(os.path.abspath(__file__))
42
+
43
+ # Add necessary paths dynamically
44
+ sys.path.extend([
45
+ os.path.join(father_path, 'recon'),
46
+ os.path.join(father_path, 'Next3d'),
47
+ os.path.join(father_path, 'data_process'),
48
+ os.path.join(father_path, 'data_process/lib')
49
+
50
+ ])
51
+
52
+ from lib.FaceVerse.renderer import Faceverse_manager
53
+ from data_process.input_img_align_extract_ldm_demo import Process
54
+ from lib.config.config_demo import cfg
55
+ import shutil
56
+
57
+ # Suppress warnings (especially for PyTorch)
58
+ warnings.filterwarnings("ignore")
59
+
60
+ # Configure logging settings
61
+ logging.basicConfig(
62
+ level=logging.INFO,
63
+ format="%(asctime)s - %(levelname)s - %(message)s"
64
+ )
65
+ from diffusers import (
66
+ StableDiffusionControlNetImg2ImgPipeline,
67
+ ControlNetModel,
68
+ DPMSolverMultistepScheduler,
69
+ AutoencoderKL,
70
+ )
71
+
72
+
73
+ def get_args():
74
+ """Parse and return command-line arguments."""
75
+ parser = argparse.ArgumentParser(description="4D Triplane Generation Arguments")
76
+
77
+ # Configuration and model checkpoints
78
+ parser.add_argument("--config", type=str, default="./configs/infer_config.py",
79
+ help="Path to the configuration file.")
80
+
81
+ # Generation parameters
82
+ parser.add_argument("--bs", type=int, default=1,
83
+ help="Batch size for processing.")
84
+ parser.add_argument("--cfg_scale", type=float, default=4.5,
85
+ help="CFG scale parameter.")
86
+ parser.add_argument("--sampling_algo", type=str, default="dpm-solver",
87
+ choices=["iddpm", "dpm-solver"],
88
+ help="Sampling algorithm to be used.")
89
+ parser.add_argument("--seed", type=int, default=0,
90
+ help="Random seed for reproducibility.")
91
+ # parser.add_argument("--select_img", type=str, default=None,
92
+ # help="Optional: Select a specific image.")
93
+ parser.add_argument('--step', default=-1, type=int)
94
+ # parser.add_argument('--use_demo_cam', action='store_true', help="Enable predefined camera parameters")
95
+ return parser.parse_args()
96
+
97
+
98
+ def set_env(seed=0):
99
+ """Set random seed for reproducibility across multiple frameworks."""
100
+ torch.manual_seed(seed) # Set PyTorch seed
101
+ torch.cuda.manual_seed_all(seed) # If using multi-GPU
102
+ np.random.seed(seed) # Set NumPy seed
103
+ random.seed(seed) # Set Python built-in random module seed
104
+ torch.set_grad_enabled(False) # Disable gradients for inference
105
+
106
+
107
+ def to_rgb_image(image: Image.Image):
108
+ """Convert an image to RGB format if necessary."""
109
+ if image.mode == 'RGB':
110
+ return image
111
+ elif image.mode == 'RGBA':
112
+ img = Image.new("RGB", image.size, (127, 127, 127))
113
+ img.paste(image, mask=image.getchannel('A'))
114
+ return img
115
+ else:
116
+ raise ValueError(f"Unsupported image type: {image.mode}")
117
+
118
+
119
+ def image_process(image_path, clip_image_processor, dino_img_processor, device):
120
+ """Preprocess an image for CLIP and DINO models."""
121
+ image = to_rgb_image(Image.open(image_path))
122
+ clip_image = clip_image_processor(images=image, return_tensors="pt").pixel_values.to(device)
123
+ dino_image = dino_img_processor(images=image, return_tensors="pt").pixel_values.to(device)
124
+ return dino_image, clip_image
125
+
126
+
127
+ # def video_gen(frames_dir, output_path, fps=30):
128
+ # """Generate a video from image frames."""
129
+ # frame_files = natsorted(os.listdir(frames_dir), alg=ns.PATH)
130
+ # frames = [cv2.imread(os.path.join(frames_dir, f)) for f in frame_files]
131
+ # H, W = frames[0].shape[:2]
132
+ # video_writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'MP4V'), fps, (W, H))
133
+ # for frame in frames:
134
+ # video_writer.write(frame)
135
+ # video_writer.release()
136
+
137
+
138
+ def trans(tensor_img):
139
+ img = (tensor_img.permute(0, 2, 3, 1) * 0.5 + 0.5).clamp(0, 1) * 255.
140
+ img = img.to(torch.uint8)
141
+ img = img[0].detach().cpu().numpy()
142
+
143
+ return img
144
+
145
+
146
+ def get_vert(vert_dir):
147
+ uvcoords_image = np.load(vert_dir)[..., :3]
148
+ uvcoords_image[..., -1][uvcoords_image[..., -1] < 0.5] = 0
149
+ uvcoords_image[..., -1][uvcoords_image[..., -1] >= 0.5] = 1
150
+ return torch.tensor(uvcoords_image.copy()).float().unsqueeze(0)
151
+
152
+
153
+ def generate_samples(DiT_model, cfg_scale, sample_steps, clip_feature, dino_feature, uncond_clip_feature,
154
+ uncond_dino_feature, device, latent_size, sampling_algo):
155
+ """
156
+ Generate latent samples using the specified diffusion model.
157
+
158
+ Args:
159
+ DiT_model (torch.nn.Module): The diffusion model.
160
+ cfg_scale (float): The classifier-free guidance scale.
161
+ sample_steps (int): Number of sampling steps.
162
+ clip_feature (torch.Tensor): CLIP feature tensor.
163
+ dino_feature (torch.Tensor): DINO feature tensor.
164
+ uncond_clip_feature (torch.Tensor): Unconditional CLIP feature tensor.
165
+ uncond_dino_feature (torch.Tensor): Unconditional DINO feature tensor.
166
+ device (str): Device for computation.
167
+ latent_size (tuple): The latent space size.
168
+ sampling_algo (str): The sampling algorithm ('iddpm' or 'dpm-solver').
169
+
170
+ Returns:
171
+ torch.Tensor: The generated samples.
172
+ """
173
+ n = 1 # Batch size
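+ # The 8 latent channels match the triplane VAE's embed_dim=8 (see load_vae_clip_dino)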
174
+ z = torch.randn(n, 8, latent_size[0], latent_size[1], device=device)
175
+
176
+ if sampling_algo == 'iddpm':
177
+ z = z.repeat(2, 1, 1, 1) # Duplicate for classifier-free guidance
178
+ model_kwargs = dict(y=torch.cat([clip_feature, uncond_clip_feature]),
179
+ img_feature=torch.cat([dino_feature, dino_feature]),
180
+ cfg_scale=cfg_scale)
181
+ diffusion = IDDPM(str(sample_steps))
182
+ samples = diffusion.p_sample_loop(DiT_model.forward_with_cfg, z.shape, z, clip_denoised=False,
183
+ model_kwargs=model_kwargs, progress=True, device=device)
184
+ samples, _ = samples.chunk(2, dim=0) # Remove unconditional samples
185
+
186
+ elif sampling_algo == 'dpm-solver':
187
+ dpm_solver = DPMS(DiT_model.forward_with_dpmsolver,
188
+ condition=[clip_feature, dino_feature],
189
+ uncondition=[uncond_clip_feature, dino_feature],
190
+ cfg_scale=cfg_scale)
191
+ samples = dpm_solver.sample(z, steps=sample_steps, order=2, skip_type="time_uniform", method="multistep")
192
+ else:
193
+ raise ValueError(f"Invalid sampling_algo '{sampling_algo}'. Choose either 'iddpm' or 'dpm-solver'.")
194
+
195
+ return samples
196
+
197
+
198
+ def load_motion_aware_render_model(ckpt_path, device):
199
+ """Load the motion-aware render model from a checkpoint."""
200
+ logging.info("Loading motion-aware render model...")
201
+ with dnnlib.util.open_url(ckpt_path, 'rb') as f:
202
+ network = legacy.load_network_pkl(f) # type: ignore
203
+ logging.info("Motion-aware render model loaded.")
204
+ return network['G_ema'].to(device)
205
+
206
+
207
+ def load_diffusion_model(ckpt_path, latent_size, device):
208
+ """Load the diffusion model (DiT)."""
209
+ logging.info("Loading diffusion model (DiT)...")
210
+
211
+ DiT_model = TriDitCLIPDINO_XL_2(input_size=latent_size).to(device)
212
+ ckpt = torch.load(ckpt_path, map_location="cpu")
213
+
214
+ # Remove keys that can cause mismatches
215
+ for key in ['pos_embed', 'base_model.pos_embed', 'model.pos_embed']:
216
+ ckpt['state_dict'].pop(key, None)
217
+ ckpt.get('state_dict_ema', {}).pop(key, None)
218
+
219
+ state_dict = ckpt.get('state_dict_ema', ckpt)
220
+ DiT_model.load_state_dict(state_dict, strict=False)
221
+ DiT_model.eval()
222
+ logging.info("Diffusion model (DiT) loaded.")
223
+ return DiT_model
224
+
225
+
226
+ def load_vae_clip_dino(config, device):
227
+ """Load VAE, CLIP, and DINO models."""
228
+ logging.info("Loading VAE, CLIP, and DINO models...")
229
+
230
+ # Load CLIP image encoder
231
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
232
+ config.image_encoder_path)
233
+ image_encoder.requires_grad_(False)
234
+ image_encoder.to(device)
235
+
236
+ # Load VAE
237
+ config_vae = OmegaConf.load(config.vae_triplane_config_path)
238
+ vae_triplane = AutoencoderKLTriplane(ddconfig=config_vae['ddconfig'], lossconfig=None, embed_dim=8)
239
+ vae_triplane.to(device)
240
+
241
+ vae_ckpt_path = os.path.join(config.vae_pretrained, 'pytorch_model.bin')
242
+ if not os.path.isfile(vae_ckpt_path):
243
+ raise RuntimeError(f"VAE checkpoint not found at {vae_ckpt_path}")
244
+
245
+ vae_triplane.load_state_dict(torch.load(vae_ckpt_path, map_location="cpu"))
246
+ vae_triplane.requires_grad_(False)
247
+
248
+ # Load DINO model
249
+ dinov2 = Dinov2Model.from_pretrained(config.dino_pretrained)
250
+ dinov2.requires_grad_(False)
251
+ dinov2.to(device)
252
+
253
+ # Load image processors
254
+ dino_img_processor = AutoImageProcessor.from_pretrained(config.dino_pretrained)
255
+ clip_image_processor = CLIPImageProcessor()
256
+
257
+ logging.info("VAE, CLIP, and DINO models loaded.")
258
+ return vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor
259
+
260
+
261
+ def prepare_working_dir(dir, style):
262
+ logging.info(f"prepare_working_dir: is_styled={style}")
263
+ if style:
264
+ return dir
265
+ else:
266
+ import tempfile
267
+ # Use mkdtemp so the directory is not removed when a TemporaryDirectory object is garbage-collected
268
+ return tempfile.mkdtemp()
269
+
270
+ def launch_pretrained():
271
+ from huggingface_hub import snapshot_download
272
+ snapshot_download(
273
+ repo_id="KumaPower/AvatarArtist",
274
+ repo_type="model",
275
+ local_dir="./pretrained_model",
276
+ local_dir_use_symlinks=False
277
+ )
278
+
279
+
280
+ snapshot_download(
281
+ repo_id="stabilityai/stable-diffusion-2-base",
282
+ repo_type="model",
283
+ local_dir="./pretrained_model/sd21",
284
+ local_dir_use_symlinks=False
285
+ )
286
+ logging.info("delete models.")
287
+
288
+
289
+ os.remove('./pretrained_model/sd21/v2-1_512-ema-pruned.ckpt')
290
+ os.remove('./pretrained_model/sd21/v2-1_512-nonema-pruned.ckpt')
291
+
292
+ # Download all files of CrucibleAI/ControlNetMediaPipeFace
293
+ snapshot_download(
294
+ repo_id="CrucibleAI/ControlNetMediaPipeFace",
295
+ repo_type="model",
296
+ local_dir="./pretrained_model/control",
297
+ local_dir_use_symlinks=False
298
+ )
299
+
300
+
301
+ def prepare_image_list(img_dir, selected_img):
302
+ """Prepare the list of image paths for processing."""
303
+ if selected_img and selected_img in os.listdir(img_dir):
304
+ return [os.path.join(img_dir, selected_img)]
305
+
306
+ return sorted([os.path.join(img_dir, img) for img in os.listdir(img_dir)])
307
+
308
+
309
+ def images_to_video(image_folder, output_video, fps=30):
310
+ # Get all image files and ensure correct order
311
+ images = [img for img in os.listdir(image_folder) if img.endswith((".png", ".jpg", ".jpeg"))]
312
+ images = natsorted(images) # Sort filenames naturally to preserve frame order
313
+
314
+ if not images:
315
+ print("❌ No images found in the directory!")
316
+ return
317
+
318
+ # Get the path to the FFmpeg executable
319
+ ffmpeg_exe = ffmpeg.get_ffmpeg_exe()
320
+ print(f"Using FFmpeg from: {ffmpeg_exe}")
321
+
322
+ # Define input image pattern (expects images named like "%04d.png")
323
+ image_pattern = os.path.join(image_folder, "%04d.png")
324
+
325
+ # FFmpeg command to encode video
326
+ command = [
327
+ ffmpeg_exe, '-framerate', str(fps), '-i', image_pattern,
328
+ '-c:v', 'libx264', '-preset', 'slow', '-crf', '18', # High-quality H.264 encoding
329
+ '-pix_fmt', 'yuv420p', '-b:v', '5000k', # Ensure compatibility & increase bitrate
330
+ output_video
331
+ ]
332
+
333
+ # Run FFmpeg command
334
+ subprocess.run(command, check=True)
335
+
336
+ print(f"✅ High-quality MP4 video has been generated: {output_video}")
337
+
338
+
339
+ def model_define():
340
+ args = get_args()
341
+ set_env(args.seed)
342
+ input_process_model = Process(cfg)
343
+
344
+ device = "cuda" if torch.cuda.is_available() else "cpu"
345
+ weight_dtype = torch.float32
346
+ logging.info(f"Running inference with {weight_dtype}")
347
+
348
+ # Load configuration
349
+ default_config = read_config(args.config)
350
+
351
+ # Ensure valid sampling algorithm
352
+ assert args.sampling_algo in ['iddpm', 'dpm-solver', 'sa-solver']
353
+ # Load motion-aware render model
354
+ motion_aware_render_model = load_motion_aware_render_model(default_config.motion_aware_render_model_ckpt, device)
355
+
356
+ # Load diffusion model (DiT)
357
+ triplane_size = (256 * 4, 256)
358
+ latent_size = (triplane_size[0] // 8, triplane_size[1] // 8)
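+ # The triplane is 4 planes of 256x256 stacked along height; the VAE downsamples by 8x, giving a 128x32 latent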
359
+ sample_steps = args.step if args.step != -1 else {'iddpm': 100, 'dpm-solver': 20, 'sa-solver': 25}[
360
+ args.sampling_algo]
361
+ DiT_model = load_diffusion_model(default_config.DiT_model_ckpt, latent_size, device)
362
+
363
+ # Load VAE, CLIP, and DINO
364
+ vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor = load_vae_clip_dino(default_config,
365
+ device)
366
+
367
+ # Load normalization parameters
368
+ triplane_std = torch.load(default_config.std_dir).to(device).reshape(1, -1, 1, 1, 1)
369
+ triplane_mean = torch.load(default_config.mean_dir).to(device).reshape(1, -1, 1, 1, 1)
370
+
371
+ # Load average latent vector
372
+ ws_avg = torch.load(default_config.ws_avg_pkl).to(device)[0]
373
+
374
+ # Set up FaceVerse for animation
375
+ base_coff = np.load(
376
+ 'pretrained_model/temp.npy').astype(
377
+ np.float32)
378
+ base_coff = torch.from_numpy(base_coff).float()
379
+ Faceverse = Faceverse_manager(device=device, base_coeff=base_coff)
380
+
381
+ return motion_aware_render_model, sample_steps, DiT_model, \
382
+ vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor, triplane_std, triplane_mean, ws_avg, Faceverse, device, input_process_model
383
+
384
+
385
+ def duplicate_batch(tensor, batch_size=2):
386
+ if tensor is None:
387
+ return None  # Return None unchanged if the input is None
388
+ return tensor.repeat(batch_size, *([1] * (tensor.dim() - 1)))  # Repeat along the batch dimension
389
+
390
+
391
+ @torch.inference_mode()
392
+ @spaces.GPU(duration=200)
393
+ def avatar_generation(items, save_path_base, video_path_input, source_type, is_styled, styled_img):
394
+ """
395
+ Generate avatars from input images.
396
+
397
+ Args:
398
+ items (str): Path to the source image (wrapped in a list internally).
399
+ save_path_base (str): Base directory for saving results.
400
+ video_path_input (str): Path to the target (driving) video.
401
+ source_type (str): 'example' or 'custom', depending on where the source image came from.
402
+ is_styled (bool): Whether the styled image should be used as the source.
403
+ styled_img (str): Path to the styled image (used when is_styled is True).
404
+
405
+ Note:
406
+ The DiT model, render model, VAE, encoders, and normalization tensors are
407
+ read from module-level globals initialized in __main__.
408
+ """
409
+ if is_styled:
410
+ items = [styled_img]
411
+ else:
412
+ items = [items]
413
+ video_folder = "./demo_data/target_video"
414
+ video_name = os.path.basename(video_path_input).split(".")[0]
415
+ target_path = os.path.join(video_folder, 'data_' + video_name)
416
+ exp_base_dir = os.path.join(target_path, 'coeffs')
417
+ exp_img_base_dir = os.path.join(target_path, 'images512x512')
418
+ motion_base_dir = os.path.join(target_path, 'motions')
419
+ label_file_test = os.path.join(target_path, 'images512x512/dataset_realcam.json')
420
+
421
+ if source_type == 'example':
422
+ input_img_fvid = './demo_data/source_img/img_generate_different_domain/coeffs/trained_input_imgs'
423
+ input_img_motion = './demo_data/source_img/img_generate_different_domain/motions/trained_input_imgs'
424
+ elif source_type == 'custom':
425
+ input_img_fvid = os.path.join(save_path_base, 'processed_img/dataset/coeffs/input_image')
426
+ input_img_motion = os.path.join(save_path_base, 'processed_img/dataset/motions/input_image')
427
+ else:
428
+ raise ValueError("Wrong type")
429
+ bs = 1
430
+ sample_steps = 20
431
+ cfg_scale = 4.5
432
+ pitch_range = 0.25
433
+ yaw_range = 0.35
434
+ triplane_size = (256 * 4, 256)
435
+ latent_size = (triplane_size[0] // 8, triplane_size[1] // 8)
436
+ for chunk in tqdm(list(get_chunks(items, 1)), unit='batch'):
437
+ if bs != 1:
438
+ raise ValueError("Batch size > 1 not implemented")
439
+
440
+ image_dir = chunk[0]
441
+
442
+ image_name = os.path.splitext(os.path.basename(image_dir))[0]
443
+ dino_img, clip_image = image_process(image_dir, clip_image_processor, dino_img_processor, device)
444
+
445
+ clip_feature = image_encoder(clip_image, output_hidden_states=True).hidden_states[-2]
446
+ uncond_clip_feature = image_encoder(torch.zeros_like(clip_image), output_hidden_states=True).hidden_states[
447
+ -2]
448
+ dino_feature = dinov2(dino_img).last_hidden_state
449
+ uncond_dino_feature = dinov2(torch.zeros_like(dino_img)).last_hidden_state
450
+
451
+ samples = generate_samples(DiT_model, cfg_scale, sample_steps, clip_feature, dino_feature,
452
+ uncond_clip_feature, uncond_dino_feature, device, latent_size,
453
+ 'dpm-solver')
454
+
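+ # Rescale the latent before decoding; 0.3994218 is assumed to be the scaling factor applied to the VAE latents during training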
455
+ samples = (samples / 0.3994218)
456
+ samples = rearrange(samples, "b c (f h) w -> b c f h w", f=4)
457
+ samples = vae_triplane.decode(samples)
458
+ samples = rearrange(samples, "b c f h w -> b f c h w")
459
+ samples = samples * std + mean
460
+ torch.cuda.empty_cache()
461
+
462
+ save_frames_path_out = os.path.join(save_path_base, image_name, 'out')
463
+ save_frames_path_outshow = os.path.join(save_path_base, image_name, 'out_show')
464
+ save_frames_path_depth = os.path.join(save_path_base, image_name, 'depth')
465
+
466
+ os.makedirs(save_frames_path_out, exist_ok=True)
467
+ os.makedirs(save_frames_path_outshow, exist_ok=True)
468
+ os.makedirs(save_frames_path_depth, exist_ok=True)
469
+
470
+ img_ref = np.array(Image.open(image_dir))
471
+ img_ref_out = img_ref.copy()
472
+ img_ref = torch.from_numpy(img_ref.astype(np.float32) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0).to(device)
473
+
474
+ motion_app_dir = os.path.join(input_img_motion, image_name + '.npy')
475
+ motion_app = torch.tensor(np.load(motion_app_dir), dtype=torch.float32).unsqueeze(0).to(device)
476
+
477
+ id_motions = os.path.join(input_img_fvid, image_name + '.npy')
478
+
479
+ all_pose = json.loads(open(label_file_test).read())['labels']
480
+ all_pose = dict(all_pose)
481
+ if os.path.exists(id_motions):
482
+ coeff = np.load(id_motions).astype(np.float32)
483
+ coeff = torch.from_numpy(coeff).to(device).float().unsqueeze(0)
484
+ Faceverse.id_coeff = Faceverse.recon_model.split_coeffs(coeff)[0]
485
+ motion_dir = os.path.join(motion_base_dir, video_name)
486
+ exp_dir = os.path.join(exp_base_dir, video_name)
487
+ for frame_index, motion_name in enumerate(
488
+ tqdm(natsorted(os.listdir(motion_dir), alg=ns.PATH), desc="Processing Frames")):
489
+ exp_each_dir_img = os.path.join(exp_img_base_dir, video_name, motion_name.replace('.npy', '.png'))
490
+ exp_each_dir = os.path.join(exp_dir, motion_name)
491
+ motion_each_dir = os.path.join(motion_dir, motion_name)
492
+
493
+ # Load pose data
494
+ pose_key = os.path.join(video_name, motion_name.replace('.npy', '.png'))
495
+
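+ # Camera for the "show" view: sinusoidally orbit yaw/pitch around the frontal pose (pi/2) over the length of the clip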
496
+ cam2world_pose = LookAtPoseSampler.sample(
497
+ 3.14 / 2 + yaw_range * np.sin(2 * 3.14 * frame_index / len(os.listdir(motion_dir))),
498
+ 3.14 / 2 - 0.05 + pitch_range * np.cos(2 * 3.14 * frame_index / len(os.listdir(motion_dir))),
499
+ torch.tensor([0, 0, 0], device=device), radius=2.7, device=device)
500
+ pose_show = torch.cat([cam2world_pose.reshape(-1, 16),
501
+ FOV_to_intrinsics(fov_degrees=18.837, device=device).reshape(-1, 9)], 1).to(device)
502
+
503
+ pose = torch.tensor(np.array(all_pose[pose_key]).astype(np.float32)).float().unsqueeze(0).to(device)
504
+
505
+ # Load and resize expression image
506
+ exp_img = np.array(Image.open(exp_each_dir_img).resize((512, 512)))
507
+
508
+ # Load expression coefficients
509
+ exp_coeff = torch.from_numpy(np.load(exp_each_dir).astype(np.float32)).to(device).float().unsqueeze(0)
510
+ exp_target = Faceverse.make_driven_rendering(exp_coeff, res=256)
511
+
512
+ # Load motion data
513
+ motion = torch.tensor(np.load(motion_each_dir)).float().unsqueeze(0).to(device)
514
+
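+ # Duplicate all inputs to batch size 2: index 0 is rendered from the orbiting show camera, index 1 from the driving video's camera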
515
+ img_ref_double = duplicate_batch(img_ref, batch_size=2)
516
+ motion_app_double = duplicate_batch(motion_app, batch_size=2)
517
+ motion_double = duplicate_batch(motion, batch_size=2)
518
+ pose_double = torch.cat([pose_show, pose], dim=0)
519
+ exp_target_double = duplicate_batch(exp_target, batch_size=2)
520
+ samples_double = duplicate_batch(samples, batch_size=2)
521
+ # Run the motion-aware render model on both views in a single forward pass
522
+ final_out = render_model(
523
+ img_ref_double, None, motion_app_double, motion_double, c=pose_double, mesh=exp_target_double,
524
+ triplane_recon=samples_double,
525
+ ws_avg=ws_avg, motion_scale=1.
526
+ )
527
+
528
+ # Process output image
529
+ final_out_show = trans(final_out['image_sr'][0].unsqueeze(0))
530
+ final_out_notshow = trans(final_out['image_sr'][1].unsqueeze(0))
531
+ depth = final_out['image_depth'][0].unsqueeze(0)
532
+ depth = -depth
533
+ depth = (depth - depth.min()) / (depth.max() - depth.min()) * 2 - 1
534
+ depth = trans(depth)
535
+
536
+ depth = np.repeat(depth[:, :, :], 3, axis=2)
537
+ # Save output images
538
+ frame_name = f'{str(frame_index).zfill(4)}.png'
539
+ Image.fromarray(depth, 'RGB').save(os.path.join(save_frames_path_depth, frame_name))
540
+ Image.fromarray(final_out_notshow, 'RGB').save(os.path.join(save_frames_path_out, frame_name))
541
+
542
+ Image.fromarray(final_out_show, 'RGB').save(os.path.join(save_frames_path_outshow, frame_name))
543
+
544
+ # Generate videos
545
+ images_to_video(save_frames_path_out, os.path.join(save_path_base, image_name + '_out.mp4'))
546
+ images_to_video(save_frames_path_outshow, os.path.join(save_path_base, image_name + '_outshow.mp4'))
547
+ images_to_video(save_frames_path_depth, os.path.join(save_path_base, image_name + '_depth.mp4'))
548
+
549
+ logging.info(f"✅ Video generation completed successfully!")
550
+ return os.path.join(save_path_base, image_name + '_out.mp4'), os.path.join(save_path_base,
551
+ image_name + '_outshow.mp4'), os.path.join(save_path_base, image_name + '_depth.mp4')
552
+
553
+
554
+ def get_image_base64(path):
555
+ with open(path, "rb") as image_file:
556
+ encoded_string = base64.b64encode(image_file.read()).decode()
557
+ return f"data:image/png;base64,{encoded_string}"
558
+
559
+
560
+ def assert_input_image(input_image):
561
+ if input_image is None:
562
+ raise gr.Error("No image selected or uploaded!")
563
+
564
+
565
+ def process_image(input_image, source_type, is_style, save_dir):
566
+ """ 🎯 处理 input_image,根据是否是示例图片执行不同逻辑 """
567
+ process_img_input_dir = os.path.join(save_dir, 'input_image')
568
+ process_img_save_dir = os.path.join(save_dir, 'processed_img')
569
+ os.makedirs(process_img_save_dir, exist_ok=True)
570
+ os.makedirs(process_img_input_dir, exist_ok=True)
571
+ if source_type == "example":
572
+ return input_image, source_type
573
+ else:
574
+ # input_process_model.inference(input_image, process_img_save_dir)
575
+ shutil.copy(input_image, process_img_input_dir)
576
+ input_process_model.inference(process_img_input_dir, process_img_save_dir, is_img=True, is_video=False)
577
+ img_name = os.path.basename(input_image)
578
+ image_dir = os.path.join(save_dir, 'processed_img/dataset/images512x512/input_image', img_name)
579
+ return image_dir, source_type  # Return the path of the processed (aligned and cropped) user image
580
+
581
+
582
+ def style_transfer(processed_image, style_prompt, cfg, strength, save_base):
583
+ """
584
+ 🎭 Apply style transfer to the processed image.
585
+ ✅ Uses the ControlNet (MediaPipe face) img2img pipeline (pipeline_sd) defined in __main__.
586
+ """
587
+ src_img_pil = Image.open(processed_image)
588
+ img_name = os.path.basename(processed_image)
589
+ save_dir = os.path.join(save_base, 'style_img')
590
+ os.makedirs(save_dir, exist_ok=True)
591
+ control_image = generate_annotation(src_img_pil, max_faces=1)
592
+ trg_img_pil = pipeline_sd(
593
+ prompt=style_prompt,
594
+ image=src_img_pil,
595
+ strength=strength,
596
+ control_image=Image.fromarray(control_image),
597
+ guidance_scale=cfg,
598
+ negative_prompt='worst quality, normal quality, low quality, low res, blurry',
599
+ num_inference_steps=30,
600
+ controlnet_conditioning_scale=1.5
601
+ )['images'][0]
602
+ trg_img_pil.save(os.path.join(save_dir, img_name))
603
+ return os.path.join(save_dir, img_name)  # 🚨 path of the styled image; swap in your own stylization logic here if desired
604
+
605
+
606
+ def reset_flag():
607
+ return False
608
+ css = """
609
+ /* ✅ Center all Image components and let them scale with the container */
610
+ .gr-image img {
611
+ display: block;
612
+ margin-left: auto;
613
+ margin-right: auto;
614
+ max-width: 100%;
615
+ height: auto;
616
+ }
617
+
618
+ /* ✅ Center all Video components and let them scale with the container */
619
+ .gr-video video {
620
+ display: block;
621
+ margin-left: auto;
622
+ margin-right: auto;
623
+ max-width: 100%;
624
+ height: auto;
625
+ }
626
+
627
+ /* ✅ Optional: center the button and markdown in the generate block */
628
+ #generate_block {
629
+ display: flex;
630
+ flex-direction: column;
631
+ align-items: center;
632
+ justify-content: center;
633
+ margin-top: 1rem;
634
+ }
635
+
636
+
637
+ /* Optional: constrain and center the main container */
638
+ #main_container {
639
+ max-width: 1280px; /* ✅ e.g. cap the width at 1280px */
640
+ margin-left: auto; /* ✅ center horizontally */
641
+ margin-right: auto;
642
+ padding-left: 1rem;
643
+ padding-right: 1rem;
644
+ }
645
+
646
+ """
647
+
648
+ def launch_gradio_app():
649
+ styles = {
650
+ "Ghibli": "Ghibli style avatar, anime style",
651
+ "Pixar": "a 3D render of a face in Pixar style",
652
+ "Lego": "a 3D render of a head of a lego man 3D model",
653
+ "Greek Statue": "a FHD photo of a white Greek statue",
654
+ "Elf": "a FHD photo of a face of a beautiful elf with silver hair in live action movie",
655
+ "Zombie": "a FHD photo of a face of a zombie",
656
+ "Tekken": "a 3D render of a Tekken game character",
657
+ "Devil": "a FHD photo of a face of a devil in fantasy movie",
658
+ "Steampunk": "Steampunk style portrait, mechanical, brass and copper tones",
659
+ "Mario": "a 3D render of a face of Super Mario",
660
+ "Orc": "a FHD photo of a face of an orc in fantasy movie",
661
+ "Masque": "a FHD photo of a face of a person in masquerade",
662
+ "Skeleton": "a FHD photo of a face of a skeleton in fantasy movie",
663
+ "Peking Opera": "a FHD photo of face of character in Peking opera with heavy make-up",
664
+ "Yoda": "a FHD photo of a face of Yoda in Star Wars",
665
+ "Hobbit": "a FHD photo of a face of Hobbit in Lord of the Rings",
666
+ "Stained Glass": "Stained glass style, portrait, beautiful, translucent",
667
+ "Graffiti": "Graffiti style portrait, street art, vibrant, urban, detailed, tag",
668
+ "Pixel-art": "pixel art style portrait, low res, blocky, pixel art style",
669
+ "Retro": "Retro game art style portrait, vibrant colors",
670
+ "Ink": "a portrait in ink style, black and white image",
671
+ }
672
+
673
+ with gr.Blocks(analytics_enabled=False, delete_cache=[3600, 3600], css=css, elem_id="main_container") as demo:
674
+ logo_url = "./docs/AvatarArtist.png"
675
+ logo_base64 = get_image_base64(logo_url)
676
+ # 🚀 Center the logo and align the title
677
+ gr.HTML(
678
+ f"""
679
+ <div style="display: flex; justify-content: center; align-items: center; text-align: center; margin-bottom: 20px;">
680
+ <img src="{logo_base64}" style="height:50px; margin-right: 15px; display: block;" onerror="this.style.display='none'"/>
681
+ <h1 style="font-size: 32px; font-weight: bold;">AvatarArtist: Open-Domain 4D Avatarization</h1>
682
+ </div>
683
+ """
684
+ )
685
+
686
+ # 🚀 Keep the badge links on one centered row
687
+ gr.HTML(
688
+ """
689
+ <div style="display: flex; justify-content: center; gap: 10px; margin-top: 10px;">
690
+ <a title="Website" href="https://kumapowerliu.github.io/AvatarArtist/" target="_blank" rel="noopener noreferrer">
691
+ <img src="https://img.shields.io/badge/Website-Visit-blue?style=for-the-badge&logo=GoogleChrome">
692
+ </a>
693
+ <a title="arXiv" href="https://arxiv.org/abs/2503.19906" target="_blank" rel="noopener noreferrer">
694
+ <img src="https://img.shields.io/badge/arXiv-Paper-red?style=for-the-badge&logo=arXiv">
695
+ </a>
696
+ <a title="Github" href="https://github.com/ant-research/AvatarArtist" target="_blank" rel="noopener noreferrer">
697
+ <img src="https://img.shields.io/github/stars/ant-research/AvatarArtist?style=for-the-badge&logo=github&logoColor=white&color=orange">
698
+ </a>
699
+ </div>
700
+ """
701
+ )
702
+ gr.HTML(
703
+ """
704
+ <div style="color: inherit; text-align: left; font-size: 16px; line-height: 1.6; margin-top: 20px; padding: 16px; border-radius: 10px; border: 1px solid rgba(0,0,0,0.1); background-color: rgba(240, 240, 240, 0.6); backdrop-filter: blur(2px);">
705
+ <strong>🧑‍🎨 How to use this demo:</strong>
706
+ <ol style="margin-top: 10px; padding-left: 20px;">
707
+ <li><strong>Select or upload a source image</strong> – this will be the avatar's face.</li>
708
+ <li><strong>Select or upload a target video</strong> – the avatar will mimic this motion.</li>
709
+ <li><strong>Click the <em>Process Image</em> button</strong> – this prepares the source image to meet our model's input requirements.</li>
710
+ <li><strong>(Optional)</strong> Click <em>Apply Style</em> to change the appearance of the processed image – we offer a variety of fun styles to choose from!</li>
711
+ <li><strong>Click <em>Generate Avatar</em></strong> to create the final animated result driven by the target video.</li>
712
+ </ol>
713
+ <p style="margin-top: 10px;"><strong>🎨 Tip:</strong> Try different styles to get various artistic effects for your avatar!</p>
714
+ </div>
715
+ """
716
+ )
717
+ # 🚀 Important notes box
718
+ gr.HTML(
719
+ """
720
+ <div style="background-color: #FFDDDD; padding: 15px; border-radius: 10px; border: 2px solid red; text-align: center; margin-top: 20px;">
721
+ <h4 style="color: red; font-size: 18px;">
722
+ 🚨 <strong>Important Notes:</strong> Please try to provide a <u>front-facing</u> or <u>full-face</u> image without obstructions.
723
+ </h4>
724
+ <p style="color: black; font-size: 16px;">
725
+ ❌ Our demo does <strong>not</strong> support uploading videos with specific motions because processing requires time.<br>
726
+ ✅ Feel free to check out our <a href="https://github.com/ant-research/AvatarArtist" target="_blank" style="color: red; font-weight: bold;">GitHub repository</a> to drive portraits using your desired motions.
727
+ </p>
728
+ </div>
729
+ """
730
+ )
731
+ # DISPLAY
732
+ image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/trained_input_imgs"
733
+ video_folder = "./demo_data/target_video"
734
+
735
+ examples_images = sorted(
736
+ [os.path.join(image_folder, f) for f in os.listdir(image_folder) if
737
+ f.lower().endswith(('.png', '.jpg', '.jpeg'))]
738
+ )
739
+ examples_videos = sorted(
740
+ [os.path.join(video_folder, f) for f in os.listdir(video_folder) if f.lower().endswith('.mp4')]
741
+ )
742
+ print(examples_videos)
743
+ source_type = gr.State("example")
744
+ is_from_example = gr.State(value=True)
745
+ is_styled = gr.State(value=False)
746
+ working_dir = gr.State()
747
+
748
+ with gr.Row():
749
+ with gr.Column(variant='panel'):
750
+ with gr.Tabs(elem_id="input_image"):
751
+ with gr.TabItem('🎨 Upload Image'):
752
+ input_image = gr.Image(
753
+ label="Upload Source Image",
754
+ value=os.path.join(image_folder, '02057_(2).png'),
755
+ image_mode="RGB", height=512, container=True,
756
+ sources="upload", type="filepath"
757
+ )
758
+
759
+ def mark_as_example(example_image):
760
+ print("✅ mark_as_example called")
761
+ return "example", True, False
762
+
763
+ def mark_as_custom(user_image, is_from_example_flag):
764
+ print("✅ mark_as_custom called")
765
+ if is_from_example_flag:
766
+ print("⚠️ Ignored mark_as_custom triggered by example")
767
+ return "example", False, False
768
+ return "custom", False, False
769
+
770
+ input_image.change(
771
+ mark_as_custom,
772
+ inputs=[input_image, is_from_example],
773
+ outputs=[source_type, is_from_example, is_styled]  # ✅ only update the state flags; do not write back to input_image
774
+ )
775
+
776
+ # ✅ Give the Examples component its own row and bind its click event
777
+ with gr.Row():
778
+ example_component = gr.Examples(
779
+ examples=examples_images,
780
+ inputs=[input_image],
781
+ examples_per_page=10,
782
+ )
783
+ # ✅ Listen for the click event on the Examples dataset
784
+ example_component.dataset.click(
785
+ fn=mark_as_example,
786
+ inputs=[input_image],
787
+ outputs=[source_type, is_from_example, is_styled]
788
+ )
789
+
790
+ with gr.Column(variant='panel' ):
791
+ with gr.Tabs(elem_id="input_video"):
792
+ with gr.TabItem('🎬 Target Video'):
793
+ video_input = gr.Video(
794
+ label="Select Target Motion",
795
+ height=512, container=True,interactive=False, format="mp4",
796
+ value=examples_videos[0]
797
+ )
798
+
799
+ with gr.Row():
800
+ gr.Examples(
801
+ examples=examples_videos,
802
+ inputs=[video_input],
803
+ examples_per_page=10,
804
+ )
805
+ with gr.Column(variant='panel' ):
806
+ with gr.Tabs(elem_id="processed_image"):
807
+ with gr.TabItem('🖼️ Processed Image'):
808
+ processed_image = gr.Image(
809
+ label="Processed Image",
810
+ image_mode="RGB", type="filepath",
811
+ elem_id="processed_image",
812
+ height=512, container=True,
813
+ interactive=False
814
+ )
815
+ processed_image_button = gr.Button("🔧 Process Image", variant="primary")
816
+ with gr.Column(variant='panel' ):
817
+ with gr.Tabs(elem_id="style_transfer"):
818
+ with gr.TabItem('🎭 Style Transfer'):
819
+ style_image = gr.Image(
820
+ label="Style Image",
821
+ image_mode="RGB", type="filepath",
822
+ elem_id="style_image",
823
+ height=512, container=True,
824
+ interactive=False
825
+ )
826
+ style_choice = gr.Dropdown(
827
+ choices=list(styles.keys()),
828
+ label="Choose Style",
829
+ value="Pixar"
830
+ )
831
+ cfg_slider = gr.Slider(
832
+ minimum=3.0, maximum=10.0, value=7.5, step=0.1,
833
+ label="CFG Scale"
834
+ )
835
+ strength_slider = gr.Slider(
836
+ minimum=0.4, maximum=0.85, value=0.65, step=0.05,
837
+ label="SDEdit Strength"
838
+ )
839
+ style_button = gr.Button("🎨 Apply Style", interactive=False)
840
+ gr.Markdown(
841
+ "⬅️ Please click **Process Image** first. "
842
+ "**Apply Style** will transform the image in the **Processed Image** panel "
843
+ "according to the selected style."
844
+ )
845
+
846
+
847
+ with gr.Row():
848
+ with gr.Tabs(elem_id="render_output"):
849
+ with gr.TabItem('🎥 Animation Results'):
850
+ # ✅ Put the Generate Avatar button on its own row
851
+ with gr.Row():
852
+ with gr.Column(scale=1, elem_id="generate_block", min_width=200):
853
+ submit = gr.Button('🚀 Generate Avatar', elem_id="avatarartist_generate", variant='primary',
854
+ interactive=False)
855
+ gr.Markdown("⬇️ Please click **Process Image** first before generating.",
856
+ elem_id="generate_tip")
857
+
858
+ # ✅ Show the animation result videos side by side
859
+ with gr.Row():
860
+ output_video = gr.Video(
861
+ label="Generated Animation Input Video View",
862
+ format="mp4", height=512, width=512,
863
+ autoplay=True
864
+ )
865
+
866
+ output_video_2 = gr.Video(
867
+ label="Generated Animation Rotate View",
868
+ format="mp4", height=512, width=512,
869
+ autoplay=True
870
+ )
871
+
872
+ output_video_3 = gr.Video(
873
+ label="Generated Animation Rotate View Depth",
874
+ format="mp4", height=512, width=512,
875
+ autoplay=True
876
+ )
877
+ def apply_style_and_mark(processed_image, style_choice, cfg, strength, working_dir):
878
+ styled = style_transfer(processed_image, styles[style_choice], cfg, strength, working_dir)
879
+ return styled, True
880
+
881
+ def process_image_and_enable_style(input_image, source_type, is_styled, wd):
882
+ processed_result, updated_source_type = process_image(input_image, source_type, is_styled, wd)
883
+ return processed_result, updated_source_type, gr.update(interactive=True), gr.update(interactive=True)
884
+ processed_image_button.click(
885
+ fn=prepare_working_dir,
886
+ inputs=[working_dir, is_styled],
887
+ outputs=[working_dir],
888
+ queue=False,
889
+ ).success(
890
+ fn=process_image_and_enable_style,
891
+ inputs=[input_image, source_type, is_styled, working_dir],
892
+ outputs=[processed_image, source_type, style_button, submit],
893
+ queue=True
894
+ )
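+ # The .success() chain runs image processing only after the working directory has been prepared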
895
+ style_button.click(
896
+ fn=apply_style_and_mark,
897
+ inputs=[processed_image, style_choice, cfg_slider, strength_slider, working_dir],
898
+ outputs=[style_image, is_styled]
899
+ )
900
+ submit.click(
901
+ fn=avatar_generation,
902
+ inputs=[processed_image, working_dir, video_input, source_type, is_styled, style_image],
903
+ outputs=[output_video, output_video_2, output_video_3],  # ⏳ videos are shown once generation finishes
904
+ queue=True
905
+ )
906
+
907
+
908
+ demo.queue()
909
+ demo.launch(server_name="0.0.0.0")
910
+
911
+
912
+ if __name__ == '__main__':
913
+ import torch.multiprocessing as mp
914
+ import transformers
915
+ mp.set_start_method('spawn', force=True)
916
+ launch_pretrained()
917
+ image_folder = "./demo_data/source_img/img_generate_different_domain/images512x512/demo_imgs"
918
+ example_img_names = os.listdir(image_folder)
919
+ render_model, sample_steps, DiT_model, \
920
+ vae_triplane, image_encoder, dinov2, dino_img_processor, clip_image_processor, std, mean, ws_avg, Faceverse, device, input_process_model = model_define()
921
+ controlnet_path = './pretrained_model/control'
922
+ controlnet = ControlNetModel.from_pretrained(
923
+ controlnet_path, torch_dtype=torch.float16
924
+ )
925
+ sd_path = './pretrained_model/sd21'
926
+ text_encoder = transformers.CLIPTextModel.from_pretrained(
927
+ sd_path,
928
+ subfolder="text_encoder",
929
+ num_hidden_layers=12 - (2 - 1),
930
+ torch_dtype=torch.float16
931
+ )
932
+ pipeline_sd = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
933
+ sd_path, torch_dtype=torch.float16, text_encoder=text_encoder,
934
+ use_safetensors=True, controlnet=controlnet, variant="fp16"
935
+ ).to(device)
936
+ pipeline_sd.scheduler=DPMSolverMultistepScheduler.from_config(pipeline_sd.scheduler.config, use_karras_sigmas=True)
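+ # Karras sigma spacing with DPM-Solver++ multistep is a common choice for good quality at the 30 steps used in style_transfer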
937
+
938
+ demo_cam = False
939
+ base_coff = np.load(
940
+ 'pretrained_model/temp.npy').astype(
941
+ np.float32)
942
+ base_coff = torch.from_numpy(base_coff).float()
943
+ Faceverse = Faceverse_manager(device=device, base_coeff=base_coff)
944
+ launch_gradio_app()
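+
+ # Example launch (assuming the pretrained weights and demo data are in place):
+ #   python app.py --sampling_algo dpm-solver --step 20 --cfg_scale 4.5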