aducsdr committed on
Commit f70185d · verified · 1 Parent(s): 19ee49f

Upload app (11).py

Files changed (1)
  1. app (11).py +509 -0
app (11).py ADDED
@@ -0,0 +1,509 @@
+ # // Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
+ # //
+ # // Licensed under the Apache License, Version 2.0 (the "License");
+ # // you may not use this file except in compliance with the License.
+ # // You may obtain a copy of the License at
+ # //
+ # //     http://www.apache.org/licenses/LICENSE-2.0
+ # //
+ # // Unless required by applicable law or agreed to in writing, software
+ # // distributed under the License is distributed on an "AS IS" BASIS,
+ # // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # // See the License for the specific language governing permissions and
+ # // limitations under the License.
+ import spaces
+ import subprocess
+
+ import os
+ import torch
+ import mediapy
+ from einops import rearrange
+ from omegaconf import OmegaConf
+ print(os.getcwd())
+ import datetime
+ from tqdm import tqdm
+ import gc
+
+ from data.image.transforms.divisible_crop import DivisibleCrop
+ from data.image.transforms.na_resize import NaResize
+ from data.video.transforms.rearrange import Rearrange
+ if os.path.exists("./projects/video_diffusion_sr/color_fix.py"):
+     from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
+     use_colorfix = True
+ else:
+     use_colorfix = False
+     print('Note: color fix is not available!')
+ from torchvision.transforms import Compose, Lambda, Normalize
+ from torchvision.io.video import read_video
+ import argparse
+ from PIL import Image
+
+ from common.distributed import (
+     get_device,
+     init_torch,
+ )
+
+ from common.distributed.advanced import (
+     get_data_parallel_rank,
+     get_data_parallel_world_size,
+     get_sequence_parallel_rank,
+     get_sequence_parallel_world_size,
+     init_sequence_parallel,
+ )
+
+ from projects.video_diffusion_sr.infer import VideoDiffusionInfer
+ from common.config import load_config
+ from common.distributed.ops import sync_data
+ from common.seed import set_seed
+ from common.partition import partition_by_groups, partition_by_size
+
+ import gradio as gr
+ from pathlib import Path
+ from urllib.parse import urlparse
+ from torch.hub import download_url_to_file, get_dir
+ import shlex
+ import uuid
+ import mimetypes
+ import torchvision.transforms as T
+
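+ # Fake a single-process torch.distributed setup so the distributed helpers
+ # (init_torch, sync_data, sequence parallel) work on a one-GPU Space.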
+ os.environ["MASTER_ADDR"] = "127.0.0.1"
+ os.environ["MASTER_PORT"] = "12355"
+ os.environ["RANK"] = str(0)
+ os.environ["WORLD_SIZE"] = str(1)
+
+ # Install flash-attn at runtime; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE makes it
+ # use a prebuilt wheel instead of compiling CUDA kernels. Merge with os.environ
+ # so pip keeps PATH and the rest of its environment.
+ subprocess.run(
+     "pip install flash-attn --no-build-isolation",
+     env={**os.environ, "FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
+     shell=True,
+ )
+
+ def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
+     """Load a file from an HTTP URL, downloading it if it is not cached yet.
+
+     Reference: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
+
+     Args:
+         url (str): URL to be downloaded.
+         model_dir (str): The path to save the downloaded model. Should be a full path.
+             If None, use the pytorch hub_dir. Default: None.
+         progress (bool): Whether to show the download progress. Default: True.
+         file_name (str): The downloaded file name. If None, use the file name in the url.
+             Default: None.
+
+     Returns:
+         str: The path to the downloaded file.
+     """
+     if model_dir is None:  # use the pytorch hub_dir
+         hub_dir = get_dir()
+         model_dir = os.path.join(hub_dir, 'checkpoints')
+
+     os.makedirs(model_dir, exist_ok=True)
+
+     parts = urlparse(url)
+     filename = os.path.basename(parts.path)
+     if file_name is not None:
+         filename = file_name
+     cached_file = os.path.abspath(os.path.join(model_dir, filename))
+     if not os.path.exists(cached_file):
+         print(f'Downloading: "{url}" to {cached_file}\n')
+         download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
+     return cached_file
+
+
+ # os.system("pip freeze")
+ ckpt_dir = Path('./ckpts')
+ if not ckpt_dir.exists():
+     ckpt_dir.mkdir()
+
+ pretrain_model_url = {
+     'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
+     'dit': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
+     'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
+     'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt',
+     'apex': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl'
+ }
+ # download weights
+ if not os.path.exists('./ckpts/seedvr2_ema_3b.pth'):
+     load_file_from_url(url=pretrain_model_url['dit'], model_dir='./ckpts/', progress=True, file_name=None)
+ if not os.path.exists('./ckpts/ema_vae.pth'):
+     load_file_from_url(url=pretrain_model_url['vae'], model_dir='./ckpts/', progress=True, file_name=None)
+ if not os.path.exists('./pos_emb.pt'):
+     load_file_from_url(url=pretrain_model_url['pos_emb'], model_dir='./', progress=True, file_name=None)
+ if not os.path.exists('./neg_emb.pt'):
+     load_file_from_url(url=pretrain_model_url['neg_emb'], model_dir='./', progress=True, file_name=None)
+ if not os.path.exists('./apex-0.1-cp310-cp310-linux_x86_64.whl'):
+     load_file_from_url(url=pretrain_model_url['apex'], model_dir='./', progress=True, file_name=None)
+
+ subprocess.run(shlex.split("pip install apex-0.1-cp310-cp310-linux_x86_64.whl"))
+ print("✅ Apex setup completed")
+
+ # download demo videos
+ torch.hub.download_url_to_file(
+     'https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/23_1_lq.mp4',
+     '01.mp4')
+ torch.hub.download_url_to_file(
+     'https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/28_1_lq.mp4',
+     '02.mp4')
+ torch.hub.download_url_to_file(
+     'https://huggingface.co/datasets/Iceclear/SeedVR_VideoDemos/resolve/main/seedvr_videos_crf23/aigc1k/2_1_lq.mp4',
+     '03.mp4')
+
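+ # Runner setup: load the 3B config, initialize torch (and sequence parallel
+ # when sp_size > 1), then load the DiT and VAE checkpoints onto the GPU.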
+ def configure_sequence_parallel(sp_size):
+     if sp_size > 1:
+         init_sequence_parallel(sp_size)
+
+ @spaces.GPU(duration=100)
+ def configure_runner(sp_size):
+     config_path = os.path.join('./configs_3b', 'main.yaml')
+     config = load_config(config_path)
+     runner = VideoDiffusionInfer(config)
+     OmegaConf.set_readonly(runner.config, False)
+
+     init_torch(cudnn_benchmark=False, timeout=datetime.timedelta(seconds=3600))
+     configure_sequence_parallel(sp_size)
+     runner.configure_dit_model(device="cuda", checkpoint='./ckpts/seedvr2_ema_3b.pth')
+     runner.configure_vae_model()
+     # Set memory limit.
+     if hasattr(runner.vae, "set_memory_limit"):
+         runner.vae.set_memory_limit(**runner.config.vae.memory_limit)
+     return runner
+
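+ # A single restoration pass: sample noise, lightly noise the conditioning
+ # latents (cond_noise_scale = 0.1), build "sr" conditions, and run one-step
+ # diffusion inference under bf16 autocast.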
+ @spaces.GPU(duration=100)
+ def generation_step(runner, text_embeds_dict, cond_latents):
+     def _move_to_cuda(x):
+         return [i.to(torch.device("cuda")) for i in x]
+
+     noises = [torch.randn_like(latent) for latent in cond_latents]
+     aug_noises = [torch.randn_like(latent) for latent in cond_latents]
+     print(f"Generating with noise shape: {noises[0].size()}.")
+     noises, aug_noises, cond_latents = sync_data((noises, aug_noises, cond_latents), 0)
+     noises, aug_noises, cond_latents = list(
+         map(lambda x: _move_to_cuda(x), (noises, aug_noises, cond_latents))
+     )
+     cond_noise_scale = 0.1
+
+     def _add_noise(x, aug_noise):
+         t = torch.tensor([1000.0], device=torch.device("cuda")) * cond_noise_scale
+         shape = torch.tensor(x.shape[1:], device=torch.device("cuda"))[None]
+         t = runner.timestep_transform(t, shape)
+         print(f"Timestep shifting from {1000.0 * cond_noise_scale} to {t}.")
+         x = runner.schedule.forward(x, aug_noise, t)
+         return x
+
+     conditions = [
+         runner.get_condition(
+             noise,
+             task="sr",
+             latent_blur=_add_noise(latent_blur, aug_noise),
+         )
+         for noise, aug_noise, latent_blur in zip(noises, aug_noises, cond_latents)
+     ]
+
+     with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
+         video_tensors = runner.inference(
+             noises=noises,
+             conditions=conditions,
+             dit_offload=False,
+             **text_embeds_dict,
+         )
+
+     samples = [
+         (
+             rearrange(video[:, None], "c t h w -> t c h w")
+             if video.ndim == 3
+             else rearrange(video, "c t h w -> t c h w")
+         )
+         for video in video_tensors
+     ]
+     del video_tensors
+
+     return samples
+
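+ # End-to-end pipeline: read the input media, VAE-encode it to conditioning
+ # latents, run generation_step, then color-fix and write the restored output.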
+ @spaces.GPU(duration=100)
+ def generation_loop(video_path='./test_videos', seed=666, fps_out=12, batch_size=1, cfg_scale=1.0, cfg_rescale=0.0, sample_steps=1, res_h=1280, res_w=720, sp_size=1):
+     runner = configure_runner(1)
+
+     def _extract_text_embeds():
+         # Load the pre-computed prompt embeddings (no text encoder forward here).
+         positive_prompts_embeds = []
+         for texts_pos in tqdm(original_videos_local):
+             text_pos_embeds = torch.load('pos_emb.pt')
+             text_neg_embeds = torch.load('neg_emb.pt')
+
+             positive_prompts_embeds.append(
+                 {"texts_pos": [text_pos_embeds], "texts_neg": [text_neg_embeds]}
+             )
+         gc.collect()
+         torch.cuda.empty_cache()
+         return positive_prompts_embeds
+
+     def cut_videos(videos, sp_size):
+         # Cap at 121 frames, then pad with the last frame until the length
+         # satisfies (t - 1) % (4 * sp_size) == 0, as the assert below requires.
+         if videos.size(1) > 121:
+             videos = videos[:, :121]
+         t = videos.size(1)
+         if t <= 4 * sp_size:
+             print(f"Cut input video size: {videos.size()}")
+             padding = [videos[:, -1].unsqueeze(1)] * (4 * sp_size - t + 1)
+             padding = torch.cat(padding, dim=1)
+             videos = torch.cat([videos, padding], dim=1)
+             return videos
+         if (t - 1) % (4 * sp_size) == 0:
+             return videos
+         else:
+             padding = [videos[:, -1].unsqueeze(1)] * (
+                 4 * sp_size - ((t - 1) % (4 * sp_size))
+             )
+             padding = torch.cat(padding, dim=1)
+             videos = torch.cat([videos, padding], dim=1)
+             assert (videos.size(1) - 1) % (4 * sp_size) == 0
+             return videos
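+     # For example, with sp_size=1 a 30-frame clip is padded to 33 frames
+     # ((33 - 1) % 4 == 0), while a 121-frame clip passes through unchanged
+     # ((121 - 1) % 4 == 0).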
+
+     # classifier-free guidance
+     runner.config.diffusion.cfg.scale = cfg_scale
+     runner.config.diffusion.cfg.rescale = cfg_rescale
+     # sampling steps
+     runner.config.diffusion.timesteps.sampling.steps = sample_steps
+     runner.configure_diffusion()
+
+     # set random seed
+     seed = seed % (2**32)  # keep the seed within the 32-bit range
+     set_seed(seed, same_across_ranks=True)
+     os.makedirs('output/', exist_ok=True)
+
+     # get test prompts
+     original_videos = [video_path.split('/')[-1]]
+
+     # divide the prompts into different groups
+     original_videos_group = original_videos
+     # store prompt mapping
+     original_videos_local = original_videos_group
+     original_videos_local = partition_by_size(original_videos_local, batch_size)
+
+     # pre-extract the text embeddings
+     positive_prompts_embeds = _extract_text_embeds()
+
+     video_transform = Compose(
+         [
+             NaResize(
+                 resolution=(res_h * res_w) ** 0.5,
+                 mode="area",
+                 # Upsample image, model only trained for high res.
+                 downsample_only=False,
+             ),
+             Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
+             DivisibleCrop((16, 16)),
+             Normalize(0.5, 0.5),
+             Rearrange("t c h w -> c t h w"),
+         ]
+     )
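+     # NaResize with resolution sqrt(res_h * res_w) rescales so the output area
+     # is roughly res_h * res_w while keeping aspect ratio; DivisibleCrop then
+     # trims H and W to multiples of 16 (presumably the model's spatial stride).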
+
+     # generation loop
+     for videos, text_embeds in tqdm(zip(original_videos_local, positive_prompts_embeds)):
+         # read condition latents
+         cond_latents = []
+         for video in videos:
+             media_type, _ = mimetypes.guess_type(video_path)
+             is_image = media_type and media_type.startswith("image")
+             is_video = media_type and media_type.startswith("video")
+             if is_video:
+                 video = (
+                     read_video(
+                         os.path.join(video_path), output_format="TCHW"
+                     )[0]
+                     / 255.0
+                 )
+                 if video.size(0) > 121:
+                     video = video[:121]
+                 print(f"Read video size: {video.size()}")
+                 output_dir = 'output/' + str(uuid.uuid4()) + '.mp4'
+             else:
+                 # Images use a fixed 2560x1440 target area instead of res_h * res_w.
+                 video_transform = Compose(
+                     [
+                         NaResize(
+                             resolution=(2560 * 1440) ** 0.5,
+                             mode="area",
+                             # Upsample image, model only trained for high res.
+                             downsample_only=False,
+                         ),
+                         Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
+                         DivisibleCrop((16, 16)),
+                         Normalize(0.5, 0.5),
+                         Rearrange("t c h w -> c t h w"),
+                     ]
+                 )
+
+                 img = Image.open(video_path).convert("RGB")
+                 video = T.ToTensor()(img).unsqueeze(0)  # (T=1, C, H, W)
+                 print(f"Read Image size: {video.size()}")
+                 output_dir = 'output/' + str(uuid.uuid4()) + '.png'
+             cond_latents.append(video_transform(video.to(torch.device("cuda"))))
+
+         ori_lengths = [video.size(1) for video in cond_latents]
+         input_videos = cond_latents
+         if is_video:
+             cond_latents = [cut_videos(video, sp_size) for video in cond_latents]
+
+         # runner.dit.to("cpu")
+         print(f"Encoding videos: {list(map(lambda x: x.size(), cond_latents))}")
+         # runner.vae.to(torch.device("cuda"))
+         cond_latents = runner.vae_encode(cond_latents)
+         # runner.vae.to("cpu")
+         # runner.dit.to(torch.device("cuda"))
+
+         for i, emb in enumerate(text_embeds["texts_pos"]):
+             text_embeds["texts_pos"][i] = emb.to(torch.device("cuda"))
+         for i, emb in enumerate(text_embeds["texts_neg"]):
+             text_embeds["texts_neg"][i] = emb.to(torch.device("cuda"))
+
+         samples = generation_step(runner, text_embeds, cond_latents=cond_latents)
+         # runner.dit.to("cpu")
+         del cond_latents
+
+         # dump samples to the output directory
+         for path, input, sample, ori_length in zip(
+             videos, input_videos, samples, ori_lengths
+         ):
+             if ori_length < sample.shape[0]:
+                 sample = sample[:ori_length]
+             # color fix
+             input = (
+                 rearrange(input[:, None], "c t h w -> t c h w")
+                 if input.ndim == 3
+                 else rearrange(input, "c t h w -> t c h w")
+             )
+             if use_colorfix:
+                 sample = wavelet_reconstruction(
+                     sample.to("cpu"), input[: sample.size(0)].to("cpu")
+                 )
+             else:
+                 sample = sample.to("cpu")
+             sample = (
+                 rearrange(sample[:, None], "t c h w -> t h w c")
+                 if sample.ndim == 3
+                 else rearrange(sample, "t c h w -> t h w c")
+             )
+             sample = sample.clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round()
+             sample = sample.to(torch.uint8).numpy()
+
+             if is_image:
+                 mediapy.write_image(output_dir, sample[0])
+             else:
+                 mediapy.write_video(output_dir, sample, fps=fps_out)
+
+         # print(f"Generated video size: {sample.shape}")
+         gc.collect()
+         torch.cuda.empty_cache()
+     if is_image:
+         return output_dir, None, output_dir
+     else:
+         return None, output_dir, output_dir
+
+
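+ # Gradio UI: a file input plus seed/FPS controls; the handler returns either
+ # an image or a video depending on the uploaded media type, plus a download link.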
+ with gr.Blocks(title="SeedVR2: One-Step Video Restoration via Diffusion Adversarial Post-Training") as demo:
+     # Top logo and title
+     gr.HTML("""
+     <div style='text-align:center; margin-bottom: 10px;'>
+         <img src='https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/assets/seedvr_logo.png' style='height:40px;' alt='SeedVR logo'/>
+     </div>
+     <p><b>Official Gradio demo</b> for
+     <a href='https://github.com/ByteDance-Seed/SeedVR' target='_blank'>
+     <b>SeedVR2: One-Step Video Restoration via Diffusion Adversarial Post-Training</b></a>.<br>
+     🔥 <b>SeedVR2</b> is a one-step image and video restoration algorithm for real-world and AIGC content.
+     </p>
+     """)
+
+     # Interface
+     with gr.Row():
+         input_video = gr.File(label="Upload image or video", type="filepath")
+         seed = gr.Number(label="Seed", value=666)
+         fps = gr.Number(label="Output FPS (video only)", value=24)
+
+     with gr.Row():
+         output_video = gr.Video(label="Output Video")
+         output_image = gr.Image(label="Output Image")
+         download_link = gr.File(label="Download the output")
+
+     run_button = gr.Button("Run")
+     run_button.click(fn=generation_loop, inputs=[input_video, seed, fps], outputs=[output_image, output_video, download_link])
+
+     # Examples
+     gr.Examples(
+         examples=[
+             ["./01.mp4", 4, 24],
+             ["./02.mp4", 4, 24],
+             ["./03.mp4", 4, 24],
+         ],
+         inputs=[input_video, seed, fps]
+     )
+
+     # Article/Footer
+     gr.HTML("""
+     <hr>
+     <p>If you find SeedVR helpful, please ⭐ the
+     <a href='https://github.com/ByteDance-Seed/SeedVR' target='_blank'>GitHub repository</a>:</p>
+
+     <a href="https://github.com/ByteDance-Seed/SeedVR" target="_blank">
+         <img src="https://img.shields.io/github/stars/ByteDance-Seed/SeedVR?style=social" alt="GitHub Stars">
+     </a>
+
+     <h4>Notice</h4>
+     <p>This demo supports up to <b>720p and 121 frames for videos, or 2K for images</b>.
+     For other use cases (image restoration beyond 2K, video resolutions beyond 720p, etc.), check the <a href='https://github.com/ByteDance-Seed/SeedVR' target='_blank'>GitHub repo</a>.</p>
+
+     <h4>Limitations</h4>
+     <p>The model may fail on heavily degraded inputs or AIGC clips with little motion, producing oversharpened or poorly restored results.</p>
+
+     <h4>Citation</h4>
+     <pre style="font-size: 12px;">
+     @article{wang2025seedvr2,
+       title={SeedVR2: One-Step Video Restoration via Diffusion Adversarial Post-Training},
+       author={Wang, Jianyi and Lin, Shanchuan and Lin, Zhijie and Ren, Yuxi and Wei, Meng and Yue, Zongsheng and Zhou, Shangchen and Chen, Hao and Zhao, Yang and Yang, Ceyuan and Xiao, Xuefeng and Loy, Chen Change and Jiang, Lu},
+       journal={arXiv preprint arXiv:2506.05301},
+       year={2025}
+     }
+
+     @inproceedings{wang2025seedvr,
+       title={SeedVR: Seeding Infinity in Diffusion Transformer Towards Generic Video Restoration},
+       author={Wang, Jianyi and Lin, Zhijie and Wei, Meng and Zhao, Yang and Yang, Ceyuan and Loy, Chen Change and Jiang, Lu},
+       booktitle={CVPR},
+       year={2025}
+     }
+     </pre>
+
+     <h4>License</h4>
+     <p>Licensed under the
+     <a href="http://www.apache.org/licenses/LICENSE-2.0" target="_blank">Apache 2.0 License</a>.</p>
+
+     <h4>Contact</h4>
+     <p>Email: <b>iceclearwjy@gmail.com</b></p>
+
+     <p>
+     <a href="https://twitter.com/Iceclearwjy">
+         <img src="https://img.shields.io/twitter/follow/Iceclearwjy?label=%40Iceclearwjy&style=social" alt="Twitter Follow">
+     </a>
+     <a href="https://github.com/IceClear">
+         <img src="https://img.shields.io/github/followers/IceClear?style=social" alt="GitHub Follow">
+     </a>
+     </p>
+
+     <p style="text-align:center;">
+     <img src="https://visitor-badge.laobi.icu/badge?page_id=ByteDance-Seed/SeedVR" alt="visitors">
+     </p>
+     """)
+
+ demo.queue()
+ demo.launch()