Fabrice-TIERCELIN committed on
Commit
e0f8b48
·
verified ·
1 Parent(s): b212faf

Upload 4 files

Browse files
Files changed (4) hide show
  1. README.md +15 -15
  2. app.py +0 -0
  3. app_endframe.py +1 -3
  4. requirements.txt +23 -40
README.md CHANGED
@@ -1,21 +1,21 @@
1
  ---
2
- title: SUPIR Image Upscaler
 
 
 
3
  sdk: gradio
4
- emoji: 📷
5
- sdk_version: 4.38.1
6
- app_file: app.py
7
- license: mit
8
- colorFrom: blue
9
- colorTo: pink
10
  tags:
11
- - Upscaling
12
- - Restoring
13
- - Image-to-Image
14
- - Image-2-Image
15
- - Img-to-Img
16
- - Img-2-Img
17
  - language models
18
  - LLMs
19
- short_description: Restore blurred or small images with prompt
20
  suggested_hardware: zero-a10g
21
- ---
 
 
 
1
  ---
2
+ title: FramePack/HunyuanVideo
3
+ emoji: 🎥
4
+ colorFrom: pink
5
+ colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 5.29.1
8
+ app_file: app_endframe.py
9
+ license: apache-2.0
10
+ short_description: Text-to-Video/Image-to-Video/Video extender
 
 
11
  tags:
12
+ - Image-to-Video
13
+ - Image-2-Video
14
+ - Img-to-Vid
15
+ - Img-2-Vid
 
 
16
  - language models
17
  - LLMs
 
18
  suggested_hardware: zero-a10g
19
+ ---
20
+
21
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
The diff for this file is too large to render. See raw diff
 
app_endframe.py CHANGED
@@ -496,7 +496,7 @@ def worker(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, bat
496
 
497
  # 20250509 pftq: Dynamic frame allocation like original num_clean_frames, fix split error
498
  available_frames = video_latents.shape[2] if is_start_of_video else history_latents.shape[2]
499
- if is_start_of_video:
500
  effective_clean_frames = 1 # avoid jumpcuts from input video
501
  else:
502
  effective_clean_frames = max(0, num_clean_frames - 1) if num_clean_frames > 1 else 1
@@ -632,8 +632,6 @@ def worker(input_video, end_frame, end_frame_weight, prompt, n_prompt, seed, bat
632
  break
633
 
634
  history_pixels = torch.cat([input_video_pixels, history_pixels], dim=2)
635
- #overlapped_frames = latent_window_size * 4 - 3
636
- #history_pixels = soft_append_bcthw(input_video_pixels, history_pixels, overlapped_frames)
637
 
638
  output_filename = os.path.join(outputs_folder, f'{job_id}_final.mp4')
639
  save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
 
496
 
497
  # 20250509 pftq: Dynamic frame allocation like original num_clean_frames, fix split error
498
  available_frames = video_latents.shape[2] if is_start_of_video else history_latents.shape[2]
499
+ if is_start_of_video and False:
500
  effective_clean_frames = 1 # avoid jumpcuts from input video
501
  else:
502
  effective_clean_frames = max(0, num_clean_frames - 1) if num_clean_frames > 1 else 1
 
632
  break
633
 
634
  history_pixels = torch.cat([input_video_pixels, history_pixels], dim=2)
 
 
635
 
636
  output_filename = os.path.join(outputs_folder, f'{job_id}_final.mp4')
637
  save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
requirements.txt CHANGED
@@ -1,41 +1,24 @@
1
- pydantic==2.10.6
2
- fastapi==0.115.8
3
- gradio_imageslider==0.0.20
4
- gradio_client==1.7.0
5
- numpy==1.26.4
6
- requests==2.32.3
7
- sentencepiece==0.2.0
8
- tokenizers==0.19.1
9
- torchvision==0.22.0
10
- uvicorn==0.30.1
11
- wandb==0.17.4
12
- httpx==0.27.0
13
- transformers==4.42.4
14
- accelerate==0.32.1
15
- scikit-learn==1.5.1
16
- einops==0.8.0
17
- einops-exts==0.0.4
18
- timm==1.0.7
19
- openai-clip==1.0.1
20
- fsspec==2024.6.1
21
- kornia==0.7.3
22
- matplotlib==3.9.1
23
- ninja==1.11.1.1
24
- omegaconf==2.3.0
25
- opencv-python==4.11.0.86
26
- pandas==2.3.0
27
- pillow==11.2.1
28
- pytorch-lightning==2.5.1.post0
29
- PyYAML==6.0.2
30
- scipy==1.15.3
31
- tqdm==4.67.1
32
- triton==3.3.0
33
- urllib3==2.4.0
34
- webdataset==0.2.111
35
- xformers==0.0.30
36
- facexlib==0.3.0
37
- k-diffusion==0.1.1.post1
38
  diffusers==0.33.1
39
- pillow-heif==0.22.0
40
-
41
- open-clip-torch==2.24.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==1.6.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  diffusers==0.33.1
3
+ transformers==4.46.2
4
+ sentencepiece==0.2.0
5
+ pillow==11.1.0
6
+ av==12.1.0
7
+ numpy==1.26.2
8
+ scipy==1.12.0
9
+ requests==2.31.0
10
+ torchsde==0.2.6
11
+ torch>=2.0.0
12
+ torchvision
13
+ torchaudio
14
+ einops
15
+ opencv-contrib-python
16
+ safetensors
17
+ huggingface_hub
18
+ spaces
19
+ decord
20
+ imageio_ffmpeg
21
+ sageattention
22
+ xformers==0.0.29.post3
23
+ bitsandbytes
24
+ pillow-heif==0.22.0