Sushantkas committed
Commit bc73034 · verified · 1 Parent(s): 58a4d85

Update app.py

Files changed (1)
  1. app.py +8 -16
app.py CHANGED
@@ -2,32 +2,24 @@ import spaces
 import gradio as gr
 import torch
 import numpy as np
-from diffusers import AutoencoderKLWan, WanImageToVideoPipeline
+from diffusers import WanImageToVideoPipeline
 from diffusers.utils import export_to_video, load_image
 from transformers import CLIPVisionModel
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-## Loading Encoder
-model_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
-print(f"Using video Model: {model_id}")
 
 
-print("###### Loading image encoder ######")
+## Loading Encoder
 
-image_encoder = CLIPVisionModel.from_pretrained(
-    model_id, subfolder="image_encoder", torch_dtype=torch.float32
-)
 
 
-print("###### Loading VAE encoder ######")
-vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
+model_id = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
 
-print("Loading Pipeline...")
+print(f"Using video Model: {model_id}")
+dtype = torch.bfloat16
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-pipe = WanImageToVideoPipeline.from_pretrained(
-    model_id, vae=vae, image_encoder=image_encoder, torch_dtype=torch.bfloat16
-)
+pipe = WanImageToVideoPipeline.from_pretrained(model_id, torch_dtype=dtype)
+pipe.to(device)
 
 try:
     pipe.to(device)
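
The commit stops at loading: it collapses the separate CLIPVisionModel and AutoencoderKLWan loads into a single WanImageToVideoPipeline.from_pretrained call against the Wan2.2 checkpoint, with the whole pipeline in bfloat16. For context, a minimal sketch of how such a pipeline is typically driven with the imports already present in app.py (load_image, export_to_video); the input path, prompt, and sampling parameters below are illustrative assumptions, not code from this commit:

    # Hypothetical usage sketch; not part of the commit.
    image = load_image("first_frame.png")  # assumed input image path

    output = pipe(
        image=image,
        prompt="a cat walking through tall grass, cinematic",  # illustrative prompt
        height=480,
        width=832,
        num_frames=81,              # 81 is the default frame count for Wan pipelines
        guidance_scale=5.0,
        num_inference_steps=40,
    )

    # output.frames[0] is the generated frame sequence for the first prompt
    export_to_video(output.frames[0], "output.mp4", fps=16)

One side effect of the change worth noting: pipe.to(device) now runs twice, once directly after from_pretrained and again inside the try block that follows.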