Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
|
@@ -85,13 +85,13 @@ def infer(prompt, video_path, condition, video_length, is_long_video):
|
|
| 85 |
else:
|
| 86 |
annotator = controlnet_parser_dict[condition]()
|
| 87 |
|
| 88 |
- tokenizer = CLIPTokenizer.from_pretrained(sd_path, subfolder="tokenizer"
|
| 89 |
- text_encoder = CLIPTextModel.from_pretrained(sd_path, subfolder="text_encoder"
|
| 90 |
- vae = AutoencoderKL.from_pretrained(sd_path, subfolder="vae"
|
| 91 |
- unet = UNet3DConditionModel.from_pretrained_2d(sd_path, subfolder="unet"
|
| 92 |
controlnet = ControlNetModel3D.from_pretrained_2d(controlnet_dict[condition]).to(dtype=torch.float16)
|
| 93 |
interpolater = IFNet(ckpt_path=inter_path).to(dtype=torch.float16)
|
| 94 |
- scheduler=DDIMScheduler.from_pretrained(sd_path, subfolder="scheduler"
|
| 95 |
|
| 96 |
pipe = ControlVideoPipeline(
|
| 97 |
vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
|
|
|
|
| 85 |
else:
|
| 86 |
annotator = controlnet_parser_dict[condition]()
|
| 87 |
|
| 88 |
+ tokenizer = CLIPTokenizer.from_pretrained(sd_path, subfolder="tokenizer")
|
| 89 |
+ text_encoder = CLIPTextModel.from_pretrained(sd_path, subfolder="text_encoder").to(dtype=torch.float16)
|
| 90 |
+ vae = AutoencoderKL.from_pretrained(sd_path, subfolder="vae").to(dtype=torch.float16)
|
| 91 |
+ unet = UNet3DConditionModel.from_pretrained_2d(sd_path, subfolder="unet").to(dtype=torch.float16)
|
| 92 |
controlnet = ControlNetModel3D.from_pretrained_2d(controlnet_dict[condition]).to(dtype=torch.float16)
|
| 93 |
interpolater = IFNet(ckpt_path=inter_path).to(dtype=torch.float16)
|
| 94 |
+ scheduler=DDIMScheduler.from_pretrained(sd_path, subfolder="scheduler")
|
| 95 |
|
| 96 |
pipe = ControlVideoPipeline(
|
| 97 |
vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet,
|