root committed on
Commit
7c65853
·
1 Parent(s): 4738bab

updating paths

Browse files
Files changed (1) hide show
  1. handler.py +13 -12
handler.py CHANGED
@@ -47,35 +47,36 @@ class EndpointHandler():
47
 
48
  vae = AutoencoderKL.from_pretrained(config_path).to(device, dtype=self.weight_dtype)
49
 
50
-
51
  pretrained_base_model_path = os.path.join(base_dir, 'pretrained_weights', 'stable-diffusion-v1-5')
52
 
53
- # Ensure the path exists
54
- if not os.path.exists(pretrained_base_model_path):
55
- raise FileNotFoundError(f"The folder was not found at: {pretrained_base_model_path}")
56
-
57
  reference_unet = UNet2DConditionModel.from_pretrained(
58
  pretrained_base_model_path,
59
  subfolder="unet"
60
  ).to(device, dtype=self.weight_dtype)
61
 
62
- inference_config_path = self.config.inference_config
 
 
 
 
 
 
63
  infer_config = OmegaConf.load(inference_config_path)
64
  denoising_unet = UNet3DConditionModel.from_pretrained_2d(
65
- self.config.pretrained_base_model_path,
66
- self.config.motion_module_path,
67
  subfolder="unet",
68
  unet_additional_kwargs=infer_config.unet_additional_kwargs,
69
  ).to(device, dtype=self.weight_dtype)
70
 
71
  pose_guider = PoseGuider(320, block_out_channels=(16, 32, 96, 256)).to(device, dtype=self.weight_dtype)
72
- image_enc = CLIPVisionModelWithProjection.from_pretrained(self.config.image_encoder_path).to(device, dtype=self.weight_dtype)
73
  sched_kwargs = OmegaConf.to_container(infer_config.noise_scheduler_kwargs)
74
  scheduler = DDIMScheduler(**sched_kwargs)
75
 
76
- denoising_unet.load_state_dict(torch.load(self.config.denoising_unet_path, map_location="cpu"), strict=False)
77
- reference_unet.load_state_dict(torch.load(self.config.reference_unet_path, map_location="cpu"))
78
- pose_guider.load_state_dict(torch.load(self.config.pose_guider_path, map_location="cpu"))
79
 
80
  self.pipeline = Pose2VideoPipeline(
81
  vae=vae,
 
47
 
48
  vae = AutoencoderKL.from_pretrained(config_path).to(device, dtype=self.weight_dtype)
49
 
 
50
  pretrained_base_model_path = os.path.join(base_dir, 'pretrained_weights', 'stable-diffusion-v1-5')
51
 
 
 
 
 
52
  reference_unet = UNet2DConditionModel.from_pretrained(
53
  pretrained_base_model_path,
54
  subfolder="unet"
55
  ).to(device, dtype=self.weight_dtype)
56
 
57
+ inference_config_path = os.path.join(base_dir, 'configs', 'inference', 'inference_v2.yaml')
58
+ motion_module_path = os.path.join(base_dir, 'pretrained_weights', 'motion_module.pth')
59
+ denoising_unet_path = os.path.join(base_dir, 'pretrained_weights', 'denoising_unet.pth')
60
+ reference_unet_path = os.path.join(base_dir, 'pretrained_weights', 'reference_unet.pth')
61
+ pose_guider_path = os.path.join(base_dir, 'pretrained_weights', 'pose_guider.pth')
62
+ image_encoder_path = os.path.join(base_dir, 'pretrained_weights', 'image_encoder')
63
+
64
  infer_config = OmegaConf.load(inference_config_path)
65
  denoising_unet = UNet3DConditionModel.from_pretrained_2d(
66
+ pretrained_base_model_path,
67
+ motion_module_path,
68
  subfolder="unet",
69
  unet_additional_kwargs=infer_config.unet_additional_kwargs,
70
  ).to(device, dtype=self.weight_dtype)
71
 
72
  pose_guider = PoseGuider(320, block_out_channels=(16, 32, 96, 256)).to(device, dtype=self.weight_dtype)
73
+ image_enc = CLIPVisionModelWithProjection.from_pretrained(image_encoder_path).to(device, dtype=self.weight_dtype)
74
  sched_kwargs = OmegaConf.to_container(infer_config.noise_scheduler_kwargs)
75
  scheduler = DDIMScheduler(**sched_kwargs)
76
 
77
+ denoising_unet.load_state_dict(torch.load(denoising_unet_path, map_location="cpu"), strict=False)
78
+ reference_unet.load_state_dict(torch.load(reference_unet_path, map_location="cpu"))
79
+ pose_guider.load_state_dict(torch.load(pose_guider_path, map_location="cpu"))
80
 
81
  self.pipeline = Pose2VideoPipeline(
82
  vae=vae,