wlyu-adobe committed on
Commit
5ca8dc1
·
1 Parent(s): a91a76e

Add ZeroGPU duration parameter and improve error handling

Browse files
Files changed (1) hide show
  1. app.py +46 -38
app.py CHANGED
@@ -98,40 +98,46 @@ class FaceLiftPipeline:
98
 
99
  # Load models (keep on CPU for ZeroGPU compatibility)
100
  print("Loading models...")
101
- self.mvdiffusion_pipeline = StableUnCLIPImg2ImgPipeline.from_pretrained(
102
- str(workspace_dir / "checkpoints/mvdiffusion/pipeckpts"),
103
- torch_dtype=torch.float16,
104
- )
105
- # Don't move to device or enable xformers here - will be done in GPU-decorated function
106
- self._models_on_gpu = False
107
-
108
- with open(workspace_dir / "configs/gslrm.yaml", "r") as f:
109
- config = edict(yaml.safe_load(f))
110
-
111
- module_name, class_name = config.model.class_name.rsplit(".", 1)
112
- module = __import__(module_name, fromlist=[class_name])
113
- ModelClass = getattr(module, class_name)
114
-
115
- self.gs_lrm_model = ModelClass(config)
116
- checkpoint = torch.load(
117
- workspace_dir / "checkpoints/gslrm/ckpt_0000000000021125.pt",
118
- map_location="cpu"
119
- )
120
- # Filter out loss_calculator weights (training-only, not needed for inference)
121
- state_dict = {k: v for k, v in checkpoint["model"].items()
122
- if not k.startswith("loss_calculator.")}
123
- self.gs_lrm_model.load_state_dict(state_dict)
124
- # Keep on CPU initially - will move to GPU in decorated function
125
-
126
- self.color_prompt_embedding = torch.load(
127
- workspace_dir / "mvdiffusion/fixed_prompt_embeds_6view/clr_embeds.pt",
128
- map_location="cpu"
129
- )
130
-
131
- with open(workspace_dir / "utils_folder/opencv_cameras.json", 'r') as f:
132
- self.cameras_data = json.load(f)["frames"]
133
-
134
- print("Models loaded successfully!")
 
 
 
 
 
 
135
 
136
  def _move_models_to_gpu(self):
137
  """Move models to GPU and enable optimizations. Called within @spaces.GPU context."""
@@ -145,14 +151,13 @@ class FaceLiftPipeline:
145
  self._models_on_gpu = True
146
  print("Models on GPU, xformers enabled!")
147
 
148
- @spaces.GPU
149
  def generate_3d_head(self, image_path, auto_crop=True, guidance_scale=3.0,
150
  random_seed=4, num_steps=50):
151
  """Generate 3D head from single image."""
152
- # Move models to GPU now that we're in the GPU context
153
- self._move_models_to_gpu()
154
-
155
  try:
 
 
156
  # Setup output directory
157
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
158
  output_dir = self.output_dir / timestamp
@@ -260,6 +265,9 @@ class FaceLiftPipeline:
260
  str(turntable_path), str(ply_path)
261
 
262
  except Exception as e:
 
 
 
263
  raise gr.Error(f"Generation failed: {str(e)}")
264
 
265
 
 
98
 
99
  # Load models (keep on CPU for ZeroGPU compatibility)
100
  print("Loading models...")
101
+ try:
102
+ self.mvdiffusion_pipeline = StableUnCLIPImg2ImgPipeline.from_pretrained(
103
+ str(workspace_dir / "checkpoints/mvdiffusion/pipeckpts"),
104
+ torch_dtype=torch.float16,
105
+ )
106
+ # Don't move to device or enable xformers here - will be done in GPU-decorated function
107
+ self._models_on_gpu = False
108
+
109
+ with open(workspace_dir / "configs/gslrm.yaml", "r") as f:
110
+ config = edict(yaml.safe_load(f))
111
+
112
+ module_name, class_name = config.model.class_name.rsplit(".", 1)
113
+ module = __import__(module_name, fromlist=[class_name])
114
+ ModelClass = getattr(module, class_name)
115
+
116
+ self.gs_lrm_model = ModelClass(config)
117
+ checkpoint = torch.load(
118
+ workspace_dir / "checkpoints/gslrm/ckpt_0000000000021125.pt",
119
+ map_location="cpu"
120
+ )
121
+ # Filter out loss_calculator weights (training-only, not needed for inference)
122
+ state_dict = {k: v for k, v in checkpoint["model"].items()
123
+ if not k.startswith("loss_calculator.")}
124
+ self.gs_lrm_model.load_state_dict(state_dict)
125
+ # Keep on CPU initially - will move to GPU in decorated function
126
+
127
+ self.color_prompt_embedding = torch.load(
128
+ workspace_dir / "mvdiffusion/fixed_prompt_embeds_6view/clr_embeds.pt",
129
+ map_location="cpu"
130
+ )
131
+
132
+ with open(workspace_dir / "utils_folder/opencv_cameras.json", 'r') as f:
133
+ self.cameras_data = json.load(f)["frames"]
134
+
135
+ print("Models loaded successfully!")
136
+ except Exception as e:
137
+ print(f"Error loading models: {e}")
138
+ import traceback
139
+ traceback.print_exc()
140
+ raise
141
 
142
  def _move_models_to_gpu(self):
143
  """Move models to GPU and enable optimizations. Called within @spaces.GPU context."""
 
151
  self._models_on_gpu = True
152
  print("Models on GPU, xformers enabled!")
153
 
154
+ @spaces.GPU(duration=120)
155
  def generate_3d_head(self, image_path, auto_crop=True, guidance_scale=3.0,
156
  random_seed=4, num_steps=50):
157
  """Generate 3D head from single image."""
 
 
 
158
  try:
159
+ # Move models to GPU now that we're in the GPU context
160
+ self._move_models_to_gpu()
161
  # Setup output directory
162
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
163
  output_dir = self.output_dir / timestamp
 
265
  str(turntable_path), str(ply_path)
266
 
267
  except Exception as e:
268
+ import traceback
269
+ error_details = traceback.format_exc()
270
+ print(f"Error details:\n{error_details}")
271
  raise gr.Error(f"Generation failed: {str(e)}")
272
 
273