liamsch committed on
Commit
1f42424
·
1 Parent(s): cef415d

lazy load models

Browse files
Files changed (1) hide show
  1. gradio_demo.py +17 -7
gradio_demo.py CHANGED
@@ -58,25 +58,28 @@ c2w = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dty
58
 
59
 
60
  def initialize_models():
61
- """Initialize all models (called once at startup)."""
62
  global sheap_model, flame, fa_model
 
 
 
63
 
64
- print("Loading SHeaP model...")
65
  sheap_model = load_sheap_model(model_type="expressive").to(device)
66
  sheap_model.eval()
67
 
68
- print("Loading FLAME model...")
69
  flame_dir = Path("FLAME2020/")
70
  flame = TinyFlame(flame_dir / "generic_model.pt", eyelids_ckpt=flame_dir / "eyelids.pt").to(
71
  device
72
  )
73
 
74
- print("Loading face alignment model...")
75
  fa_model = face_alignment.FaceAlignment(
76
  face_alignment.LandmarksType.TWO_D, device=str(device), flip_input=False
77
  )
78
 
79
- print("Models loaded successfully!")
80
 
81
 
82
  @spaces.GPU
@@ -90,6 +93,9 @@ def process_image(image: np.ndarray) -> Image.Image:
90
  Returns:
91
  PIL Image with three views side-by-side (original, mesh, blended)
92
  """
 
 
 
93
  # Convert to torch tensor for face detection (C, H, W) format with values in [0, 1]
94
  image_tensor = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0
95
 
@@ -148,6 +154,9 @@ def process_video(video_path: str, progress=gr.Progress()) -> str:
148
  """
149
  Process a video and return path to the rendered output video using background threads.
150
  """
 
 
 
151
  temp_dir = Path(tempfile.mkdtemp())
152
  render_size = 512
153
  try:
@@ -239,8 +248,9 @@ def process_input(image: Optional[np.ndarray], video: Optional[str]):
239
  raise ValueError("Please provide either an image or video!")
240
 
241
 
242
- # Initialize models on startup
243
- initialize_models()
 
244
 
245
  # Create Gradio interface
246
  with gr.Blocks(title="SHeaP Demo") as demo:
 
58
 
59
 
60
  def initialize_models():
61
+ """Initialize all models (called lazily on first use)."""
62
  global sheap_model, flame, fa_model
63
+
64
+ if sheap_model is not None:
65
+ return # Already initialized
66
 
67
+ print("Loading SHeaP model...", flush=True)
68
  sheap_model = load_sheap_model(model_type="expressive").to(device)
69
  sheap_model.eval()
70
 
71
+ print("Loading FLAME model...", flush=True)
72
  flame_dir = Path("FLAME2020/")
73
  flame = TinyFlame(flame_dir / "generic_model.pt", eyelids_ckpt=flame_dir / "eyelids.pt").to(
74
  device
75
  )
76
 
77
+ print("Loading face alignment model...", flush=True)
78
  fa_model = face_alignment.FaceAlignment(
79
  face_alignment.LandmarksType.TWO_D, device=str(device), flip_input=False
80
  )
81
 
82
+ print("Models loaded successfully!", flush=True)
83
 
84
 
85
  @spaces.GPU
 
93
  Returns:
94
  PIL Image with three views side-by-side (original, mesh, blended)
95
  """
96
+ # Initialize models on first use (lazy loading for @spaces.GPU)
97
+ initialize_models()
98
+
99
  # Convert to torch tensor for face detection (C, H, W) format with values in [0, 1]
100
  image_tensor = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0
101
 
 
154
  """
155
  Process a video and return path to the rendered output video using background threads.
156
  """
157
+ # Initialize models on first use (lazy loading for @spaces.GPU)
158
+ initialize_models()
159
+
160
  temp_dir = Path(tempfile.mkdtemp())
161
  render_size = 512
162
  try:
 
248
  raise ValueError("Please provide either an image or video!")
249
 
250
 
251
+ # Don't initialize models at startup when using @spaces.GPU
252
+ # They will be loaded lazily on first use
253
+ # initialize_models()
254
 
255
  # Create Gradio interface
256
  with gr.Blocks(title="SHeaP Demo") as demo: