testcoder-ui committed on
Commit
a613a4c
·
1 Parent(s): 53da786

feat: add generation time display

Browse files

- Added response time tracking in infer_script.py
- Display actual generation time in UI with formatted output
- Shows time in seconds or minutes based on duration

Files changed (2) hide show
  1. app.py +13 -3
  2. infer_script.py +18 -3
app.py CHANGED
@@ -251,7 +251,7 @@ def generate_video(input_img, should_crop_face, expand_x, expand_y, offset_x, of
251
 
252
  try:
253
  print("📞 Calling run_inference...")
254
- status, oo_video_path, all_video_path = run_inference(
255
  config_path=INFER_CONFIG_PATH,
256
  model_path=MODEL_PATH,
257
  input_path=input_img,
@@ -274,6 +274,7 @@ def generate_video(input_img, should_crop_face, expand_x, expand_y, offset_x, of
274
  interpolation_factor=intropolate_factor
275
  )
276
  print(f"✅ Inference completed! Status: {status}")
 
277
  except Exception as e:
278
  print(f"❌ Error in run_inference: {str(e)}")
279
  import traceback
@@ -296,7 +297,15 @@ def generate_video(input_img, should_crop_face, expand_x, expand_y, offset_x, of
296
  else:
297
  print(f"Directory with frames not found: {frames_dir}")
298
 
299
- return status, oo_video_path, all_video_path, frames_archive
 
 
 
 
 
 
 
 
300
 
301
  with gr.Blocks() as demo:
302
  gr.Markdown("<h1 style='text-align: center;'>FollowYourEmoji</h1>")
@@ -378,6 +387,7 @@ with gr.Blocks() as demo:
378
  with gr.Column(scale=1):
379
  gr.Markdown("### 🎨 Generated Results")
380
  result_status = gr.Label(value="Ready to generate")
 
381
  result_video = gr.Video(label="Result Video (Main)", interactive=False, height=400)
382
  result_video_2 = gr.Video(label="Result Video (Full)", interactive=False, height=400)
383
  frames_output = gr.File(label="Download Frames Archive")
@@ -460,7 +470,7 @@ with gr.Blocks() as demo:
460
  inputs=[input_img, crop_face_checkbox, expand_x, expand_y, offset_x, offset_y, input_video_type, input_video, input_npy_select, input_npy, input_video_frames,
461
  settings_steps, settings_cfg_scale, settings_seed, resolution_w, resolution_h,
462
  model_step, custom_output_path, use_custom_fps, output_fps, callback_steps, context_frames, context_stride, context_overlap, context_batch_size, anomaly_action,intropolate_factor],
463
- outputs=[result_status, result_video, result_video_2, frames_output],
464
  show_progress="full"
465
  )
466
 
 
251
 
252
  try:
253
  print("📞 Calling run_inference...")
254
+ status, oo_video_path, all_video_path, elapsed_time = run_inference(
255
  config_path=INFER_CONFIG_PATH,
256
  model_path=MODEL_PATH,
257
  input_path=input_img,
 
274
  interpolation_factor=intropolate_factor
275
  )
276
  print(f"✅ Inference completed! Status: {status}")
277
+ print(f"⏱️ Total time: {elapsed_time:.2f} seconds")
278
  except Exception as e:
279
  print(f"❌ Error in run_inference: {str(e)}")
280
  import traceback
 
297
  else:
298
  print(f"Directory with frames not found: {frames_dir}")
299
 
300
+ # Format timing display
301
+ if elapsed_time < 60:
302
+ time_display = f"⏱️ {elapsed_time:.2f} seconds"
303
+ else:
304
+ minutes = int(elapsed_time // 60)
305
+ seconds = elapsed_time % 60
306
+ time_display = f"⏱️ {minutes} min {seconds:.2f} sec"
307
+
308
+ return status, time_display, oo_video_path, all_video_path, frames_archive
309
 
310
  with gr.Blocks() as demo:
311
  gr.Markdown("<h1 style='text-align: center;'>FollowYourEmoji</h1>")
 
387
  with gr.Column(scale=1):
388
  gr.Markdown("### 🎨 Generated Results")
389
  result_status = gr.Label(value="Ready to generate")
390
+ result_time = gr.Label(value="Response time will appear here", label="⏱️ Generation Time")
391
  result_video = gr.Video(label="Result Video (Main)", interactive=False, height=400)
392
  result_video_2 = gr.Video(label="Result Video (Full)", interactive=False, height=400)
393
  frames_output = gr.File(label="Download Frames Archive")
 
470
  inputs=[input_img, crop_face_checkbox, expand_x, expand_y, offset_x, offset_y, input_video_type, input_video, input_npy_select, input_npy, input_video_frames,
471
  settings_steps, settings_cfg_scale, settings_seed, resolution_w, resolution_h,
472
  model_step, custom_output_path, use_custom_fps, output_fps, callback_steps, context_frames, context_stride, context_overlap, context_batch_size, anomaly_action,intropolate_factor],
473
+ outputs=[result_status, result_time, result_video, result_video_2, frames_output],
474
  show_progress="full"
475
  )
476
 
infer_script.py CHANGED
@@ -172,6 +172,8 @@ def visualize(dataloader, pipeline, generator, W, H, video_length, num_inference
172
  def infer(config_path, model_path, input_path, lmk_path, output_path, model_step, seed,
173
  resolution_w, resolution_h, video_length, num_inference_steps, guidance_scale, output_fps, show_stats,
174
  anomaly_action, callback_steps, context_frames, context_stride, context_overlap, context_batch_size,interpolation_factor):
 
 
175
 
176
  config = OmegaConf.load(config_path)
177
  config.init_checkpoint = model_path
@@ -274,7 +276,18 @@ def infer(config_path, model_path, input_path, lmk_path, output_path, model_step
274
  torch.cuda.empty_cache()
275
  gc.collect()
276
 
277
- return "Inference completed successfully", oo_video_path, all_video_path
 
 
 
 
 
 
 
 
 
 
 
278
 
279
  def run_inference(config_path, model_path, input_path, lmk_path, output_path, model_step, seed,
280
  resolution_w, resolution_h, video_length, num_inference_steps=30, guidance_scale=3.5, output_fps=30,
@@ -285,9 +298,10 @@ def run_inference(config_path, model_path, input_path, lmk_path, output_path, mo
285
  torch.cuda.empty_cache()
286
  gc.collect()
287
 
288
- return infer(config_path, model_path, input_path, lmk_path, output_path, model_step, seed,
289
  resolution_w, resolution_h, video_length, num_inference_steps, guidance_scale, output_fps,
290
  show_stats, anomaly_action, callback_steps, context_frames, context_stride, context_overlap, context_batch_size,interpolation_factor)
 
291
  finally:
292
  torch.cuda.empty_cache()
293
  gc.collect()
@@ -320,7 +334,7 @@ if __name__ == "__main__":
320
 
321
  args = parser.parse_args()
322
 
323
- status, oo_path, all_path = run_inference(
324
  args.config, args.model, args.input, args.lmk, args.output, args.step, args.seed,
325
  args.width, args.height, args.length, args.steps, args.guidance, args.fps,
326
  args.show_stats, args.anomaly_action, args.callback_steps, args.context_frames,
@@ -330,3 +344,4 @@ if __name__ == "__main__":
330
  print(status)
331
  print(f"Output video (only output): {oo_path}")
332
  print(f"Output video (all frames): {all_path}")
 
 
172
  def infer(config_path, model_path, input_path, lmk_path, output_path, model_step, seed,
173
  resolution_w, resolution_h, video_length, num_inference_steps, guidance_scale, output_fps, show_stats,
174
  anomaly_action, callback_steps, context_frames, context_stride, context_overlap, context_batch_size,interpolation_factor):
175
+ import time
176
+ start_time = time.time()
177
 
178
  config = OmegaConf.load(config_path)
179
  config.init_checkpoint = model_path
 
276
  torch.cuda.empty_cache()
277
  gc.collect()
278
 
279
+ end_time = time.time()
280
+ elapsed_time = end_time - start_time
281
+
282
+ # Format time nicely
283
+ if elapsed_time < 60:
284
+ time_str = f"{elapsed_time:.2f} seconds"
285
+ else:
286
+ minutes = int(elapsed_time // 60)
287
+ seconds = elapsed_time % 60
288
+ time_str = f"{minutes} min {seconds:.2f} sec"
289
+
290
+ return f"Inference completed successfully in {time_str}", oo_video_path, all_video_path, elapsed_time
291
 
292
  def run_inference(config_path, model_path, input_path, lmk_path, output_path, model_step, seed,
293
  resolution_w, resolution_h, video_length, num_inference_steps=30, guidance_scale=3.5, output_fps=30,
 
298
  torch.cuda.empty_cache()
299
  gc.collect()
300
 
301
+ status, oo_video_path, all_video_path, elapsed_time = infer(config_path, model_path, input_path, lmk_path, output_path, model_step, seed,
302
  resolution_w, resolution_h, video_length, num_inference_steps, guidance_scale, output_fps,
303
  show_stats, anomaly_action, callback_steps, context_frames, context_stride, context_overlap, context_batch_size,interpolation_factor)
304
+ return status, oo_video_path, all_video_path, elapsed_time
305
  finally:
306
  torch.cuda.empty_cache()
307
  gc.collect()
 
334
 
335
  args = parser.parse_args()
336
 
337
+ status, oo_path, all_path, elapsed_time = run_inference(
338
  args.config, args.model, args.input, args.lmk, args.output, args.step, args.seed,
339
  args.width, args.height, args.length, args.steps, args.guidance, args.fps,
340
  args.show_stats, args.anomaly_action, args.callback_steps, args.context_frames,
 
344
  print(status)
345
  print(f"Output video (only output): {oo_path}")
346
  print(f"Output video (all frames): {all_path}")
347
+ print(f"Total generation time: {elapsed_time:.2f} seconds")