learnmlf committed
Commit 561bc60 · Parent(s): 280372d

feat: refactor

Files changed (1):
  app.py (+70, -34)
app.py CHANGED
@@ -15,10 +15,24 @@ import imageio
 from media_pipe.draw_util import FaceMeshVisualizer
 
 from download_models import download
+import torch
 
 # Download models and check for exists
 download()
 
+# Check GPU availability
+print("="*50)
+print("🔍 GPU Status Check:")
+print(f"  PyTorch version: {torch.__version__}")
+print(f"  CUDA available: {torch.cuda.is_available()}")
+if torch.cuda.is_available():
+    print(f"  CUDA version: {torch.version.cuda}")
+    print(f"  GPU device: {torch.cuda.get_device_name(0)}")
+    print(f"  GPU count: {torch.cuda.device_count()}")
+else:
+    print("  ⚠️ No CUDA GPU detected - will use CPU")
+print("="*50)
+
 PROCESSED_VIDEO_DIR = './processed_videos'
 TEMP_DIR = './temp'
 INFER_CONFIG_PATH = './configs/infer.yaml'
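The banner above only reports the GPU state at startup; inference code still has to choose a device itself. A minimal sketch of how that check is commonly folded into a helper (the `get_device` name is illustrative, not part of this commit):

```python
import torch

def get_device() -> torch.device:
    # Illustrative helper (not in app.py): prefer CUDA, fall back to CPU,
    # mirroring the startup banner above.
    if torch.cuda.is_available():
        return torch.device("cuda:0")
    return torch.device("cpu")

device = get_device()
print(f"Running inference on {device}")
```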
@@ -255,46 +269,47 @@ def generate_video(input_img, should_crop_face, expand_x, expand_y, offset_x, of
     return status, oo_video_path, all_video_path, frames_archive
 
 with gr.Blocks() as demo:
-    gr.Markdown("# FollowYourEmoji Webui")
-
-    gr.Markdown("""
-    ## 📖 How to Use This Demo
+    gr.Markdown("<h1 style='text-align: center;'>FollowYourEmoji</h1>")
 
-    Follow these simple steps to create your animated emoji video:
+    # GPU Status Display
+    gpu_available = torch.cuda.is_available()
+    gpu_info = ""
+    if gpu_available:
+        gpu_name = torch.cuda.get_device_name(0)
+        gpu_info = f"🚀 **GPU Enabled**: {gpu_name}"
+    else:
+        gpu_info = "⚠️ **Running on CPU** (Generation will be slower)"
 
-    1. **📸 Upload Reference Image**: Upload a portrait photo in the left panel
-    2. **✂️ Crop Face (Optional)**: Enable face cropping to automatically fit the image to match the video motion
-    3. **🎬 Upload Reference Video**: Upload a video or select a preprocessed .npy file in the middle panel
-    4. **👁️ Preview Animation**: Click "Show Animation preview" to see how the motion will look
-    5. **⚙️ Adjust Settings**: Fine-tune generation parameters at the bottom (steps, CFG scale, FPS, etc.)
-    6. **🎨 Generate Video**: Click "Generate Video" to create your animated result
+    gr.Markdown(f"<div style='text-align: center; padding: 10px; background-color: #f0f0f0; border-radius: 5px;'>{gpu_info}</div>")
 
-    💡 **Tips**:
-    - Use face cropping if your portrait is too far away or doesn't align well
-    - Preview the animation before generating to ensure the motion looks good
-    - Try the examples below to get started quickly!
-    """)
+    with gr.Box():
+        gr.Markdown("""
+        <div style='text-align: center;'>
+
+        ## 📖 How to Use This Demo
+
+        Follow these simple steps to create your animated emoji video:
+
+        **1. 📸 Upload Reference Image** → Upload a portrait photo in the left panel
+        **2. 🎬 Upload Reference Video** → Upload a video or select a preprocessed .npy file in the middle panel
+        **3. ✂️ Crop Face (Optional)** → Enable face cropping to automatically fit the image to match the video motion
+        **4. 👁️ Preview Animation** → Click "Show Animation preview" to see how the motion will look
+        **5. ⚙️ Adjust Settings** → Fine-tune generation parameters at the bottom (steps, CFG scale, FPS, etc.)
+        **6. 🎨 Generate Video** → Click "Generate Video" to create your animated result
+
+        💡 **Tips**: Use face cropping if your portrait is too far away • Preview the animation before generating • Try the examples below!
+
+        </div>
+        """)
 
     # Main Layout: 3 columns - Image, Video, Results
     with gr.Row():
-        # Left Column: Image Upload and Crop Face
+        # Left Column: Image Upload
        with gr.Column(scale=1):
            gr.Markdown("### 📸 Reference Image")
            input_img = gr.Image(label="Upload reference image", type="filepath", height=400)
-
-            crop_face_checkbox = gr.Checkbox(label="Crop face according to video",info="If your picture is too far away or the face doesn't fit you can use cropping", value=False)
-            with gr.Accordion("Face Cropping Settings", open=False):
-                expand_x = gr.Slider(label="Expand X", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
-                expand_y = gr.Slider(label="Expand Y", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
-                offset_x = gr.Slider(label="Offset X", minimum=-1, maximum=1, value=0.0, step=0.01)
-                offset_y = gr.Slider(label="Offset Y", minimum=-1, maximum=1, value=0.0, step=0.01)
-
-            preview_crop_btn = gr.Button(value="Preview Crop", variant="secondary")
-            with gr.Row():
-                crop_preview = gr.Image(label="Crop Preview", height=200)
-                crop_preview_motion = gr.Image(label="Motion Preview", height=200)
 
-        # Middle Column: Video Input and Animation Preview
+        # Middle Column: Video Input, Crop Face, and Animation Preview
        with gr.Column(scale=1):
            gr.Markdown("### 🎬 Reference Video")
            input_video_type = gr.Radio(label="Input type", choices=["video","npy"], value="video")
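A compatibility note on `gr.Box` in this hunk: it exists in Gradio 3.x but was removed in Gradio 4.0, where `gr.Group` is the closest replacement. If the Space is ever bumped to 4.x, the wrapper would need to change roughly like this (a sketch, assuming Gradio 4.x):

```python
import gradio as gr

with gr.Blocks() as demo:
    # Gradio 4.x: gr.Group visually groups components, much like the removed gr.Box.
    with gr.Group():
        gr.Markdown("## 📖 How to Use This Demo")

demo.launch()
```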
@@ -308,6 +323,19 @@ with gr.Blocks() as demo:
            input_npy_refresh = gr.Button(value="Refresh NPY List")
            input_npy = gr.File(file_types=[".npy"], label="Upload .npy file")
 
+            gr.Markdown("---")
+            crop_face_checkbox = gr.Checkbox(label="✂️ Crop face according to video",info="Enable this after uploading both image and video", value=False)
+            with gr.Accordion("Face Cropping Settings", open=False):
+                expand_x = gr.Slider(label="Expand X", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
+                expand_y = gr.Slider(label="Expand Y", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
+                offset_x = gr.Slider(label="Offset X", minimum=-1, maximum=1, value=0.0, step=0.01)
+                offset_y = gr.Slider(label="Offset Y", minimum=-1, maximum=1, value=0.0, step=0.01)
+
+            preview_crop_btn = gr.Button(value="Preview Crop", variant="secondary")
+            with gr.Row():
+                crop_preview = gr.Image(label="Crop Preview", height=200)
+                crop_preview_motion = gr.Image(label="Motion Preview", height=200)
+
            with gr.Accordion("Animation Preview", open=False):
                show_gif_btn = gr.Button(value="Show Animation Preview", variant="secondary")
                with gr.Row():
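The `preview_crop` implementation is outside this diff, so the exact slider semantics aren't visible here; conventionally, though, parameters like Expand X/Y scale a detected face box and Offset X/Y shift it by a fraction of the box size. A hypothetical illustration of that convention (none of these names come from app.py):

```python
def expand_bbox(x0, y0, x1, y1, expand_x=1.2, expand_y=1.2, offset_x=0.0, offset_y=0.0):
    # Hypothetical: grow a face box by expand_* and shift its center by
    # offset_* measured in box-size units.
    w, h = x1 - x0, y1 - y0
    cx = (x0 + x1) / 2 + offset_x * w
    cy = (y0 + y1) / 2 + offset_y * h
    new_w, new_h = w * expand_x, h * expand_y
    return (cx - new_w / 2, cy - new_h / 2, cx + new_w / 2, cy + new_h / 2)

# The default sliders (1.2, 1.2, 0, 0) grow a 100x100 box to 120x120 around the same center:
print(expand_bbox(0, 0, 100, 100))  # (-10.0, -10.0, 110.0, 110.0)
```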
@@ -366,13 +394,19 @@ with gr.Blocks() as demo:
     input_video.change(
         fn=process_input_video,
         inputs=[input_video, input_video_save],
-        outputs=[result_status, input_npy, input_video_frames, output_fps_info]
+        outputs=[result_status, input_npy, input_video_frames, output_fps_info],
+        show_progress="full"
     )
 
     input_npy_select.change(fn=update_frame_count, inputs=[input_npy_select], outputs=[input_video_frames])
     input_npy.change(fn=update_frame_count, inputs=[input_npy], outputs=[input_video_frames])
 
-    show_gif_btn.click(fn=show_gif_for_npy, inputs=[input_npy_select, input_video], outputs=[gif_output, gif_output_align, result_status])
+    show_gif_btn.click(
+        fn=show_gif_for_npy,
+        inputs=[input_npy_select, input_video],
+        outputs=[gif_output, gif_output_align, result_status],
+        show_progress="full"
+    )
 
     input_video.change(
         fn=update_gif_on_video_change,
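`show_progress="full"` is a standard option on Gradio event listeners such as `.click()` and `.change()`: it overlays the full progress indicator on the output components while the function runs ("minimal" and "hidden" are the other accepted values in recent releases). A self-contained sketch of the same wiring:

```python
import time
import gradio as gr

def slow_job(text: str) -> str:
    time.sleep(2)  # stand-in for the app's video preprocessing
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    btn = gr.Button("Run")
    # The progress overlay covers `out` while slow_job runs.
    btn.click(fn=slow_job, inputs=[inp], outputs=[out], show_progress="full")

demo.launch()
```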
@@ -385,7 +419,8 @@ with gr.Blocks() as demo:
     preview_crop_btn.click(
         fn=preview_crop,
         inputs=[input_img, input_npy_select, input_video, expand_x, expand_y, offset_x, offset_y],
-        outputs=[crop_preview,crop_preview_motion, result_status]
+        outputs=[crop_preview,crop_preview_motion, result_status],
+        show_progress="full"
     )
 
     result_btn.click(
@@ -393,7 +428,8 @@ with gr.Blocks() as demo:
         inputs=[input_img, crop_face_checkbox, expand_x, expand_y, offset_x, offset_y, input_video_type, input_video, input_npy_select, input_npy, input_video_frames,
                 settings_steps, settings_cfg_scale, settings_seed, resolution_w, resolution_h,
                 model_step, custom_output_path, use_custom_fps, output_fps, callback_steps, context_frames, context_stride, context_overlap, context_batch_size, anomaly_action,intropolate_factor],
-        outputs=[result_status, result_video, result_video_2, frames_output]
+        outputs=[result_status, result_video, result_video_2, frames_output],
+        show_progress="full"
     )
 
     # Examples Section