John Ho committed on
Commit
1679d51
·
1 Parent(s): ce0e222

testing CUDA for processor

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -22,7 +22,7 @@ subprocess.run(
22
 
23
  # The model is trained on 8.0 FPS which we recommend for optimal inference
24
 
25
- DTYPE = torch.bfloat16
26
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
27
  logger.info(f"Device: {DEVICE}, dtype: {DTYPE}")
28
 
@@ -70,7 +70,9 @@ def inference(
70
  use_flash_attention: bool = True,
71
  ):
72
  # default processor
73
- processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")
 
 
74
  model = load_model(use_flash_attention=use_flash_attention)
75
  fps = get_fps_ffmpeg(video_path)
76
  logger.info(f"{os.path.basename(video_path)} FPS: {fps}")
 
22
 
23
  # The model is trained on 8.0 FPS which we recommend for optimal inference
24
 
25
+ DTYPE = torch.bfloat16 if torch.cuda.is_available() else torch.float16
26
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
27
  logger.info(f"Device: {DEVICE}, dtype: {DTYPE}")
28
 
 
70
  use_flash_attention: bool = True,
71
  ):
72
  # default processor
73
+ processor = AutoProcessor.from_pretrained(
74
+ "Qwen/Qwen2.5-VL-7B-Instruct", device_map=DEVICE, use_fast=True
75
+ )
76
  model = load_model(use_flash_attention=use_flash_attention)
77
  fps = get_fps_ffmpeg(video_path)
78
  logger.info(f"{os.path.basename(video_path)} FPS: {fps}")