wanglamao committed on
Commit
8d62622
·
1 Parent(s): f4bb8a5
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -212,7 +212,9 @@ bicodec_tokenizer_path = args.bicodec_tokenizer_path or os.path.join(
212
  gpa_model_path = args.gpa_model_path or model_base_path
213
 
214
  # Instantiate Model
215
- print(f"Initializing GPA Inference System on {args.device}...")
 
 
216
  print(f"Tokenizer path: {tokenizer_path}")
217
  print(f"Text tokenizer path: {text_tokenizer_path}")
218
  print(f"BiCodec tokenizer path: {bicodec_tokenizer_path}")
@@ -225,7 +227,7 @@ inference = GPAInference(
225
  bicodec_tokenizer_path=bicodec_tokenizer_path,
226
  gpa_model_path=gpa_model_path,
227
  output_dir=None, # Will use temporary directory
228
- device="cuda" if torch.cuda.is_available() else "cpu",
229
  )
230
 
231
  # Launch Gradio Demo
 
212
  gpa_model_path = args.gpa_model_path or model_base_path
213
 
214
  # Instantiate Model
215
+ device = "cuda" if torch.cuda.is_available() else "cpu"
216
+
217
+ print(f"Initializing GPA Inference System on {device}...")
218
  print(f"Tokenizer path: {tokenizer_path}")
219
  print(f"Text tokenizer path: {text_tokenizer_path}")
220
  print(f"BiCodec tokenizer path: {bicodec_tokenizer_path}")
 
227
  bicodec_tokenizer_path=bicodec_tokenizer_path,
228
  gpa_model_path=gpa_model_path,
229
  output_dir=None, # Will use temporary directory
230
+ device=device,
231
  )
232
 
233
  # Launch Gradio Demo