Vasudevakrishna commited on
Commit
912afa9
·
1 Parent(s): 2a9f4d0

Misspelled.

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import gradio as gr
2
  import torch
3
- import wisperx
4
  from model import MainQLoraModel
5
  from configs import get_config_phase2
6
  from transformers import AutoTokenizer, AutoProcessor
@@ -11,7 +11,7 @@ config = get_config_phase2()
11
  tokenizer = AutoTokenizer.from_pretrained(config.get("phi2_model_name"), trust_remote_code=True)
12
  processor = AutoProcessor.from_pretrained(config.get("clip_model_name"), trust_remote_code=True)
13
  llmModel = MainQLoraModel(tokenizer, config).to(config.get("device"))
14
- audio_model = wisperx.load_model('tiny', 'cpu', compute_type="float16")
15
 
16
 
17
  def generate_answers(img=None, aud = None, q = None, max_tokens = 30):
 
1
  import gradio as gr
2
  import torch
3
+ import whisperx
4
  from model import MainQLoraModel
5
  from configs import get_config_phase2
6
  from transformers import AutoTokenizer, AutoProcessor
 
11
  tokenizer = AutoTokenizer.from_pretrained(config.get("phi2_model_name"), trust_remote_code=True)
12
  processor = AutoProcessor.from_pretrained(config.get("clip_model_name"), trust_remote_code=True)
13
  llmModel = MainQLoraModel(tokenizer, config).to(config.get("device"))
14
+ audio_model = whisperx.load_model('tiny', 'cpu', compute_type="float16")
15
 
16
 
17
  def generate_answers(img=None, aud = None, q = None, max_tokens = 30):