vumichien commited on
Commit
a17228d
·
verified ·
1 Parent(s): ed383b5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -18,20 +18,20 @@ QUANTIZED_PITCH_MODEL_PATH = hf_hub_download(repo_id=PITCH_PATH, filename='quant
18
 
19
 
20
  ## word preprocessor
21
- processor_with_lm = Wav2Vec2ProcessorWithLM.from_pretrained(MODEL_PATH, use_auth_token=API_TOKEN)
22
- processor = Wav2Vec2Processor.from_pretrained(MODEL_PATH, use_auth_token=API_TOKEN)
23
 
24
  ### quantized model
25
- config = AutoConfig.from_pretrained(MODEL_PATH, use_auth_token=API_TOKEN)
26
  dummy_model = Wav2Vec2ForCTC(config)
27
  quantized_model = torch.quantization.quantize_dynamic(dummy_model, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
28
  quantized_model.load_state_dict(torch.load(QUANTIZED_MODEL_PATH))
29
 
30
  ## pitch preprocessor
31
- processor_pitch = Wav2Vec2Processor.from_pretrained(PITCH_PATH, use_auth_token=API_TOKEN)
32
 
33
  ### quantized pitch model
34
- config = AutoConfig.from_pretrained(PITCH_PATH, use_auth_token=API_TOKEN)
35
  dummy_pitch_model = Wav2Vec2ForCTC(config)
36
  quantized_pitch_model = torch.quantization.quantize_dynamic(dummy_pitch_model, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
37
  quantized_pitch_model.load_state_dict(torch.load(QUANTIZED_PITCH_MODEL_PATH))
@@ -96,7 +96,6 @@ iface = gr.Interface(
96
  inputs=[
97
  gr.File(
98
  label="Audio file",
99
- source="upload",
100
  type="binary",
101
  ),
102
  gr.Textbox(
 
18
 
19
 
20
  ## word preprocessor
21
+ processor_with_lm = Wav2Vec2ProcessorWithLM.from_pretrained(MODEL_PATH, token=API_TOKEN)
22
+ processor = Wav2Vec2Processor.from_pretrained(MODEL_PATH, token=API_TOKEN)
23
 
24
  ### quantized model
25
+ config = AutoConfig.from_pretrained(MODEL_PATH, token=API_TOKEN)
26
  dummy_model = Wav2Vec2ForCTC(config)
27
  quantized_model = torch.quantization.quantize_dynamic(dummy_model, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
28
  quantized_model.load_state_dict(torch.load(QUANTIZED_MODEL_PATH))
29
 
30
  ## pitch preprocessor
31
+ processor_pitch = Wav2Vec2Processor.from_pretrained(PITCH_PATH, token=API_TOKEN)
32
 
33
  ### quantized pitch model
34
+ config = AutoConfig.from_pretrained(PITCH_PATH, token=API_TOKEN)
35
  dummy_pitch_model = Wav2Vec2ForCTC(config)
36
  quantized_pitch_model = torch.quantization.quantize_dynamic(dummy_pitch_model, {torch.nn.Linear}, dtype=torch.qint8, inplace=True)
37
  quantized_pitch_model.load_state_dict(torch.load(QUANTIZED_PITCH_MODEL_PATH))
 
96
  inputs=[
97
  gr.File(
98
  label="Audio file",
 
99
  type="binary",
100
  ),
101
  gr.Textbox(