Spaces: Running on Zero
Add new Whisper model
Browse files — whisper.py (+4, −3)
whisper.py
CHANGED
|
@@ -8,9 +8,10 @@ import torch
|
|
| 8 |
device = 0 if torch.cuda.is_available() else "cpu"
|
| 9 |
torch_dtype = torch.float32
|
| 10 |
|
| 11 |
-
|
| 12 |
-
MODEL_NAME = "openai/whisper-large-v3"
|
| 13 |
-
|
|
|
|
| 14 |
processor = WhisperProcessor.from_pretrained(MODEL_NAME)
|
| 15 |
pipeline_vad = Pipeline.from_pretrained("./pyannote/config.yaml")
|
| 16 |
threshold = 15000 # adjust max duration threshold
|
|
|
|
| 8 |
device = 0 if torch.cuda.is_available() else "cpu"
|
| 9 |
torch_dtype = torch.float32
|
| 10 |
|
| 11 |
+
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 12 |
+
#MODEL_NAME = "openai/whisper-large-v3"
|
| 13 |
+
MODEL_NAME = "projecte-aina/whisper-large-v3-ca-es-synth-cs"
|
| 14 |
+
model = WhisperForConditionalGeneration.from_pretrained(MODEL_NAME, torch_dtype=torch_dtype,token=HF_TOKEN).to(device)
|
| 15 |
processor = WhisperProcessor.from_pretrained(MODEL_NAME)
|
| 16 |
pipeline_vad = Pipeline.from_pretrained("./pyannote/config.yaml")
|
| 17 |
threshold = 15000 # adjust max duration threshold
|