Upload folder using huggingface_hub
Browse files
asr_diarization/inference.py
CHANGED
|
@@ -2,8 +2,7 @@ import os
|
|
| 2 |
from .pipeline import ASR_Diarization

# Hugging Face access token for gated models (pyannote diarization, Whisper).
# None when the environment variable is unset; the pipeline decides how to
# handle a missing token.
HF_TOKEN = os.environ.get("HF_TOKEN", None)

# Single shared pipeline instance, built once at import time and reused by
# every inference() call.
# Bug fix: the original passed a second positional argument `HF_TOKEN1`,
# a name that is never defined in this module, so importing this file raised
# NameError. ASR_Diarization takes a single token argument.
pipe = ASR_Diarization(HF_TOKEN)
|
| 7 |
|
| 8 |
def inference(inputs):
    """Run the module-level ASR + diarization pipeline on *inputs*.

    Thin wrapper so callers don't touch the shared ``pipe`` object directly.
    """
    result = pipe(inputs)
    return result
|
|
|
|
| 2 |
from .pipeline import ASR_Diarization

# Hugging Face access token for gated models; None when the env var is unset
# (os.environ.get already defaults to None).
HF_TOKEN = os.environ.get("HF_TOKEN")

# One shared pipeline instance, constructed at import time and reused by
# every inference() call.
pipe = ASR_Diarization(HF_TOKEN)
|
|
|
|
| 6 |
|
| 7 |
def inference(inputs):
    """Forward *inputs* to the shared ASR_Diarization pipeline and return its result."""
    return pipe(inputs)
|
asr_diarization/pipeline.py
CHANGED
|
@@ -13,11 +13,10 @@ from jiwer import wer, Compose, ToLowerCase, RemovePunctuation, RemoveMultipleSp
|
|
| 13 |
|
| 14 |
|
| 15 |
class ASR_Diarization:
|
| 16 |
-
def __init__(self, HF_TOKEN,
|
| 17 |
diar_model="pyannote/speaker-diarization-3.1",
|
| 18 |
asr_model="Capstone04/TrainedWhisper"):
|
| 19 |
self.HF_TOKEN = HF_TOKEN
|
| 20 |
-
self.HF_TOKEN1 = HF_TOKEN1
|
| 21 |
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 22 |
|
| 23 |
# Load diarization model
|
|
@@ -29,7 +28,6 @@ class ASR_Diarization:
|
|
| 29 |
model=asr_model,
|
| 30 |
device=0 if self.device == "cuda" else -1,
|
| 31 |
return_timestamps=True,
|
| 32 |
-
token=HF_TOKEN1
|
| 33 |
)
|
| 34 |
|
| 35 |
def run_diarization(self, audio_path):
|
|
|
|
| 13 |
|
| 14 |
|
| 15 |
class ASR_Diarization:
|
| 16 |
+
def __init__(self, HF_TOKEN,
|
| 17 |
diar_model="pyannote/speaker-diarization-3.1",
|
| 18 |
asr_model="Capstone04/TrainedWhisper"):
|
| 19 |
self.HF_TOKEN = HF_TOKEN
|
|
|
|
| 20 |
self.device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 21 |
|
| 22 |
# Load diarization model
|
|
|
|
| 28 |
model=asr_model,
|
| 29 |
device=0 if self.device == "cuda" else -1,
|
| 30 |
return_timestamps=True,
|
|
|
|
| 31 |
)
|
| 32 |
|
| 33 |
def run_diarization(self, audio_path):
|