on1onmangoes committed on
Commit
0256fc1
·
1 Parent(s): c6d9314

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -19,8 +19,8 @@ from transformers.pipelines.audio_utils import ffmpeg_read
19
  HF_TOKEN = os.environ.get("HF_TOKEN")
20
 
21
  # set up the diarization pipeline
22
- #diarization_pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.0", use_auth_token=HF_TOKEN)
23
- diarization_pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=HF_TOKEN)
24
 
25
  if torch.cuda.is_available():
26
  diarization_pipeline.to(torch.device("cuda"))
@@ -32,7 +32,7 @@ import gradio as gr
32
  def transcribe(audio_path):
33
  # Run diarization while we wait for Whisper JAX
34
  diarization = diarization_pipeline(audio_path)
35
- segments = diarization.for_json()["content"]
36
  # Segments = diarization.for_json()["content"]
37
  # Segments = str(diarization)
38
  transcription = "SAML Output"
 
19
  HF_TOKEN = os.environ.get("HF_TOKEN")
20
 
21
  # set up the diarization pipeline
22
+ diarization_pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.0", use_auth_token=HF_TOKEN)
23
+ #diarization_pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=HF_TOKEN)
24
 
25
  if torch.cuda.is_available():
26
  diarization_pipeline.to(torch.device("cuda"))
 
32
  def transcribe(audio_path):
33
  # Run diarization while we wait for Whisper JAX
34
  diarization = diarization_pipeline(audio_path)
35
+ #segments = diarization.for_json()["content"]
36
  # Segments = diarization.for_json()["content"]
37
  # Segments = str(diarization)
38
  transcription = "SAML Output"