liuyang committed on
Commit
3c30a0a
·
1 Parent(s): baffcb8

try add token

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -32,7 +32,7 @@ from faster_whisper import WhisperModel, BatchedInferencePipeline
32
  from faster_whisper.vad import VadOptions
33
  import requests
34
  import base64
35
- from pyannote.audio import Pipeline, Inference
36
 
37
  import os, sys, importlib.util, pathlib, ctypes, tempfile, wave, math
38
  import json
@@ -463,7 +463,9 @@ class WhisperTranscriber:
463
  global _embedder
464
  if _embedder is None:
465
  # window="whole" to compute one embedding per provided chunk
466
- _embedder = Inference("pyannote/embedding", window="whole", device=torch.device("cuda"),use_auth_token=os.getenv("HF_TOKEN"),)
 
 
467
  return _embedder
468
 
469
  def assign_speakers_to_transcription(self, transcription_results, diarization_segments):
 
32
  from faster_whisper.vad import VadOptions
33
  import requests
34
  import base64
35
+ from pyannote.audio import Pipeline, Inference, Model
36
 
37
  import os, sys, importlib.util, pathlib, ctypes, tempfile, wave, math
38
  import json
 
463
  global _embedder
464
  if _embedder is None:
465
  # window="whole" to compute one embedding per provided chunk
466
+ token = os.getenv("HF_TOKEN")
467
+ model = Model.from_pretrained("pyannote/embedding", use_auth_token=token)
468
+ _embedder = Inference(model, window="whole", device=torch.device("cuda"))
469
  return _embedder
470
 
471
  def assign_speakers_to_transcription(self, transcription_results, diarization_segments):