palli23 commited on
Commit
860aaf0
·
1 Parent(s): 0d046f3

diarization1Mæló

Browse files
Files changed (1) hide show
  1. app.py +18 -14
app.py CHANGED
@@ -1,12 +1,12 @@
1
- # app.py – FIXED Pyannote UnpicklingError (PyTorch 2.6+ Compatible)
2
  import os
3
  import gradio as gr
4
  import spaces
5
  from transformers import pipeline
6
  from pyannote.audio import Pipeline
7
- import torch
8
  import tempfile
9
- from torch.serialization import safe_globals # ← KEY FIX
 
10
 
11
  MODEL_NAME = "palli23/whisper-small-sam_spjall"
12
 
@@ -15,11 +15,8 @@ def transcribe_with_diarization(audio_path):
15
  if not audio_path:
16
  return "Hladdu upp hljóðskrá"
17
 
18
- # FIX: Allowlist blocked globals for PyTorch 2.6+
19
- with safe_globals([
20
- torch.torch_version.TorchVersion,
21
- 'pyannote.audio.core.task.Specifications' # Add if needed
22
- ]):
23
  diarization = Pipeline.from_pretrained(
24
  "pyannote/speaker-diarization-3.1",
25
  token=os.getenv("HF_TOKEN")
@@ -27,7 +24,6 @@ def transcribe_with_diarization(audio_path):
27
 
28
  dia = diarization(audio_path)
29
 
30
- # Whisper-small
31
  asr = pipeline(
32
  "automatic-speech-recognition",
33
  model=MODEL_NAME,
@@ -39,15 +35,23 @@ def transcribe_with_diarization(audio_path):
39
  for turn, _, speaker in dia.itertracks(yield_label=True):
40
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
41
  dia.crop(audio_path, turn).export(f.name, format="wav")
42
- segment_path = f.name
43
 
44
- text = asr(segment_path)["text"].strip()
45
  result.append(f"[MÆLENDI {speaker}] {text}")
46
- os.unlink(segment_path)
47
 
48
  return "\n".join(result) or "Ekkert heyrt"
49
 
50
- # Interface
51
  with gr.Blocks() as demo:
52
  gr.Markdown("# Íslenskt ASR + Mælendagreining")
53
- gr.Markdown("**Whisper-small + pyannote 3.1 · Fixed PyTorch 2
 
 
 
 
 
 
 
 
 
 
1
+ # app.py – Whisper-small + Mælendagreining (pyannote 3.1) – VIRKAR Á ZeroGPU
2
  import os
3
  import gradio as gr
4
  import spaces
5
  from transformers import pipeline
6
  from pyannote.audio import Pipeline
 
7
  import tempfile
8
+ import torch
9
+ from torch.serialization import safe_globals
10
 
11
  MODEL_NAME = "palli23/whisper-small-sam_spjall"
12
 
 
15
  if not audio_path:
16
  return "Hladdu upp hljóðskrá"
17
 
18
+ # LÖGUM PyTorch 2.6+ unpickling villuna
19
+ with safe_globals([torch.torch_version.TorchVersion]):
 
 
 
20
  diarization = Pipeline.from_pretrained(
21
  "pyannote/speaker-diarization-3.1",
22
  token=os.getenv("HF_TOKEN")
 
24
 
25
  dia = diarization(audio_path)
26
 
 
27
  asr = pipeline(
28
  "automatic-speech-recognition",
29
  model=MODEL_NAME,
 
35
  for turn, _, speaker in dia.itertracks(yield_label=True):
36
  with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
37
  dia.crop(audio_path, turn).export(f.name, format="wav")
38
+ seg = f.name
39
 
40
+ text = asr(seg)["text"].strip()
41
  result.append(f"[MÆLENDI {speaker}] {text}")
42
+ os.unlink(seg)
43
 
44
  return "\n".join(result) or "Ekkert heyrt"
45
 
 
# --- Gradio UI -----------------------------------------------------------
# Single-page app: upload an audio file, run Whisper ASR + pyannote speaker
# diarization via transcribe_with_diarization, and show the labelled text.
with gr.Blocks() as demo:
    gr.Markdown("# Íslenskt ASR + Mælendagreining")
    gr.Markdown("**Whisper-small + pyannote 3.1 · Keyrir á A100**")
    gr.Markdown("Fullkominn podcast-transcript með réttum mælendum")

    audio = gr.Audio(type="filepath", label="Hladdu upp .mp3 / .wav")
    btn = gr.Button("Transcribe með mælendum", variant="primary", size="lg")
    out = gr.Textbox(lines=35, label="Útskrift")

    # Wire the button: audio filepath in, transcript text out.
    btn.click(transcribe_with_diarization, audio, out)

# SECURITY: credentials were hard-coded as auth=("beta", "beta2025").
# Read them from the environment instead; the original values remain the
# fallback, so behavior is unchanged when the variables are not set.
demo.launch(
    auth=(
        os.getenv("GRADIO_AUTH_USER", "beta"),
        os.getenv("GRADIO_AUTH_PASS", "beta2025"),
    )
)