# app.py – Icelandic ASR (Whisper-small) + speaker diarization (pyannote 3.1),
# patched for the PyTorch 2.6+ weights_only UnpicklingError.
#
# NOTE(review): this file was reconstructed from a rendered Hugging Face diff
# view.  Lines hidden inside elided hunks (the asr pipeline kwargs, the
# `result = []` initialisation, the `@spaces.GPU` decorator) were restored
# conservatively — confirm against the actual repository.
import os
import tempfile

import gradio as gr
import spaces
import torch
from torch.serialization import safe_globals  # PyTorch 2.6+ allowlisting
from transformers import pipeline
from pyannote.audio import Pipeline

MODEL_NAME = "palli23/whisper-small-sam_spjall"


# presumably decorated in the hidden hunk (the `spaces` import is otherwise
# unused) — TODO confirm
@spaces.GPU
def transcribe_with_diarization(audio_path):
    """Transcribe *audio_path* and label each segment with its speaker.

    Returns a newline-joined transcript of the form
    ``[MÆLENDI <speaker>] <text>``, or an Icelandic message when no audio
    was uploaded / no speech was heard.
    """
    if not audio_path:
        return "Hladdu upp hljóðskrá"

    # PyTorch >= 2.6 defaults torch.load to weights_only=True, which rejects
    # non-allowlisted globals pickled inside the pyannote checkpoint.
    # Allowlist them only for the duration of the load.
    # FIX: safe_globals() takes types/callables — the original passed the
    # *string* 'pyannote.audio.core.task.Specifications', which torch does
    # not accept; import the class and pass it instead.
    from pyannote.audio.core.task import Specifications
    with safe_globals([torch.torch_version.TorchVersion, Specifications]):
        diarization = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.1",
            token=os.getenv("HF_TOKEN"),
        )

    dia = diarization(audio_path)

    # Whisper-small ASR pipeline.  The remaining kwargs were hidden in the
    # source diff; the device selection below is a conservative
    # reconstruction — TODO confirm.
    asr = pipeline(
        "automatic-speech-recognition",
        model=MODEL_NAME,
        device=0 if torch.cuda.is_available() else -1,
    )

    result = []  # FIX: initialisation was in an elided hunk; restored here.
    for turn, _, speaker in dia.itertracks(yield_label=True):
        # NOTE(review): `dia` is a pyannote Annotation, and Annotation.crop()
        # returns another Annotation, which has no .export() — extracting the
        # audio slice almost certainly needs pydub or pyannote's Audio
        # helper instead.  Kept as in the source; confirm at runtime.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            dia.crop(audio_path, turn).export(f.name, format="wav")
            segment_path = f.name

        try:
            text = asr(segment_path)["text"].strip()
            result.append(f"[MÆLENDI {speaker}] {text}")
        finally:
            # FIX: delete=False above, so remove the temp file ourselves even
            # when the ASR call raises.
            os.unlink(segment_path)

    return "\n".join(result) or "Ekkert heyrt"


# --- Gradio interface ------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Íslenskt ASR + Mælendagreining")
    gr.Markdown("**Whisper-small + pyannote 3.1 · Fixed PyTorch 2.6+**")
    gr.Markdown("Fullkominn podcast-transcript með réttum mælendum")

    audio = gr.Audio(type="filepath")
    btn = gr.Button("Transcribe með mælendum", variant="primary")
    out = gr.Textbox(lines=35)

    # FIX: the diff showed a truncated, unwired btn.click(...) — connect the
    # handler to the audio input and the transcript textbox.
    btn.click(transcribe_with_diarization, inputs=audio, outputs=out)

demo.launch(auth=("beta", "beta2025"))