File size: 2,025 Bytes
dc35ead
76c263b
 
 
 
 
dc35ead
 
 
76c263b
 
 
dc35ead
 
 
76c263b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dc35ead
 
 
 
76c263b
 
1ce7f77
76c263b
 
 
 
 
 
 
 
 
dc35ead
 
 
1ce7f77
76c263b
1ce7f77
 
 
 
 
 
 
 
 
76c263b
dc35ead
 
 
76c263b
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import spaces
import gradio as gr
import os
import torch
from pyannote.audio import Pipeline




# Configuration
# Hugging Face access token; required to download the gated pyannote model.
HF_TOKEN = os.environ.get('HF_TOKEN')




# Pyannote Diarization
# Loaded once at import time. Stays None when HF_TOKEN is unset or the
# download/auth fails, so the app can still start and report the problem
# from diarize_audio instead of crashing on boot.
diarization_pipeline = None
try:
    if HF_TOKEN:
        diarization_pipeline = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.1",
            use_auth_token=HF_TOKEN
        )
        # Prefer GPU when available; pyannote pipelines default to CPU.
        if torch.cuda.is_available():
            diarization_pipeline.to(torch.device("cuda"))
            print("Pyannote: LOADED (GPU)")
        else:
            print("Pyannote: LOADED (CPU)")
except Exception as e:
    # Best-effort bootstrap: log and continue with pipeline = None.
    print(f"Pyannote Error: {e}")




@spaces.GPU
def diarize_audio(audio_path, min_speakers=1, max_speakers=5):
    """Run speaker diarization on an audio file.

    Args:
        audio_path: Filesystem path to the audio to analyze (from gr.Audio).
        min_speakers: Lower bound on the speaker count (coerced to int).
        max_speakers: Upper bound on the speaker count (coerced to int).

    Returns:
        On success: {"segments": [{"speaker", "start", "end"}, ...],
        "num_speakers": int}. On failure: {"error": str} — errors are
        returned as JSON rather than raised so the UI always gets output.
    """
    if not diarization_pipeline:
        return {"error": "Diarization not available. Check HF_TOKEN."}
    if not audio_path:
        # gr.Audio yields None when nothing was uploaded.
        return {"error": "No audio file provided."}
    try:
        lo, hi = int(min_speakers), int(max_speakers)
        # The two sliders are independent, so a reversed range is possible;
        # swap instead of letting pyannote fail with an opaque error.
        if lo > hi:
            lo, hi = hi, lo
        diarization = diarization_pipeline(audio_path, min_speakers=lo, max_speakers=hi)
        segments = [
            {"speaker": speaker, "start": round(turn.start, 2), "end": round(turn.end, 2)}
            for turn, _, speaker in diarization.itertracks(yield_label=True)
        ]
        return {
            "segments": segments,
            "num_speakers": len({s["speaker"] for s in segments}),
        }
    except Exception as e:
        # Top-level boundary for the API call: report, don't crash the app.
        return {"error": str(e)}




# UI layout: one audio input, speaker-range sliders, and a JSON output.
# Built at import time so `demo` is available to the __main__ launcher.
with gr.Blocks(title="STTR - Speaker Diarization") as demo:
    gr.Markdown("# STTR - Speaker Diarization")
    gr.Markdown("### Identify who speaks when (pyannote 3.1)")
    
    audio_in = gr.Audio(type="filepath", label="Upload Audio")
    with gr.Row():
        min_spk = gr.Slider(1, 10, value=1, step=1, label="Min Speakers")
        max_spk = gr.Slider(1, 10, value=5, step=1, label="Max Speakers")
    btn = gr.Button("Analyze Speakers", variant="primary")
    output = gr.JSON(label="Speaker Segments")
    # api_name also exposes this handler as a /diarize API endpoint.
    btn.click(diarize_audio, [audio_in, min_spk, max_spk], output, api_name="/diarize")




if __name__ == "__main__":
    # Listen on all interfaces; honor the PORT env var, defaulting to 7860.
    port = int(os.environ.get("PORT", 7860))
    demo.launch(server_name="0.0.0.0", server_port=port)