binaryMao committed
Commit 3b5085e · verified · 1 Parent(s): 3673a6b

Update app.py

Files changed (1):
  1. app.py +75 -79

app.py CHANGED
@@ -1,40 +1,38 @@
  # -*- coding: utf-8 -*-
- import os, shlex, subprocess, tempfile, traceback, textwrap, time, glob
+ import os, shlex, subprocess, tempfile, traceback, time, glob, gc
  import torch
  from huggingface_hub import snapshot_download
  from nemo.collections import asr as nemo_asr
  import gradio as gr
 
- # 1. HARDWARE AND MODEL CONFIGURATION
+ # 1. HARDWARE CONFIGURATION
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
+ # 2. MODEL DICTIONARY (updated to match your screenshot)
  MODELS = {
-     "Soloba V1 (CTC)": ("RobotsMali/soloba-ctc-0.6b-v1", "ctc"),
-     "Soloni V1 (RNNT)": ("RobotsMali/soloni-114m-tdt-ctc-v1", "rnnt"),
-     "QuartzNet V1 (CTC-char)": ("RobotsMali/stt-bm-quartznet15x5-v1", "ctc_char"),
-     "Soloba V0 (CTC)": ("RobotsMali/soloba-ctc-0.6b-v0", "ctc"),
-     "Soloni V0 (RNNT)": ("RobotsMali/soloni-114m-tdt-ctc-v0", "rnnt"),
-     "QuartzNet V0 (CTC-char)": ("RobotsMali/stt-bm-quartznet15x5-v0", "ctc_char"),
+     "Soloba V3 (CTC)": ("RobotsMali/soloba-ctc-0.6b-v3", "ctc"),
+     "Soloba V1.5 (TDT)": ("RobotsMali/soloba-tdt-0.6b-v1.5", "rnnt"),
+     "Soloni V3 (TDT-CTC)": ("RobotsMali/soloni-114m-tdt-ctc-v3", "rnnt"),
+     "Soloni V2 (TDT-CTC)": ("RobotsMali/soloni-114m-tdt-ctc-v2", "rnnt"),
+     "Soloni MSE (Experimental)": ("RobotsMali/lau-soloni-114m-mse-k1", "ctc"),
+     "Soloba V0.5 (TDT)": ("RobotsMali/soloba-tdt-0.6b-v0.5", "rnnt"),
  }
 
- # 2. LOCATE THE EXAMPLE VIDEO
- def get_absolute_example():
-     names = ["MARALINKE.mp4", "maralinke.mp4", "example.mp4"]
-     dirs = [".", "examples", "/home/user/app", "/home/user/app/examples"]
-     for d in dirs:
-         for n in names:
-             p = os.path.join(d, n)
-             if os.path.exists(p): return os.path.abspath(p)
-     return None
-
- EXAMPLE_PATH = get_absolute_example()
  _cache = {}
 
- # 3. LOAD THE AI MODELS
+ def clear_memory():
+     """Free VRAM and RAM to avoid memory overflows."""
+     _cache.clear()
+     gc.collect()
+     if torch.cuda.is_available():
+         torch.cuda.empty_cache()
+
  def load_model(name):
-     if name in _cache: return _cache[name]
-     _cache.clear()
-     if torch.cuda.is_available(): torch.cuda.empty_cache()
+     """Load the model, in FP16 when possible. Yields progress strings, then the model."""
+     if name in _cache: yield _cache[name]; return
+
+     yield f"⏳ Loading model {name}..."
+     clear_memory()
 
      repo, mode = MODELS[name]
      folder = snapshot_download(repo, local_dir_use_symlinks=False)
@@ -42,71 +40,71 @@ def load_model(name):
 
      if mode == "rnnt":
          model = nemo_asr.models.EncDecHybridRNNTCTCBPEModel.restore_from(nemo_file)
-     elif mode == "ctc_char":
-         model = nemo_asr.models.EncDecCTCModel.restore_from(nemo_file)
      else:
          model = nemo_asr.models.EncDecCTCModelBPE.restore_from(nemo_file)
 
      model.to(DEVICE).eval()
+     if DEVICE == "cuda":
+         model.half()  # Half precision, saving roughly 50% of VRAM
+
      _cache[name] = model
-     return model
+     yield model
 
- # 4. SRT FORMATTING UTILITY
  def format_srt_time(sec):
      td = time.gmtime(sec)
      ms = int((sec - int(sec)) * 1000)
      return f"{time.strftime('%H:%M:%S', td)},{ms:03}"
 
- # 5. PROCESSING PIPELINE (10 S SEGMENTS + OFFSETS)
  def pipeline(video_in, model_name):
      tmp_dir = tempfile.mkdtemp()
      try:
-         if not video_in: return "❌ Error: empty source", None
+         if not video_in: yield "❌ Error: empty source", None, None; return
 
-         # Step A: audio extraction and segmentation
-         yield "⏳ Splitting the audio into 10 s segments...", None
+         # A. Audio extraction
+         yield "⏳ Extracting the audio...", None, None
          full_wav = os.path.join(tmp_dir, "full.wav")
          subprocess.run(f"ffmpeg -y -i {shlex.quote(video_in)} -vn -ac 1 -ar 16000 {full_wav}", shell=True, check=True)
 
+         # B. Time-based segmentation (10 s)
          segment_pattern = os.path.join(tmp_dir, "seg_%03d.wav")
          subprocess.run(f"ffmpeg -i {full_wav} -f segment -segment_time 10 -c copy {segment_pattern}", shell=True, check=True)
-
          audio_segments = sorted(glob.glob(os.path.join(tmp_dir, "seg_*.wav")))
-         model = load_model(model_name)
 
-         # Step B: segmented transcription with native offsets
+         # C. Model loading and timestamp-resolution setup
+         model_gen = load_model(model_name)
+         model = None
+         for update in model_gen:
+             if isinstance(update, str): yield update, None, None
+             else: model = update
+
+         # Dynamic stride computation (frames -> seconds conversion factor)
+         stride = 0.02
+         if hasattr(model, 'preprocessor') and hasattr(model.preprocessor, 'featurizer'):
+             hop = model.preprocessor.featurizer.hop_length
+             sr = model.preprocessor.featurizer.sample_rate
+             stride = hop / sr
+
+         # D. Per-segment transcription
          all_words_ts = []
         for idx, seg_path in enumerate(audio_segments):
             base_time = idx * 10.0
-             yield f"⏳ AI: transcribing segment {idx+1}/{len(audio_segments)}...", None
+             yield f"⏳ AI: transcribing segment {idx+1}/{len(audio_segments)}...", None, None
 
-             # Use return_hypotheses to get timestamps
              hyp = model.transcribe([seg_path], return_hypotheses=True)[0]
-
-             if hasattr(hyp, 'word_offsets') and hyp.word_offsets:
-                 words = hyp.text.split()
+             offsets = getattr(hyp, 'word_offsets', None)
+             words = hyp.text.split() if hasattr(hyp, 'text') else str(hyp).split()
+
+             if offsets and len(offsets) == len(words):
                  for i, word in enumerate(words):
-                     # Frame -> second conversion factor (NeMo default 0.02)
-                     rel_start = hyp.word_offsets[i] * 0.02
-                     all_words_ts.append({
-                         "word": word,
-                         "start": base_time + rel_start,
-                         "end": base_time + rel_start + 0.45
-                     })
+                     start_t = base_time + (offsets[i] * stride)
+                     all_words_ts.append({"word": word, "start": start_t, "end": start_t + 0.45})
              else:
-                 # Time-based fallback when offsets are unavailable
-                 words = (hyp.text if hasattr(hyp, 'text') else str(hyp)).split()
-                 if words:
-                     gap = 10.0 / len(words)
-                     for i, w in enumerate(words):
-                         all_words_ts.append({
-                             "word": w,
-                             "start": base_time + (i * gap),
-                             "end": base_time + ((i+1) * gap)
-                         })
+                 # Fallback: spread the words linearly when offsets are missing
+                 gap = 10.0 / max(len(words), 1)
+                 for i, w in enumerate(words):
+                     all_words_ts.append({"word": w, "start": base_time + (i * gap), "end": base_time + ((i+1) * gap)})
 
-         # Step C: generate the optimized SRT
-         yield "⏳ Creating the subtitle file...", None
+         # E. Generate the SRT file
          srt_path = os.path.join(tmp_dir, "final.srt")
          words_per_line = 6
          with open(srt_path, "w", encoding="utf-8") as f:
@@ -116,43 +114,41 @@ def pipeline(video_in, model_name):
              f.write(f"{format_srt_time(chunk[0]['start'])} --> {format_srt_time(chunk[-1]['end'])}\n")
              f.write(" ".join([c['word'] for c in chunk]) + "\n\n")
 
-         # Step D: final burn-in
-         yield "⏳ Rendering the final video...", None
+         # F. Video encoding with burned-in subtitles
+         yield "⏳ Rendering the final video...", None, srt_path
          out_path = os.path.abspath(f"robotsmali_final_{int(time.time())}.mp4")
+
+         # Escape the path for FFmpeg's subtitles filter (required on Windows/Linux)
+         safe_srt = srt_path.replace("\\", "/").replace(":", "\\:")
          cmd_ffmpeg = (
              f"ffmpeg -y -i {shlex.quote(video_in)} "
-             f"-vf \"subtitles={shlex.quote(srt_path)}:force_style='Alignment=2,FontSize=20,PrimaryColour=&H00FFFF&'\" "
-             f"-c:v libx264 -pix_fmt yuv420p -movflags +faststart -c:a aac {out_path}"
+             f"-vf \"subtitles='{safe_srt}':force_style='Alignment=2,FontSize=18,OutlineColour=&H80000000,BorderStyle=4'\" "
+             f"-c:v libx264 -preset fast -pix_fmt yuv420p -movflags +faststart -c:a aac {out_path}"
          )
          subprocess.run(cmd_ffmpeg, shell=True, check=True)
 
-         yield "✅ Perfect synchronization complete!", out_path
+         yield "✅ Transcription and burn-in complete!", out_path, srt_path
 
      except Exception as e:
          traceback.print_exc()
-         yield f"❌ Error: {str(e)}", None
+         yield f"❌ Error: {str(e)}", None, None
 
  # 6. GRADIO USER INTERFACE
- with gr.Blocks(theme=gr.themes.Soft(), css="body {background-color: #0b1120;}") as demo:
-     gr.HTML("<h1 style='text-align:center; color:#facc15;'>🤖 ROBOTSMALI </h1>")
-     gr.Markdown("<p style='text-align:center; color:white;'>NeMo segmentation</p>")
+ with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}") as demo:
+     gr.HTML("<h1 style='text-align:center; color:#EAB308;'>🤖 ROBOTSMALI TRANSCRIPTION PRO</h1>")
 
      with gr.Row():
-         with gr.Column():
-             v_in = gr.Video(label="Source (Webcam or File)", sources=["upload", "webcam"], interactive=True)
-
-             m_sel = gr.Dropdown(list(MODELS.keys()), value="Soloba V1 (CTC)", label="AI Model")
-             btn_run = gr.Button("🚀 GENERATE SUBTITLES", variant="primary")
+         with gr.Column(scale=1):
+             v_in = gr.Video(label="Source Video (Upload or Webcam)", sources=["upload", "webcam"])
+             m_sel = gr.Dropdown(choices=list(MODELS.keys()), value="Soloba V3 (CTC)", label="Choose the AI Model")
+             btn_run = gr.Button("🚀 GENERATE SUBTITLES", variant="primary")
 
-         if EXAMPLE_PATH:
-             gr.Markdown("### 💡 Example")
-             gr.Examples(examples=[[EXAMPLE_PATH, "Soloba V1 (CTC)"]], inputs=[v_in, m_sel])
-
-         with gr.Column():
-             status = gr.Markdown("### Status\nReady")
-             v_out = gr.Video(label="Final Result")
+         with gr.Column(scale=1):
+             status = gr.Markdown("### Status\nReady to use.")
+             v_out = gr.Video(label="Final Video with Burned-in Subtitles")
+             f_srt = gr.File(label="Subtitle File (.SRT)")
 
-     btn_run.click(pipeline, [v_in, m_sel], [status, v_out])
+     btn_run.click(pipeline, [v_in, m_sel], [status, v_out, f_srt])
 
  if __name__ == "__main__":
-     demo.launch(debug=True)
+     demo.launch(debug=True, show_error=True)
 
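A few notes on the updated code follow, each with a small runnable sketch (plain Python, no NeMo required). First, load_model is now a generator: it yields progress strings and must hand the model over with a final yield, since a bare return inside a generator only raises StopIteration and the consuming loop in pipeline would never receive it. A minimal sketch of that protocol, with hypothetical stand-in names (fake_load, the dict payload) instead of a real NeMo model:

    # Sketch of the progress-generator protocol used by load_model / pipeline.
    def fake_load(name):
        yield f"Loading {name}..."        # progress updates are strings
        payload = {"model": name}          # stand-in for the restored model
        yield payload                      # the final yield carries the payload

    model = None
    for update in fake_load("Soloba V3 (CTC)"):
        if isinstance(update, str):
            print(update)                  # the app forwards these to Gradio
        else:
            model = update                 # anything non-string is the model
    assert model is not None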
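The dynamic stride turns frame indices into seconds. Assuming the common 16 kHz front end with a 160-sample hop (assumed values here, read from model.preprocessor in the app), one feature frame is 10 ms; whether hyp.word_offsets counts these frames or coarser subsampled encoder frames depends on the NeMo version, which is why the code keeps 0.02 as a guarded default. The arithmetic itself:

    # Frame index -> seconds, given the preprocessor's hop length and sample rate.
    hop_length = 160                      # assumed value
    sample_rate = 16000                   # assumed value
    stride = hop_length / sample_rate     # 0.01 s per feature frame

    frame_index = 250
    print(frame_index * stride)           # -> 2.5 seconds into the segment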
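Finally, FFmpeg's subtitles filter treats ':' as an option separator, which is why this commit escapes the SRT path before interpolating it. A small sketch of the escaping on a worst-case Windows path, plus an argument-list form of the call that avoids shell=True and most quoting pitfalls entirely (a design alternative, not what the commit does; input.mp4/output.mp4 are placeholder names and ffmpeg must be on PATH):

    import subprocess

    srt_path = r"C:\tmp\final.srt"   # worst case: a drive-letter colon
    safe_srt = srt_path.replace("\\", "/").replace(":", "\\:")
    print(safe_srt)                  # -> C\:/tmp/final.srt

    # Passing argv as a list lets subprocess handle quoting.
    cmd = [
        "ffmpeg", "-y", "-i", "input.mp4",
        "-vf", f"subtitles='{safe_srt}':force_style='Alignment=2,FontSize=18'",
        "-c:v", "libx264", "-preset", "fast", "-pix_fmt", "yuv420p",
        "-movflags", "+faststart", "-c:a", "aac", "output.mp4",
    ]
    subprocess.run(cmd, check=True)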