Update app.py
app.py
CHANGED
@@ -5,7 +5,7 @@ from huggingface_hub import snapshot_download
from nemo.collections import asr as nemo_asr
import gradio as gr

-# 1. CONFIGURATION
+# 1. CONFIGURATION MATÉRIEL ET LISTE DES MODÈLES ROBOTSMALI
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

MODELS = {
@@ -23,65 +23,98 @@ MODELS = {
    "Traduction Soloni (ST)": ("RobotsMali/st-soloni-114m-tdt-ctc", "rnnt"),
}

+# --- OPTIMISATION : DETECTION DE LA VIDEO DANS LE DOSSIER EXAMPLES ---
+def find_example_video():
+    # Liste des noms possibles basés sur ta capture d'écran
+    paths = [
+        "examples/MARALINKE_FIXED.mp4",
+        "examples/MARALINKE.mp4",
+        "MARALINKE.mp4"
+    ]
+    for p in paths:
+        if os.path.exists(p):
+            return p
+    return None
+
+EXAMPLE_PATH = find_example_video()

_cache = {}

+# 2. GESTION DE LA MÉMOIRE ET CHARGEMENT DU MODÈLE
+def clear_memory():
+    _cache.clear()
+    gc.collect()
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+
def get_model(name):
    if name in _cache: return _cache[name]
-    if torch.cuda.is_available(): torch.cuda.empty_cache()
+    clear_memory()
    repo, _ = MODELS[name]
+
+    print(f"📥 Téléchargement de {repo}...")
    folder = snapshot_download(repo, local_dir_use_symlinks=False)
    nemo_file = next((os.path.join(folder, f) for f in os.listdir(folder) if f.endswith(".nemo")), None)

+    if not nemo_file: raise FileNotFoundError("Fichier .nemo introuvable.")
+
+    # Optimisation RobotsMali : Connecteur flexible pour éviter l'erreur state_dict
    from nemo.core.connectors.save_restore_connector import SaveRestoreConnector
    model = nemo_asr.models.ASRModel.restore_from(
        nemo_file,
        map_location=torch.device(DEVICE),
        save_restore_connector=SaveRestoreConnector()
    )
+
    model.to(DEVICE).eval()
-    if DEVICE == "cuda":
+    if DEVICE == "cuda":
+        model.half()
+
    _cache[name] = model
    return model

+# 3. UTILITAIRES
def format_srt_time(sec):
    td = time.gmtime(sec)
    ms = int((sec - int(sec)) * 1000)
    return f"{time.strftime('%H:%M:%S', td)},{ms:03}"

+# 4. PIPELINE DE TRANSCRIPTION
def pipeline(video_in, model_name):
    tmp_dir = tempfile.mkdtemp()
    try:
+        if not video_in: return "❌ Veuillez sélectionner une vidéo.", None

+        # A. Extraction Audio
+        yield "⏳ Phase 1/4 : Extraction audio...", None
        full_wav = os.path.join(tmp_dir, "full.wav")
        subprocess.run(f"ffmpeg -y -i {shlex.quote(video_in)} -vn -ac 1 -ar 16000 {full_wav}", shell=True, check=True)

+        # B. Segmentation
+        yield "⏳ Phase 2/4 : Segmentation (20s)...", None
        subprocess.run(f"ffmpeg -i {full_wav} -f segment -segment_time 20 -c copy {os.path.join(tmp_dir, 'seg_%03d.wav')}", shell=True, check=True)
        audio_segments = sorted(glob.glob(os.path.join(tmp_dir, "seg_*.wav")))

+        # C. Transcription IA
+        yield f"⏳ Phase 3/4 : Chargement de {model_name}...", None
        model = get_model(model_name)

        all_words_ts = []
        for idx, seg_path in enumerate(audio_segments):
            base_time = idx * 20
-            yield f"🎙️ Transcription {idx+1}/{len(audio_segments)}...", None
+            yield f"🎙️ Transcription segment {idx+1}/{len(audio_segments)}...", None
            hyp = model.transcribe([seg_path], return_hypotheses=True)[0]
            if isinstance(hyp, list): hyp = hyp[0]
            text = hyp.text if hasattr(hyp, 'text') else str(hyp)
            words = text.split()
+
+            # Répartition temporelle
            gap = 20.0 / max(len(words), 1)
            for i, w in enumerate(words):
                all_words_ts.append({"word": w, "start": base_time + (i * gap), "end": base_time + ((i+1) * gap)})

+        # D. Génération SRT et Rendu Vidéo
+        yield "⏳ Phase 4/4 : Incrustation sous-titres...", None
        srt_path = os.path.join(tmp_dir, "final.srt")
        with open(srt_path, "w", encoding="utf-8") as f:
            for i in range(0, len(all_words_ts), 6):
@@ -89,38 +122,42 @@ def pipeline(video_in, model_name):
                f.write(f"{(i//6)+1}\n{format_srt_time(chunk[0]['start'])} --> {format_srt_time(chunk[-1]['end'])}\n")
                f.write(" ".join([c['word'] for c in chunk]) + "\n\n")

+        out_path = os.path.abspath(f"robotsmali_result_{int(time.time())}.mp4")
        safe_srt = srt_path.replace("\\", "/").replace(":", "\\:")
+
+        # Style : Jaune, Taille 18, Centré en bas
+        cmd = f"ffmpeg -y -i {shlex.quote(video_in)} -vf \"subtitles='{safe_srt}':force_style='Alignment=2,FontSize=18,PrimaryColour=&H00FFFF'\" -c:v libx264 -preset ultrafast -c:a aac {out_path}"
+        subprocess.run(cmd, shell=True, check=True)

        yield "✅ Terminé !", out_path
+
    except Exception as e:
        yield f"❌ Erreur : {str(e)}", None
    finally:
        if os.path.exists(tmp_dir): shutil.rmtree(tmp_dir)

-# INTERFACE
+# 5. INTERFACE GRADIO
with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.HTML("<div style='text-align:center;'><h1>🤖 RobotsMali Speech Laboratory</h1><p>Testez nos modèles de transcription et traduction</p></div>")

    with gr.Row():
        with gr.Column():
+            v_input = gr.Video(label="Vidéo")
+            m_input = gr.Dropdown(choices=list(MODELS.keys()), value="Soloba V3 (CTC)", label="Modèle")
+            run_btn = gr.Button("🚀 GÉNÉRER", variant="primary")

+            # --- AFFICHAGE DE L'EXEMPLE SI TROUVÉ ---
+            if EXAMPLE_PATH:
                gr.Examples(
+                    examples=[[EXAMPLE_PATH, "Soloba V3 (CTC)"]],
+                    inputs=[v_input, m_input],
+                    label="Vidéo d'exemple"
                )

        with gr.Column():
+            status = gr.Markdown("### État\nPrêt.")
+            v_output = gr.Video(label="Résultat")

+    run_btn.click(pipeline, [v_input, m_input], [status, v_output])

demo.launch()
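
Note on the guard added at line 86: because pipeline contains yield statements, Python treats it as a generator, so the bare return "❌ ...", None only ends the iteration; the tuple rides on the StopIteration and will typically not appear in the Gradio outputs. A minimal sketch of an equivalent guard that does surface the message (same names, rest of the function unchanged):

    try:
        if not video_in:
            yield "❌ Veuillez sélectionner une vidéo.", None
            return

For the subtitle timing, the word timestamps are just an even spread over each 20 s segment: with 10 recognised words, gap = 20.0 / 10 = 2.0 s, so the first 6-word cue of the first segment runs from 00:00:00,000 to 00:00:12,000 in the generated SRT.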