Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,54 +1,53 @@
|
|
| 1 |
-
import os
|
| 2 |
-
import whisper
|
| 3 |
-
import spacy
|
| 4 |
-
import language_tool_python
|
| 5 |
import gradio as gr
|
| 6 |
import subprocess
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
from docx import Document
|
| 8 |
|
| 9 |
def extract_audio(video_path, audio_path):
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
return True
|
| 14 |
-
except Exception as e:
|
| 15 |
-
print(f"Error al extraer audio: {e}")
|
| 16 |
-
return False
|
| 17 |
|
| 18 |
def transcribe_audio(audio_path):
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
def correct_text(text):
|
| 24 |
tool = language_tool_python.LanguageTool('es')
|
| 25 |
matches = tool.check(text)
|
| 26 |
return language_tool_python.utils.correct(text, matches)
|
| 27 |
|
| 28 |
-
def create_word_doc(segments, output_path):
|
| 29 |
-
doc = Document()
|
| 30 |
-
for segment in segments:
|
| 31 |
-
corrected_text = correct_text(segment['text'])
|
| 32 |
-
doc.add_paragraph(corrected_text)
|
| 33 |
-
doc.save(output_path)
|
| 34 |
-
return output_path
|
| 35 |
-
|
| 36 |
def process_video(video_file):
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
| 40 |
-
|
| 41 |
-
result = transcribe_audio(audio_path)
|
| 42 |
-
segments = result['segments']
|
| 43 |
-
doc_path = create_word_doc(segments, word_output)
|
| 44 |
-
return "Transcripci贸n completada.", doc_path
|
| 45 |
-
else:
|
| 46 |
-
return "Error al procesar el archivo.", None
|
| 47 |
|
| 48 |
demo = gr.Interface(
|
| 49 |
fn=process_video,
|
| 50 |
inputs=gr.File(label="Sube un archivo de video"),
|
| 51 |
-
outputs=[
|
|
|
|
|
|
|
|
|
|
| 52 |
)
|
| 53 |
|
| 54 |
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
import subprocess
|
| 3 |
+
import os
|
| 4 |
+
from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
| 5 |
+
import language_tool_python
|
| 6 |
+
from pydub import AudioSegment
|
| 7 |
from docx import Document
|
| 8 |
|
| 9 |
def extract_audio(video_path, audio_path):
    """Extract a mono 16 kHz PCM WAV track from a video file using ffmpeg.

    Args:
        video_path: Path to the input video file.
        audio_path: Destination path for the extracted WAV file.

    Returns:
        The ``audio_path`` that was written.

    Raises:
        subprocess.CalledProcessError: If ffmpeg exits with a non-zero status.
    """
    # Argument-list form with shell=False: the original interpolated the
    # paths into a shell string and ran it with shell=True, which allows
    # shell injection through crafted upload filenames.
    command = [
        "ffmpeg",
        "-i", video_path,
        "-ar", "16000",      # 16 kHz sample rate, matching Whisper's expected input
        "-ac", "1",          # mono
        "-c:a", "pcm_s16le",  # 16-bit little-endian PCM
        audio_path,
        "-y",                # overwrite the output file if it already exists
    ]
    subprocess.run(command, check=True)
    return audio_path
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
def transcribe_audio(audio_path):
    """Transcribe a WAV file with the openai/whisper-base model.

    Args:
        audio_path: Path to a WAV file (ideally 16 kHz mono, as produced
            by ``extract_audio``).

    Returns:
        The transcribed text as a single string.
    """
    processor = WhisperProcessor.from_pretrained("openai/whisper-base")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

    # Bug fix: the processor expects raw audio samples, not a file path
    # string — the original call raised at runtime. Load the WAV with
    # pydub (already imported by this file), force 16 kHz mono, and scale
    # the integer samples to floats in [-1.0, 1.0].
    audio = AudioSegment.from_file(audio_path).set_frame_rate(16000).set_channels(1)
    scale = float(2 ** (8 * audio.sample_width - 1))
    samples = [s / scale for s in audio.get_array_of_samples()]

    audio_input = processor(samples, return_tensors="pt", sampling_rate=16000)
    result = model.generate(**audio_input)
    transcription = processor.decode(result[0], skip_special_tokens=True)

    return transcription
|
| 23 |
|
| 24 |
def correct_text(text):
    """Apply Spanish grammar and spelling corrections via LanguageTool.

    Args:
        text: Raw text to correct.

    Returns:
        The corrected text.
    """
    # Starting a LanguageTool instance is expensive (it launches a Java
    # server process), so reuse one instance across calls instead of
    # creating a fresh one per invocation as the original did.
    tool = getattr(correct_text, "_tool", None)
    if tool is None:
        tool = language_tool_python.LanguageTool('es')
        correct_text._tool = tool
    matches = tool.check(text)
    return language_tool_python.utils.correct(text, matches)
|
| 28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
def process_video(video_file):
    """Gradio handler: uploaded video -> corrected transcript + Word doc.

    Args:
        video_file: File object handed over by ``gr.File`` (exposes the
            uploaded file's path via ``.name``).

    Returns:
        Tuple of (corrected transcript text, path to the generated .docx).
    """
    video_path = video_file.name
    audio_path = os.path.splitext(video_path)[0] + '.wav'

    try:
        extract_audio(video_path, audio_path)
        transcribed_text = transcribe_audio(audio_path)
    finally:
        # The intermediate WAV is only needed for transcription; the
        # original leaked one file per request.
        if os.path.exists(audio_path):
            os.remove(audio_path)

    corrected_text = correct_text(transcribed_text)

    doc = Document()
    doc.add_paragraph(corrected_text)
    doc_path = "transcription.docx"
    doc.save(doc_path)

    return corrected_text, doc_path
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
|
| 44 |
# Gradio UI: upload a video, get back the corrected transcript plus a
# downloadable Word document.
demo = gr.Interface(
    fn=process_video,
    inputs=gr.File(label="Sube un archivo de video"),
    outputs=[
        gr.Textbox(label="Texto transcrito y corregido"),
        # Fixed mojibake in the user-facing label:
        # "transcripci贸n" -> "transcripción".
        gr.File(label="Descargar transcripción Word"),
    ],
)

demo.launch()
|