Rajor78 committed on
Commit
56e1e3f
verified
1 Parent(s): 62f7194

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -32
app.py CHANGED
@@ -1,54 +1,53 @@
1
- import os
2
- import whisper
3
- import spacy
4
- import language_tool_python
5
  import gradio as gr
6
  import subprocess
 
 
 
 
7
  from docx import Document
8
 
9
def extract_audio(video_path, audio_path):
    """Extract mono 16 kHz PCM WAV audio from a video file using ffmpeg.

    Args:
        video_path: path to the input video file.
        audio_path: path where the extracted .wav file is written.

    Returns:
        True on success, False if ffmpeg fails or cannot be launched.
    """
    try:
        # argv list with shell=False: immune to shell injection and to
        # spaces/quotes in user-supplied file paths (the old f-string +
        # shell=True form broke on both).
        command = [
            "ffmpeg", "-i", video_path,
            "-vn", "-acodec", "pcm_s16le", "-ar", "16000", "-ac", "1",
            audio_path,
        ]
        subprocess.run(command, check=True)
        return True
    except (subprocess.CalledProcessError, OSError) as e:
        # CalledProcessError: ffmpeg ran but failed; OSError: ffmpeg missing.
        print(f"Error al extraer audio: {e}")
        return False
17
 
18
def transcribe_audio(audio_path):
    """Transcribe *audio_path* with the Whisper "base" model.

    Word-level timestamps are requested so downstream code can use the
    per-segment structure of the result dict.
    """
    return whisper.load_model("base").transcribe(audio_path, word_timestamps=True)
 
 
 
 
 
22
 
23
def correct_text(text):
    """Return *text* with Spanish grammar/spelling fixes applied via LanguageTool."""
    checker = language_tool_python.LanguageTool('es')
    found = checker.check(text)
    return language_tool_python.utils.correct(text, found)
27
 
28
def create_word_doc(segments, output_path):
    """Write one grammar-corrected paragraph per Whisper segment into a .docx.

    Args:
        segments: iterable of Whisper segment dicts (each with a 'text' key).
        output_path: destination path for the Word document.

    Returns:
        The path the document was saved to.
    """
    document = Document()
    for seg in segments:
        document.add_paragraph(correct_text(seg['text']))
    document.save(output_path)
    return output_path
35
-
36
def process_video(video_file):
    """Pipeline entry point: extract audio, transcribe, write a Word doc.

    Args:
        video_file: path to the uploaded video.

    Returns:
        (status_message, docx_path) — docx_path is None on failure.
    """
    # splitext handles any container extension; the old ".mp4"-only
    # str.replace broke for .mkv/.avi uploads and for ".mp4" mid-path.
    base, _ = os.path.splitext(video_file)
    audio_path = base + ".wav"
    word_output = base + "_transcription.docx"

    if extract_audio(video_file, audio_path):
        result = transcribe_audio(audio_path)
        doc_path = create_word_doc(result['segments'], word_output)
        # Status string mojibake repaired ("Transcripci贸n" -> "Transcripción").
        return "Transcripción completada.", doc_path
    else:
        return "Error al procesar el archivo.", None
47
 
48
# Gradio front-end: a single video upload in, status text plus a
# downloadable transcription document out.
demo = gr.Interface(
    fn=process_video,
    inputs=gr.File(label="Sube un archivo de video"),
    outputs=["text", gr.File(label="Descargar transcripci贸n")],
)

demo.launch()
 
 
 
 
 
1
  import gradio as gr
2
  import subprocess
3
+ import os
4
+ from transformers import WhisperProcessor, WhisperForConditionalGeneration
5
+ import language_tool_python
6
+ from pydub import AudioSegment
7
  from docx import Document
8
 
9
def extract_audio(video_path, audio_path):
    """Extract mono 16 kHz PCM WAV audio from *video_path* into *audio_path*.

    Args:
        video_path: path to the input video file.
        audio_path: destination .wav path (overwritten if present).

    Returns:
        audio_path on success.

    Raises:
        subprocess.CalledProcessError: if ffmpeg exits non-zero.
        OSError: if the ffmpeg binary cannot be launched.
    """
    # argv list with shell=False: the previous single-quoted f-string +
    # shell=True was still shell-injectable and broke on paths containing
    # a quote character.
    command = [
        "ffmpeg", "-i", video_path,
        "-ar", "16000", "-ac", "1", "-c:a", "pcm_s16le",
        audio_path, "-y",
    ]
    subprocess.run(command, check=True)
    return audio_path
 
 
 
 
13
 
14
def transcribe_audio(audio_path):
    """Transcribe a WAV file to text with Whisper (openai/whisper-base).

    WhisperProcessor expects a raw float waveform plus its sampling rate —
    NOT a file-path string, which is what the previous version passed. The
    audio is therefore decoded with pydub (already imported for this
    purpose), forced to 16 kHz mono, and normalized to [-1.0, 1.0] before
    feature extraction.

    Args:
        audio_path: path to the audio file to transcribe.

    Returns:
        The decoded transcription string.
    """
    import numpy as np  # local import: only needed for waveform conversion

    processor = WhisperProcessor.from_pretrained("openai/whisper-base")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

    audio = AudioSegment.from_file(audio_path).set_frame_rate(16000).set_channels(1)
    samples = np.array(audio.get_array_of_samples()).astype(np.float32)
    # Integer PCM -> floats in [-1, 1]; divisor depends on sample width.
    samples /= float(1 << (8 * audio.sample_width - 1))

    inputs = processor(samples, sampling_rate=16000, return_tensors="pt")
    generated_ids = model.generate(inputs.input_features)
    return processor.decode(generated_ids[0], skip_special_tokens=True)
23
 
24
def correct_text(text):
    """Return *text* with Spanish grammar/spelling fixes applied via LanguageTool.

    The LanguageTool instance spawns a Java server, so building a new one on
    every call (as before) is very expensive; it is created lazily once and
    cached on the function object.
    """
    tool = getattr(correct_text, "_tool", None)
    if tool is None:
        tool = language_tool_python.LanguageTool('es')
        correct_text._tool = tool
    matches = tool.check(text)
    return language_tool_python.utils.correct(text, matches)
28
 
 
 
 
 
 
 
 
 
29
def process_video(video_file):
    """Gradio handler: uploaded video in, (corrected transcript, .docx path) out.

    Args:
        video_file: the value delivered by gr.File — a tempfile-like object
            with a .name attribute in older Gradio versions, or a plain path
            string in newer ones; both are accepted.

    Returns:
        (corrected_text, doc_path) for the Textbox and File outputs.
    """
    # Tolerate both gr.File payload shapes instead of assuming .name exists.
    video_path = getattr(video_file, "name", video_file)
    base = os.path.splitext(video_path)[0]
    audio_path = base + '.wav'

    extract_audio(video_path, audio_path)
    transcribed_text = transcribe_audio(audio_path)
    corrected_text = correct_text(transcribed_text)

    doc = Document()
    doc.add_paragraph(corrected_text)
    # Derive the doc name from the upload: a fixed "transcription.docx"
    # would be clobbered by concurrent requests.
    doc_path = base + "_transcription.docx"
    doc.save(doc_path)

    return corrected_text, doc_path
 
 
 
 
 
 
43
 
44
# Web UI: one video upload in, the corrected transcript plus a Word
# document download out.
demo = gr.Interface(
    fn=process_video,
    inputs=gr.File(label="Sube un archivo de video"),
    outputs=[
        gr.Textbox(label="Texto transcrito y corregido"),
        gr.File(label="Descargar transcripci贸n Word"),
    ],
)

demo.launch()