|
|
import os
import tempfile
from functools import lru_cache

import ffmpeg
import streamlit as st
from transformers import pipeline
|
|
|
|
|
|
|
|
def extract_audio(video_path, output_audio_path):
    """Extract the audio track from a video as 16 kHz mono MP3.

    Args:
        video_path: Path to the source video file readable by ffmpeg.
        output_audio_path: Destination path for the extracted MP3.

    Returns:
        The ``output_audio_path`` that was written.
    """
    # Mono (ac=1) at 16 kHz (ar="16000") matches what Whisper expects,
    # keeping the intermediate file small.
    source = ffmpeg.input(video_path)
    sink = source.output(output_audio_path, format="mp3", ac=1, ar="16000")
    sink.run(overwrite_output=True)
    return output_audio_path
|
|
|
|
|
@lru_cache(maxsize=1)
def _get_asr_pipeline():
    """Build the Whisper ASR pipeline once and reuse it across calls.

    Loading the model is expensive (network download + weights into memory);
    without caching it was re-created on every uploaded file.
    """
    return pipeline("automatic-speech-recognition", model="openai/whisper-base")


def transcribe_audio(audio_path):
    """Transcribe an audio file using OpenAI Whisper.

    Args:
        audio_path: Path to an audio file (16 kHz mono works best).

    Returns:
        The transcribed text as a single string.
    """
    # return_timestamps=True lets the pipeline chunk audio longer than
    # Whisper's 30 s window instead of truncating it.
    transcription = _get_asr_pipeline()(audio_path, return_timestamps=True)
    return transcription["text"]
|
|
|
|
|
|
|
|
# --- Page layout -------------------------------------------------------------
st.title("Video-to-Text Transcription App")

st.write("Upload a video file to transcribe its audio content into text.")

# Accept common video containers; Streamlit returns an in-memory UploadedFile
# (file-like object) or None while nothing has been uploaded yet.
uploaded_file = st.file_uploader("Upload your video file (e.g., .mp4, .mov, etc.)", type=["mp4", "mov", "avi", "mkv"])
|
|
|
|
|
if uploaded_file is not None:
    with st.spinner("Processing video..."):
        # Persist the upload to a real file so ffmpeg can read it by path.
        # Preserve the original extension so ffmpeg picks the right demuxer
        # (the old code hard-coded ".mp4" even for .mov/.avi/.mkv uploads).
        suffix = os.path.splitext(uploaded_file.name)[1] or ".mp4"
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_video:
            temp_video.write(uploaded_file.read())
            video_path = temp_video.name

        # Unique temp path per request: a fixed "extracted_audio.mp3" name
        # would be clobbered by concurrent sessions.
        audio_fd, audio_path = tempfile.mkstemp(suffix=".mp3")
        os.close(audio_fd)

        try:
            extract_audio(video_path, audio_path)
            transcription = transcribe_audio(audio_path)
        finally:
            # Always remove temp artifacts, even when extraction or
            # transcription raises (previously they leaked on every run).
            for path in (video_path, audio_path):
                try:
                    os.remove(path)
                except OSError:
                    pass  # best effort — file may already be gone

        st.subheader("Transcription")
        st.text_area("Transcribed Text", transcription, height=300)

        # Serve the transcript straight from memory: st.download_button
        # accepts a str, so there is no need to write a server-side
        # "transcription.txt" in the cwd (which raced between users).
        st.download_button(
            label="Download Transcription",
            data=transcription,
            file_name="transcription.txt",
            mime="text/plain",
        )
|
|
|