Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -7,7 +7,6 @@ import json
 from threading import Thread
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import spaces
-import moviepy.editor as mp
 import time
 import langdetect
 import uuid
@@ -31,197 +30,73 @@ def cleanup_files(*files):
             os.remove(file)
             print(f"Removed file: {file}")
 
-def download_youtube_audio(url):
-    print(f"Downloading audio from: {url}")
-    output_path = generate_unique_filename(".wav")
-    ydl_opts = {
-        'format': 'bestaudio/best',
-        'postprocessors': [{
-            'key': 'FFmpegExtractAudio',
-            'preferredcodec': 'wav',
-        }],
-        'outtmpl': output_path,
-        'keepvideo': True,
-    }
-    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-        ydl.download([url])
-
-    # Check if the file was renamed to .wav.wav
-    if os.path.exists(output_path + ".wav"):
-        os.rename(output_path + ".wav", output_path)
-
-    if os.path.exists(output_path):
-        print(f"Audio download completed. File saved at: {output_path}")
-        print(f"File size: {os.path.getsize(output_path)} bytes")
-    else:
-        print(f"Error: File {output_path} not found after download.")
-
-    return output_path
-
+def extract_audio_ffmpeg(video_path):
+    print("Extracting audio using ffmpeg...")
+    audio_path = generate_unique_filename(".wav")
+    command = ["ffmpeg", "-i", video_path, "-q:a", "0", "-map", "a", audio_path, "-y"]
+    subprocess.run(command, check=True)
+    return audio_path
 
 def transcribe_audio(file_path):
     print(f"Starting transcription of file: {file_path}")
     temp_audio = None
     if file_path.endswith(('.mp4', '.avi', '.mov', '.flv')):
         print("Video file detected. Extracting audio...")
-        try:
-            video = mp.VideoFileClip(file_path)
-            temp_audio = generate_unique_filename(".wav")
-            video.audio.write_audiofile(temp_audio)
-            file_path = temp_audio
-        except Exception as e:
-            print(f"Error extracting audio from video: {e}")
-            raise
-
-    print(f"Does the file exist? {os.path.exists(file_path)}")
-    print(f"File size: {os.path.getsize(file_path) if os.path.exists(file_path) else 'N/A'} bytes")
+        temp_audio = extract_audio_ffmpeg(file_path)
+        file_path = temp_audio
 
     output_file = generate_unique_filename(".json")
     command = [
-        "insanely-fast-whisper",
-        "--file-name", file_path,
-        "--device-id", "0",
-        "--model-name", "openai/whisper-large-v3",
-        "--task", "transcribe",
-        "--timestamp", "chunk",
+        "insanely-fast-whisper", "--file-name", file_path,
+        "--device-id", "0", "--model-name", "openai/whisper-large-v3",
+        "--task", "transcribe", "--timestamp", "chunk",
         "--transcript-path", output_file
     ]
-
-    try:
-        result = subprocess.run(command, check=True, capture_output=True, text=True)
-        print(f"Standard output: {result.stdout}")
-        print(f"Error output: {result.stderr}")
-    except subprocess.CalledProcessError as e:
-        print(f"Error running insanely-fast-whisper: {e}")
-        print(f"Standard output: {e.stdout}")
-        print(f"Error output: {e.stderr}")
-        raise
+    subprocess.run(command, check=True)
 
-
-    try:
-        with open(output_file, "r") as f:
-            transcription = json.load(f)
-    except json.JSONDecodeError as e:
-        print(f"Error decoding JSON: {e}")
-        print(f"File content: {open(output_file, 'r').read()}")
-        raise
+    with open(output_file, "r") as f:
+        transcription = json.load(f)
 
-    if "text" in transcription:
-        result = transcription["text"]
-    else:
-        result = " ".join([chunk["text"] for chunk in transcription.get("chunks", [])])
-
-    print("Transcription completed.")
-
-    # Cleanup
+    result = transcription.get("text", " ".join([chunk["text"] for chunk in transcription.get("chunks", [])]))
     cleanup_files(output_file)
     if temp_audio:
         cleanup_files(temp_audio)
 
     return result
 
-
 def generate_summary_stream(transcription):
-    print("Starting summary generation...")
-    print(f"Transcription length: {len(transcription)} characters")
-
     detected_language = langdetect.detect(transcription)
-
     prompt = f"""Summarize the following video transcription in 150-300 words.
 The summary should be in the same language as the transcription, which is detected as {detected_language}.
-Please ensure that the summary captures the main points and key ideas of the transcription:
 {transcription[:300000]}..."""
 
     response, history = model.chat(tokenizer, prompt, history=[])
-    print(f"Final summary generated: {response[:100]}...")
-    print("Summary generation completed.")
     return response
 
-def process_youtube(url):
-    if not url:
-        print("YouTube URL not provided.")
-        return "Please enter a YouTube URL.", None
-    print(f"Processing YouTube URL: {url}")
-
-    audio_file = None
-    try:
-        audio_file = download_youtube_audio(url)
-        if not os.path.exists(audio_file):
-            raise FileNotFoundError(f"File {audio_file} does not exist after download.")
-
-        print(f"Audio file found: {audio_file}")
-        print("Starting transcription...")
-        transcription = transcribe_audio(audio_file)
-        print(f"Transcription completed. Length: {len(transcription)} characters")
-        return transcription, None
-    except Exception as e:
-        print(f"Error processing YouTube: {e}")
-        return f"Processing error: {str(e)}", None
-    finally:
-        if audio_file and os.path.exists(audio_file):
-            cleanup_files(audio_file)
-        print(f"Directory content after processing: {os.listdir('.')}")
-
 def process_uploaded_video(video_path):
-    print(f"Processing uploaded video: {video_path}")
     try:
-        print("Starting transcription...")
         transcription = transcribe_audio(video_path)
-        print(f"Transcription completed. Length: {len(transcription)} characters")
         return transcription, None
     except Exception as e:
-        print(f"Error processing video: {e}")
         return f"Processing error: {str(e)}", None
 
-
-with gr.Blocks() as demo:
-    gr.Markdown(
-        """
-        # π₯ AI Video Transcription & Summary
-
-        Upload a video or provide a YouTube link to get a transcription and AI-generated summary. HF Zero GPU has a usage time limit. So if you want to run longer videos I recommend you clone the space. Remove @Spaces.gpu from the code and run it locally on your GPU!
-        """
-    )
+demo = gr.Blocks()
+with demo:
+    gr.Markdown("""
+    # π₯ AI Video Transcription & Summary
+    Upload a video or provide a YouTube link to get a transcription and AI-generated summary.
+    """)
 
     with gr.Tabs():
         with gr.TabItem("π€ Video Upload"):
-            video_input = gr.Video()
+            video_input = gr.File(label="Upload a video file")
             video_button = gr.Button("π Process Video", variant="primary")
 
-        with gr.TabItem("YouTube URL"):
-            url_input = gr.Textbox(label="YouTube URL")
-            url_button = gr.Button("π Process URL", variant="primary")
-
-    with gr.Row():
-        with gr.Column():
-            transcription_output = gr.Textbox(label="π Transcription", lines=10, show_copy_button=True)
-        with gr.Column():
-            summary_output = gr.Textbox(label="π Summary", lines=10, show_copy_button=True)
-
+    transcription_output = gr.Textbox(label="π Transcription", lines=10, show_copy_button=True)
+    summary_output = gr.Textbox(label="π Summary", lines=10, show_copy_button=True)
     summary_button = gr.Button("π Generate Summary", variant="secondary")
 
-    gr.Markdown(
-        """
-        ### How to use:
-        1. Upload a video or paste a YouTube link.
-        2. Click 'Process' to get the transcription.
-        3. Click 'Generate Summary' to get a summary of the content.
-
-        *Note: Processing may take a few minutes depending on the video length.*
-        """
-    )
-
-    def process_video_and_update(video):
-        if video is None:
-            return "No video uploaded.", "Please upload a video."
-        print(f"Video received: {video}")
-        transcription, _ = process_uploaded_video(video)
-        print(f"Returned transcription: {transcription[:100] if transcription else 'No transcription generated'}...")
-        return transcription or "Transcription error", ""
-
-    video_button.click(process_video_and_update, inputs=[video_input], outputs=[transcription_output, summary_output])
-    url_button.click(process_youtube, inputs=[url_input], outputs=[transcription_output, summary_output])
+    video_button.click(process_uploaded_video, inputs=[video_input], outputs=[transcription_output, summary_output])
     summary_button.click(generate_summary_stream, inputs=[transcription_output], outputs=[summary_output])
 
-
-demo.launch()
+demo.launch()
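For reference, here is a minimal, self-contained sketch (not part of the commit) of how the pieces introduced above fit together: ffmpeg-based audio extraction followed by the insanely-fast-whisper CLI call and JSON transcript parsing used by the new transcribe_audio. It assumes ffmpeg and the insanely-fast-whisper CLI are installed and on PATH; generate_unique_filename is re-implemented here, and the example input filename is hypothetical.

```python
# Sketch only: mirrors the commit's new extraction + transcription flow.
import json
import subprocess
import uuid


def generate_unique_filename(extension):
    # Same idea as the app's helper: a random filename with the given extension.
    return f"{uuid.uuid4()}{extension}"


def extract_audio_ffmpeg(video_path):
    # Strip the audio track to a .wav file with ffmpeg, as the new code does.
    audio_path = generate_unique_filename(".wav")
    subprocess.run(
        ["ffmpeg", "-i", video_path, "-q:a", "0", "-map", "a", audio_path, "-y"],
        check=True,
    )
    return audio_path


def transcribe(file_path):
    # Invoke the insanely-fast-whisper CLI with the same flags as the commit,
    # then read the JSON transcript it writes to --transcript-path.
    output_file = generate_unique_filename(".json")
    subprocess.run(
        [
            "insanely-fast-whisper", "--file-name", file_path,
            "--device-id", "0", "--model-name", "openai/whisper-large-v3",
            "--task", "transcribe", "--timestamp", "chunk",
            "--transcript-path", output_file,
        ],
        check=True,
    )
    with open(output_file, "r") as f:
        transcription = json.load(f)
    # Fall back to joining chunk texts when no top-level "text" field is present.
    return transcription.get(
        "text",
        " ".join(chunk["text"] for chunk in transcription.get("chunks", [])),
    )


if __name__ == "__main__":
    audio = extract_audio_ffmpeg("example.mp4")  # hypothetical input file
    print(transcribe(audio))
```

Note that, unlike the removed code, this sketch (like the commit) does not capture subprocess output or guard against malformed JSON; a CalledProcessError or JSONDecodeError will simply propagate to the caller.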