# NOTE(review): the lines below were pasted from a web UI; the original
# header read "Spaces: Running" (Hugging Face Space status, not code).
| import whisper | |
| def transcribe_video(video_path, model_size="tiny"): | |
| """ | |
| Transcribe video into SRT file using OpenAI Whisper. | |
| """ | |
| # Load model (tiny, base, or small) | |
| model = whisper.load_model(model_size) | |
| # Run transcription | |
| result = model.transcribe(video_path, language="English") | |
| # Save as .srt | |
| srt_path = video_path + ".srt" | |
| with open(srt_path, "w", encoding="utf-8") as f: | |
| for i, segment in enumerate(result["segments"], start=1): | |
| start = segment["start"] | |
| end = segment["end"] | |
| text = segment["text"].strip() | |
| def srt_time(seconds): | |
| ms = int((seconds % 1) * 1000) | |
| h = int(seconds // 3600) | |
| m = int((seconds % 3600) // 60) | |
| s = int(seconds % 60) | |
| return f"{h:02}:{m:02}:{s:02},{ms:03}" | |
| f.write(f"{i}\n{srt_time(start)} --> {srt_time(end)}\n{text}\n\n") | |
| return srt_path | |