"""Generate English subtitle (.en.srt) files for Hindi .mp4 videos.

Recursively walks ``./part_1``; for every ``.mp4`` that does not already
have a non-empty companion ``.en.srt``, runs openai/whisper-large-v3
speech translation (task="translate", source language Hindi) and writes
the transcript next to the video.
"""

import os
import time
from pathlib import Path

import torch
from transformers import pipeline

torch.cuda.empty_cache()


def _build_asr_pipeline():
    """Build the Whisper ASR pipeline once.

    Constructing this per file (as the original loop did) reloads the
    multi-gigabyte model for every video. fp16 is only requested on CUDA:
    half-precision inference is unsupported/broken for many CPU ops.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v3",
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        device=device,
    )


if __name__ == "__main__":
    folder_path = "./part_1"
    pipe = None  # built lazily, only if at least one video needs transcribing
    for roots, dirs, files in os.walk(folder_path):
        for file in files:
            lowered = file.lower()
            if lowered.endswith(('.en.srt', '.en-orig.srt')):
                print('[srt] file are found.')
            elif lowered.endswith('.mp4'):
                video_path = Path(roots) / file
                if video_path.exists():
                    # Derive "<name>.en.srt" from "<name>.mp4" via pathlib.
                    # The original split on the literal '.mp4', which raised
                    # IndexError for case variants like 'FOO.MP4'.
                    create_srt_path = video_path.with_name(
                        video_path.stem + '.en.srt'
                    )
                    # if create_srt_path.exists() and create_srt_path.read_text(encoding="utf-8"):
                    if create_srt_path.exists() and create_srt_path.stat().st_size > 0:
                        # content = create_srt_path.read_text(encoding="utf-8")
                        print(f"content are already found in [srt] file. filepath: >>>> {create_srt_path}")
                    else:
                        if pipe is None:
                            # Initialize the pipeline (handles chunking intelligently)
                            pipe = _build_asr_pipeline()
                        # Generate with built-in chunking and stride
                        # chunk_length_s=30: Process 30s at a time
                        # stride_length_s=5: Overlap chunks by 5s to connect sentences correctly
                        outputs = pipe(
                            str(video_path),
                            chunk_length_s=30,
                            stride_length_s=5,
                            generate_kwargs={"task": "translate", "language": "hindi"},
                        )
                        # print(outputs["text"])
                        print(">>>>>>>>>>>>>>>>>>> :", create_srt_path)
                        torch.cuda.empty_cache()
                        # Save it so you don't lose it!  Explicit UTF-8 so the
                        # transcript round-trips regardless of the locale default.
                        with open(create_srt_path, "w", encoding="utf-8") as f:
                            f.write(outputs["text"])
                else:
                    print('video_path are not working...')
# ---------------------------------------------------------------