ProgramerSalar committed on
Commit
02b450e
·
verified ·
1 Parent(s): cba99f4

Upload generate_caption.py

Browse files
Files changed (1) hide show
  1. generate_caption.py +67 -0
generate_caption.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import time
from pathlib import Path
import torch
from transformers import pipeline

# Release any GPU memory cached by a previous run before loading the model.
torch.cuda.empty_cache()

# Subtitle suffixes that mark a video as already transcribed.
_SUBTITLE_SUFFIXES = (".en.srt", ".en-orig.srt")


def _srt_path_for(video_path: Path) -> Path:
    """Return the sibling '<stem>.en.srt' path for an '.mp4' video path."""
    # with_suffix swaps the trailing '.mp4' for '.en.srt' without fragile
    # string splitting (the original used str.split('.mp4')[-2] + __add__).
    return video_path.with_suffix(".en.srt")


def _has_content(path: Path) -> bool:
    """True if *path* exists and is non-empty (caption already generated)."""
    return path.exists() and path.stat().st_size > 0


def _build_pipeline():
    """Create the Whisper ASR pipeline once, on GPU when available.

    The pipeline handles chunking intelligently; loading the model is
    expensive, so callers must build it at most once per run.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v3",
        torch_dtype=torch.float16,
        device=device,
    )


if __name__ == "__main__":
    folder_path = "./part_1"
    # Lazily created so the model is never loaded when every video already
    # has a caption file (the original rebuilt the pipeline per video).
    pipe = None

    for root, _dirs, files in os.walk(folder_path):
        for name in files:
            lower = name.lower()
            # endswith accepts a tuple — replaces the buggy bitwise `|`
            # between the two endswith() results in the original.
            if lower.endswith(_SUBTITLE_SUFFIXES):
                print('[srt] file are found.')
                continue
            if not lower.endswith(".mp4"):
                continue

            video_path = Path(root) / name
            if not video_path.exists():
                print('video_path are not working...')
                continue

            create_srt_path = _srt_path_for(video_path)
            if _has_content(create_srt_path):
                print(f"content are already found in [srt] file. filepath: >>>> {create_srt_path}")
                continue

            if pipe is None:
                pipe = _build_pipeline()

            # Generate with built-in chunking and stride:
            #   chunk_length_s=30: process 30 s at a time
            #   stride_length_s=5: overlap chunks by 5 s to connect sentences correctly
            outputs = pipe(
                str(video_path),
                chunk_length_s=30,
                stride_length_s=5,
                generate_kwargs={"task": "translate", "language": "hindi"}
            )

            print(">>>>>>>>>>>>>>>>>>> :", create_srt_path)

            torch.cuda.empty_cache()

            # Save it so you don't lose it!
            # NOTE(review): outputs["text"] is plain transcript text, not
            # SRT-formatted (no indices/timestamps) — kept as-is to preserve
            # the original behavior; explicit utf-8 avoids locale-dependent
            # encoding errors on non-ASCII transcripts.
            with open(create_srt_path, "w", encoding="utf-8") as f:
                f.write(outputs["text"])