MSankara narutoelbruto committed on
Commit
3a80439
·
verified ·
0 Parent(s):

Duplicate from HuggingFaceM4/yttemporal180m

Browse files

Co-authored-by: naruto bruto <narutoelbruto@users.noreply.huggingface.co>

Files changed (3) hide show
  1. .gitattributes +37 -0
  2. README.md +3 -0
  3. yttemporal180m.py +136 -0
.gitattributes ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.model filter=lfs diff=lfs merge=lfs -text
11
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
12
+ *.onnx filter=lfs diff=lfs merge=lfs -text
13
+ *.ot filter=lfs diff=lfs merge=lfs -text
14
+ *.parquet filter=lfs diff=lfs merge=lfs -text
15
+ *.pb filter=lfs diff=lfs merge=lfs -text
16
+ *.pt filter=lfs diff=lfs merge=lfs -text
17
+ *.pth filter=lfs diff=lfs merge=lfs -text
18
+ *.rar filter=lfs diff=lfs merge=lfs -text
19
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
20
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
21
+ *.tflite filter=lfs diff=lfs merge=lfs -text
22
+ *.tgz filter=lfs diff=lfs merge=lfs -text
23
+ *.wasm filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
28
+ # Audio files - uncompressed
29
+ *.pcm filter=lfs diff=lfs merge=lfs -text
30
+ *.sam filter=lfs diff=lfs merge=lfs -text
31
+ *.raw filter=lfs diff=lfs merge=lfs -text
32
+ # Audio files - compressed
33
+ *.aac filter=lfs diff=lfs merge=lfs -text
34
+ *.flac filter=lfs diff=lfs merge=lfs -text
35
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
36
+ *.ogg filter=lfs diff=lfs merge=lfs -text
37
+ *.wav filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ ---
2
+ license: other
3
+ ---
yttemporal180m.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import datasets
3
+ import datetime
4
+
5
+ _CITATION = """
6
+ @inproceedings{zellersluhessel2021merlot,
7
+ title={MERLOT: Multimodal Neural Script Knowledge Models},
8
+ author={Zellers, Rowan and Lu, Ximing and Hessel, Jack and Yu, Youngjae and Park, Jae Sung and Cao, Jize and Farhadi, Ali and Choi, Yejin},
9
+ booktitle={Advances in Neural Information Processing Systems 34},
10
+ year={2021}
11
+ }
12
+ """
13
+
14
+ _DESCRIPTION = """\
15
+ YT-Temporal-180M, a large and diverse dataset of 6 million videos (spanning 180M extracted frames)
16
+ that covers diverse topics.
17
+ """
18
+
19
+ _URL_BASE = "https://rowanzellers.com/merlot/#data"
20
+
21
+ url_numbers = ["00" + str(i) if i < 10 else "0" + str(i) for i in range(100)]
22
+ _DL_URLS = [
23
+ f"https://storage.googleapis.com/merlot/yttemporal180m/yttemporal180m_{num}of100.jsonl.gz"
24
+ for num in url_numbers
25
+ ]
26
+
27
+
28
+ def json_serializer(o):
29
+ if isinstance(o, datetime):
30
+ return str(o)
31
+
32
+ raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable")
33
+
34
+
35
+ class yttemporal180mConfig(datasets.BuilderConfig):
36
+ """BuilderConfig for ActivityNet Captions."""
37
+
38
+ def __init__(self, **kwargs):
39
+ super(yttemporal180mConfig, self).__init__(
40
+ version=datasets.Version("2.1.0", ""), **kwargs
41
+ )
42
+
43
+
44
class yttemporal180m(datasets.GeneratorBasedBuilder):
    """Dataset builder for YT-Temporal-180M.

    Downloads the 100 gzipped JSONL shards listed in ``_DL_URLS`` and yields
    one example per caption segment of each video (segments are cut roughly
    every 15 seconds of subtitle time).
    """

    DEFAULT_CONFIG_NAME = "default"
    BUILDER_CONFIGS = [
        yttemporal180mConfig(
            name="default", description="Default full yttemporal180m dataset"
        ),
    ]

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "video_id": datasets.Value("string"),
                    "video_url": datasets.Value("string"),
                    "caption": datasets.Value("string"),
                    "timestamp_start": datasets.Value("float32"),
                    "timestamp_stop": datasets.Value("float32"),
                    # Full per-video metadata, JSON-encoded as a single string.
                    "meta": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL_BASE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract every shard and expose them all as one TRAIN split."""
        archive_paths = [dl_manager.download_and_extract(url) for url in _DL_URLS]

        train_split = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"jsonl_files": archive_paths},
            )
        ]

        return train_split

    def _generate_examples(self, jsonl_files):
        """Yield ``(key, example)`` pairs from the extracted JSONL shards.

        Each shard line is one video record with (at least) ``info``
        (``display_id``, ``title``), ``denoised`` and ``subtitles`` — a list
        of ``{"time": ..., "word": ...}`` entries ordered by time. Subtitle
        words are accumulated into ~15-second caption segments; one example
        is emitted per segment.
        """
        # Monotonically increasing example key across all shards.
        idx = 0
        for file in jsonl_files:
            with open(file, encoding="utf-8") as jsonl_file:
                # NOTE(review): reads the whole shard into memory at once.
                json_list = list(jsonl_file)
                for json_str in json_list:
                    infos = json.loads(json_str)

                    # NOTE(review): `id` shadows the builtin of the same name.
                    id = infos["info"]["display_id"]
                    url = "https://www.youtube.com/watch?v=" + id

                    # Divide video by segments of 15 sec
                    max_sec_per_segment = 15
                    last_caption_timestamp = infos["subtitles"][-1]["time"]
                    # Number of 15-second chunks needed to cover the last
                    # subtitle timestamp (integer division, plus one).
                    num_chunks = (
                        int(divmod(last_caption_timestamp, max_sec_per_segment)[0]) + 1
                    )
                    # Chunk boundaries in seconds: 0, 15, 30, ...
                    time_chunks = [
                        i * max_sec_per_segment for i in range(num_chunks + 1)
                    ]
                    time_chunk_idx = 0
                    caption = ""
                    for el in infos["subtitles"]:
                        # Flush the accumulated caption when this word falls
                        # past the current chunk boundary, or when it is the
                        # final subtitle of the video.
                        if (
                            el["time"] > time_chunks[time_chunk_idx + 1]
                            or el["time"] == last_caption_timestamp
                        ):
                            timestamp_start = float(time_chunks[time_chunk_idx])
                            timestamp_stop = float(time_chunks[time_chunk_idx + 1])
                            # NOTE(review): only advances one chunk per flush,
                            # so a subtitle gap longer than 15 s can leave the
                            # chunk index lagging behind `el["time"]` — confirm
                            # this is the intended binning.
                            time_chunk_idx += 1

                            metadata_dict = {
                                "asr_info": infos["denoised"],
                                "info": infos["info"],
                                "subtitles": infos["subtitles"],
                                "title": infos["info"]["title"],
                            }
                            yield idx, {
                                "video_id": id,
                                "video_url": url,
                                # NOTE(review): the word that triggers the
                                # flush is NOT included in this caption; words
                                # are space-joined with a trailing space.
                                "caption": caption,
                                "timestamp_start": timestamp_start,
                                # For the final segment, the stop time is the
                                # raw last subtitle timestamp rather than the
                                # 15 s chunk boundary.
                                "timestamp_stop": timestamp_stop
                                if el["time"] != last_caption_timestamp
                                else last_caption_timestamp,
                                "meta": json.dumps(
                                    metadata_dict, default=json_serializer, indent=2
                                ),
                            }
                            idx += 1
                            caption = ""
                        else:
                            caption += el["word"] + " "