subatomicseer commited on
Commit
5a0dcab
·
verified ·
1 Parent(s): 617c8d4

Create speech_flame_codes.py

Browse files
Files changed (1) hide show
  1. speech_flame_codes.py +84 -0
speech_flame_codes.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Lint as: python3
2
+ """semantic, acoustic and flame codes dataset.
3
+ """
4
+
5
+
6
+ import glob
7
+ import os
8
+
9
+ import datasets
10
+ import torch
11
+
12
+
13
class SpeechFlameCodesDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Speech-Flame Codes dataset."""

    def __init__(self, **kwargs):
        # Pass every keyword argument (name, version, description, ...)
        # straight through to the base BuilderConfig.
        super().__init__(**kwargs)
18
+
19
+
20
class SpeechFlameCodesDataset(datasets.GeneratorBasedBuilder):
    """Dataset of precomputed semantic, acoustic and FLAME codes.

    Expects ``data_dir`` to contain one ``.pt`` file per example, each a dict
    with keys ``acoustic_codes``, ``semantic_codes`` and ``flame_codes``
    (see ``_generate_examples`` for the expected tensor layouts).
    """

    BUILDER_CONFIGS = [
        SpeechFlameCodesDatasetConfig(name="all", description="SpeechFlameCodes dataset"),
    ]

    @property
    def manual_download_instructions(self):
        # NOTE: the config name must be passed as a string ('all'), not a
        # bare identifier — the previous message showed `name=all`.
        return (
            "Codes should be computed before using this dataset. "
            "`datasets.load_dataset('/path/to/this/script', name='all', "
            "data_dir='path/to/folder/folder_name/of/codes')`"
        )

    def _info(self):
        """Declare the feature schema for one example."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                # Sequence length in frames (number of rows of the token arrays).
                "length": datasets.Value("int32"),
                # Variable-length sequences: acoustic codes use 12 codebooks per
                # frame; semantic and flame codes are a single stream each.
                "acoustic_tokens": datasets.Array2D(shape=(None, 12), dtype="int16"),
                "semantic_tokens": datasets.Array2D(shape=(None, 1), dtype="int16"),
                "flame_tokens": datasets.Array2D(shape=(None, 1), dtype="int16"),
            }
        )

        return datasets.DatasetInfo(
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Locate the manually-prepared ``.pt`` files and build the train split.

        Raises:
            FileNotFoundError: if ``data_dir`` was not supplied or does not exist.
        """
        base_data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir or ""))
        if not os.path.exists(base_data_dir):
            raise FileNotFoundError(
                f"{base_data_dir} does not exist. Make sure you insert a manual dir via "
                f"`datasets.load_dataset('/this/script', data_dir=...)` "
                f"that includes code files .pt files "
                f"dataset. Manual download instructions: {self.manual_download_instructions}"
            )

        # Sort so example order is deterministic across filesystems
        # (glob returns files in arbitrary, platform-dependent order).
        train_data_dirs = sorted(glob.glob(os.path.join(base_data_dir, "*.pt"), recursive=False))
        # Skip Jupyter checkpoint copies that may shadow real files.
        train_data_dirs = [d for d in train_data_dirs if '.ipynb_checkpoints' not in d]

        return [
            datasets.SplitGenerator(
                name=str(datasets.Split.TRAIN),
                gen_kwargs={"data_dirs": train_data_dirs},
            ),
        ]

    def _generate_examples(self, data_dirs):
        """Yield ``(key, example)`` pairs, one per ``.pt`` file.

        Args:
            data_dirs: list of paths to ``.pt`` files, each holding a dict
                with ``acoustic_codes`` (codebooks, frames), ``semantic_codes``
                (frames,) and ``flame_codes`` (frames,) tensors —
                shapes inferred from the transpose/unsqueeze below; confirm
                against the code-extraction pipeline.
        """
        for key, path in enumerate(data_dirs):
            # Cross-platform id extraction (path.split("/") breaks on Windows).
            id_ = os.path.splitext(os.path.basename(path))[0]

            # NOTE(review): torch.load unpickles arbitrary objects — only use
            # with trusted, locally produced code files.
            data = torch.load(path, map_location="cpu")

            acoustic_tokens = data["acoustic_codes"].transpose(0, 1)   # -> (frames, 12)
            semantic_tokens = data["semantic_codes"].unsqueeze(-1)     # -> (frames, 1)
            flame_tokens = data["flame_codes"].unsqueeze(-1)           # -> (frames, 1)

            yield id_, {
                "id": id_,
                # Previously missing: the schema declares "length", so every
                # example must provide it or dataset encoding fails.
                # Taken from the semantic stream — presumably all three streams
                # share the same frame count; verify against the extractor.
                "length": int(semantic_tokens.shape[0]),
                "acoustic_tokens": acoustic_tokens,
                "semantic_tokens": semantic_tokens,
                "flame_tokens": flame_tokens,
            }