dareenharthi commited on
Commit
802cbd9
·
verified ·
1 Parent(s): 1d85de0

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +2 -3
  2. dataset_card.yaml +14 -15
  3. dsu_dataset.py +38 -0
  4. train.json +0 -0
README.md CHANGED
@@ -1,6 +1,5 @@
1
  # Discrete Speech Units Dataset
2
 
3
- This dataset contains discrete speech unit representations with various augmentations.
4
 
5
- Columns: id, tokens, text, augmentation
6
-
 
1
  # Discrete Speech Units Dataset
2
 
3
+ This dataset contains discrete speech unit representations with various augmentations.
4
 
5
+ Columns: id, tokens, text, augmentation
 
dataset_card.yaml CHANGED
@@ -1,16 +1,15 @@
1
  ---
2
- language:
3
- - en
4
- license: cc-by-4.0
5
- task_categories:
6
- - speech-processing
7
- task_ids:
8
- - speech-representation
9
- size_categories:
10
- - 100K<n<1M
11
- tags:
12
- - speech
13
- - discrete-units
14
- - augmentation
15
- ---
16
-
 
1
  ---
2
+ language:
3
+ - en
4
+ license: cc-by-4.0
5
+ task_categories:
6
+ - speech-processing
7
+ task_ids:
8
+ - speech-representation
9
+ size_categories:
10
+ - 100K<n<1M
11
+ tags:
12
+ - speech
13
+ - discrete-units
14
+ - augmentation
15
+ ---
 
dsu_dataset.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import json
3
+ import datasets
4
+
5
+ _DESCRIPTION = "Discrete Speech Units Dataset"
6
+
7
class DsuDataset(datasets.GeneratorBasedBuilder):
    """Loading script for the Discrete Speech Units dataset.

    Each example carries an id, a sequence of discrete speech-unit token
    ids, the reference transcript, and the name of the augmentation that
    produced it. Data is read from a single JSON file (``train.json``)
    containing a list of example objects.
    """

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("int32")),
                    "text": datasets.Value("string"),
                    "augmentation": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        """Declare the train split.

        Resolve ``train.json`` through the download manager instead of
        hardcoding a bare relative path: ``dl_manager.download`` returns a
        local (cached) path whether the script is run locally or loaded
        from the Hub, and keeps streaming mode working.
        """
        data_file = dl_manager.download("train.json")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_file},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from the JSON file at *filepath*.

        The file is expected to hold a JSON array of objects, each with
        ``id``, ``tokens``, ``text``, and ``augmentation`` keys; a missing
        key raises ``KeyError`` rather than silently emitting bad rows.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            data = json.load(f)
        for index, example in enumerate(data):
            yield index, {
                "id": example["id"],
                "tokens": example["tokens"],
                "text": example["text"],
                "augmentation": example["augmentation"],
            }
train.json ADDED
The diff for this file is too large to render. See raw diff