Upload folder using huggingface_hub
Browse files
- README.md +13 -1
- data/train.json +0 -0
- dataset_card.yaml +0 -5
- dsu_dataset.py +11 -11
README.md
CHANGED
|
@@ -2,4 +2,16 @@
|
|
| 2 |
|
| 3 |
This dataset contains discrete speech unit representations with various augmentations.
|
| 4 |
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
This dataset contains discrete speech unit representations with various augmentations.
|
| 4 |
|
| 5 |
+
## Dataset Structure
|
| 6 |
+
- 128 total examples
|
| 7 |
+
|
| 8 |
+
## Columns
|
| 9 |
+
- `id`: Sample identifier
|
| 10 |
+
- `tokens`: Discrete speech unit tokens
|
| 11 |
+
- `text`: Transcription text
|
| 12 |
+
- `augmentation`: Type of augmentation applied
|
| 13 |
+
|
| 14 |
+
## Augmentations
|
| 15 |
+
specaugment, gaussian_noise, speed_perturbation, original
|
| 16 |
+
|
| 17 |
+
```
|
data/train.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
dataset_card.yaml
CHANGED
|
@@ -4,12 +4,7 @@ language:
|
|
| 4 |
license: cc-by-4.0
|
| 5 |
task_categories:
|
| 6 |
- speech-processing
|
| 7 |
-
task_ids:
|
| 8 |
-
- speech-representation
|
| 9 |
-
size_categories:
|
| 10 |
-
- 100K<n<1M
|
| 11 |
tags:
|
| 12 |
- speech
|
| 13 |
- discrete-units
|
| 14 |
-
- augmentation
|
| 15 |
---
|
|
|
|
| 4 |
license: cc-by-4.0
|
| 5 |
task_categories:
|
| 6 |
- speech-processing
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
tags:
|
| 8 |
- speech
|
| 9 |
- discrete-units
|
|
|
|
| 10 |
---
|
dsu_dataset.py
CHANGED
|
@@ -2,14 +2,14 @@
|
|
| 2 |
import json
|
| 3 |
import datasets
|
| 4 |
|
| 5 |
-
_DESCRIPTION = "Discrete Speech Units Dataset"
|
| 6 |
-
|
| 7 |
class DsuDataset(datasets.GeneratorBasedBuilder):
|
| 8 |
-
|
|
|
|
|
|
|
| 9 |
|
| 10 |
def _info(self):
|
| 11 |
return datasets.DatasetInfo(
|
| 12 |
-
description=_DESCRIPTION,
|
| 13 |
features=datasets.Features({
|
| 14 |
"id": datasets.Value("string"),
|
| 15 |
"tokens": datasets.Sequence(datasets.Value("int32")),
|
|
@@ -22,17 +22,17 @@ class DsuDataset(datasets.GeneratorBasedBuilder):
|
|
| 22 |
return [
|
| 23 |
datasets.SplitGenerator(
|
| 24 |
name=datasets.Split.TRAIN,
|
| 25 |
-
gen_kwargs={"filepath": "train.json"},
|
| 26 |
)
|
| 27 |
]
|
| 28 |
|
| 29 |
def _generate_examples(self, filepath):
|
| 30 |
-
with open(filepath, "r") as f:
|
| 31 |
data = json.load(f)
|
| 32 |
-
for i,
|
| 33 |
yield i, {
|
| 34 |
-
"id":
|
| 35 |
-
"tokens":
|
| 36 |
-
"text":
|
| 37 |
-
"augmentation":
|
| 38 |
}
|
|
|
|
| 2 |
import json
|
| 3 |
import datasets
|
| 4 |
|
|
|
|
|
|
|
| 5 |
class DsuDataset(datasets.GeneratorBasedBuilder):
|
| 6 |
+
"""Discrete Speech Units dataset"""
|
| 7 |
+
|
| 8 |
+
VERSION = datasets.Version("1.0.0")
|
| 9 |
|
| 10 |
def _info(self):
|
| 11 |
return datasets.DatasetInfo(
|
| 12 |
+
description="Discrete Speech Units Dataset",
|
| 13 |
features=datasets.Features({
|
| 14 |
"id": datasets.Value("string"),
|
| 15 |
"tokens": datasets.Sequence(datasets.Value("int32")),
|
|
|
|
| 22 |
return [
|
| 23 |
datasets.SplitGenerator(
|
| 24 |
name=datasets.Split.TRAIN,
|
| 25 |
+
gen_kwargs={"filepath": "data/train.json"},
|
| 26 |
)
|
| 27 |
]
|
| 28 |
|
| 29 |
def _generate_examples(self, filepath):
|
| 30 |
+
with open(filepath, encoding="utf-8") as f:
|
| 31 |
data = json.load(f)
|
| 32 |
+
for i, item in enumerate(data):
|
| 33 |
yield i, {
|
| 34 |
+
"id": item["id"],
|
| 35 |
+
"tokens": item["tokens"],
|
| 36 |
+
"text": item["text"],
|
| 37 |
+
"augmentation": item["augmentation"],
|
| 38 |
}
|