[WIP] Upload folder using huggingface_hub (multi-commit afb63583912af55730066a45222b12c92987cc203e761c55ab6c4ddf3e5c729f)
#1
by
pain
- opened
- .gitattributes +0 -1
- MASC.py +0 -148
- README.md +0 -166
- audio/dev/dev_01.tar.gz +0 -3
- audio/test/test_01.tar.gz +0 -3
- audio/train/train_01.tar.xz +0 -3
- audio/train/train_02.tar.xz +0 -3
- audio/train/train_03.tar.xz +0 -3
- audio/train/train_04.tar.xz +0 -3
- audio/train/train_05.tar.xz +0 -3
- audio/train/train_06.tar.xz +0 -3
- audio/train/train_07.tar.xz +0 -3
- audio/train/train_08.tar.xz +0 -3
- transcript/dev/dev.csv +0 -0
- transcript/test/test.csv +0 -0
- transcript/train/train.csv +0 -3
.gitattributes
CHANGED
|
@@ -52,4 +52,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 55 |
-
transcript/train/train.csv filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 52 |
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 53 |
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
*.webp filter=lfs diff=lfs merge=lfs -text
|
|
|
MASC.py
DELETED
|
@@ -1,148 +0,0 @@
|
|
| 1 |
-
# coding=utf-8
|
| 2 |
-
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
|
| 3 |
-
#
|
| 4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
-
# you may not use this file except in compliance with the License.
|
| 6 |
-
# You may obtain a copy of the License at
|
| 7 |
-
#
|
| 8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
-
#
|
| 10 |
-
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
-
# See the License for the specific language governing permissions and
|
| 14 |
-
# limitations under the License.
|
| 15 |
-
""" MASC Dataset"""
|
| 16 |
-
|
| 17 |
-
# This script has been adopted from this dataset: "mozilla-foundation/common_voice_11_0"
|
| 18 |
-
|
| 19 |
-
import csv
|
| 20 |
-
import os
|
| 21 |
-
import json
|
| 22 |
-
|
| 23 |
-
import datasets
|
| 24 |
-
from datasets.utils.py_utils import size_str
|
| 25 |
-
from tqdm import tqdm
|
| 26 |
-
|
| 27 |
-
_CITATION = """\
|
| 28 |
-
@INPROCEEDINGS{10022652,
|
| 29 |
-
author={Al-Fetyani, Mohammad and Al-Barham, Muhammad and Abandah, Gheith and Alsharkawi, Adham and Dawas, Maha},
|
| 30 |
-
booktitle={2022 IEEE Spoken Language Technology Workshop (SLT)},
|
| 31 |
-
title={MASC: Massive Arabic Speech Corpus},
|
| 32 |
-
year={2023},
|
| 33 |
-
volume={},
|
| 34 |
-
number={},
|
| 35 |
-
pages={1006-1013},
|
| 36 |
-
doi={10.1109/SLT54892.2023.10022652}
|
| 37 |
-
}
|
| 38 |
-
"""
|
| 39 |
-
|
| 40 |
-
# TODO: Add description of the dataset here
|
| 41 |
-
# You can copy an official description
|
| 42 |
-
_DESCRIPTION = """\
|
| 43 |
-
MASC is a dataset that contains 1,000 hours of speech sampled at 16 kHz and crawled from over 700 YouTube channels. The dataset is multi-regional, multi-genre, and multi-dialect intended to advance the research and development of Arabic speech technology with a special emphasis on Arabic speech recognition.
|
| 44 |
-
"""
|
| 45 |
-
|
| 46 |
-
_HOMEPAGE = "https://ieee-dataport.org/open-access/masc-massive-arabic-speech-corpus"
|
| 47 |
-
_LICENSE = "https://creativecommons.org/licenses/by/4.0/"
|
| 48 |
-
_BASE_URL = "https://huggingface.co/datasets/pain/MASC/resolve/main/"
|
| 49 |
-
_AUDIO_URL1 = _BASE_URL + "audio/{split}/{split}_{shard_idx}.tar.gz"
|
| 50 |
-
_AUDIO_URL2 = _BASE_URL + "audio/{split}/{split}_{shard_idx}.tar.xz"
|
| 51 |
-
_TRANSCRIPT_URL = _BASE_URL + "transcript/{split}/{split}.csv"
|
| 52 |
-
|
| 53 |
-
class MASC(datasets.GeneratorBasedBuilder):
|
| 54 |
-
|
| 55 |
-
VERSION = datasets.Version("1.0.0")
|
| 56 |
-
|
| 57 |
-
def _info(self):
|
| 58 |
-
|
| 59 |
-
features = datasets.Features(
|
| 60 |
-
{
|
| 61 |
-
"video_id": datasets.Value("string"),
|
| 62 |
-
"start": datasets.Value("float64"),
|
| 63 |
-
"end": datasets.Value("float64"),
|
| 64 |
-
"duration": datasets.Value("float64"),
|
| 65 |
-
"text": datasets.Value("string"),
|
| 66 |
-
"type": datasets.Value("string"),
|
| 67 |
-
"file_path": datasets.Value("string"),
|
| 68 |
-
"audio": datasets.features.Audio(sampling_rate=16_000),
|
| 69 |
-
}
|
| 70 |
-
)
|
| 71 |
-
|
| 72 |
-
return datasets.DatasetInfo(
|
| 73 |
-
description=_DESCRIPTION,
|
| 74 |
-
features=features,
|
| 75 |
-
supervised_keys=None,
|
| 76 |
-
homepage=_HOMEPAGE,
|
| 77 |
-
license=_LICENSE,
|
| 78 |
-
citation=_CITATION,
|
| 79 |
-
version=self.config.version,
|
| 80 |
-
)
|
| 81 |
-
|
| 82 |
-
def _split_generators(self, dl_manager):
|
| 83 |
-
|
| 84 |
-
n_shards = {"train": 8,"dev": 1, "test": 1}
|
| 85 |
-
audio_urls = {}
|
| 86 |
-
splits = ("train", "dev", "test")
|
| 87 |
-
|
| 88 |
-
for split in splits:
|
| 89 |
-
audio_urls[split] = [
|
| 90 |
-
_AUDIO_URL2.format(split=split, shard_idx="{:02d}".format(i+1)) if split=="train" else _AUDIO_URL1.format(split=split, shard_idx="{:02d}".format(i+1)) for i in range(n_shards[split])
|
| 91 |
-
]
|
| 92 |
-
archive_paths = dl_manager.download(audio_urls)
|
| 93 |
-
local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
|
| 94 |
-
|
| 95 |
-
meta_urls = {split: _TRANSCRIPT_URL.format(split=split) for split in splits}
|
| 96 |
-
|
| 97 |
-
meta_paths = dl_manager.download(meta_urls)
|
| 98 |
-
|
| 99 |
-
split_generators = []
|
| 100 |
-
split_names = {
|
| 101 |
-
"train": datasets.Split.TRAIN,
|
| 102 |
-
"dev": datasets.Split.VALIDATION,
|
| 103 |
-
"test": datasets.Split.TEST,
|
| 104 |
-
}
|
| 105 |
-
for split in splits:
|
| 106 |
-
split_generators.append(
|
| 107 |
-
datasets.SplitGenerator(
|
| 108 |
-
name=split_names.get(split, split),
|
| 109 |
-
gen_kwargs={
|
| 110 |
-
"local_extracted_archive_paths": local_extracted_archive_paths.get(split),
|
| 111 |
-
"archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
|
| 112 |
-
"meta_path": meta_paths[split],
|
| 113 |
-
},
|
| 114 |
-
),
|
| 115 |
-
)
|
| 116 |
-
|
| 117 |
-
return split_generators
|
| 118 |
-
|
| 119 |
-
def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
|
| 120 |
-
data_fields = list(self._info().features.keys())
|
| 121 |
-
metadata = {}
|
| 122 |
-
with open(meta_path, encoding="utf-8") as f:
|
| 123 |
-
reader = csv.DictReader(f, delimiter=",", quoting=csv.QUOTE_NONE)
|
| 124 |
-
for row in reader:
|
| 125 |
-
if not row["file_path"].endswith(".wav"):
|
| 126 |
-
row["file_path"] += ".wav"
|
| 127 |
-
for field in data_fields:
|
| 128 |
-
if field not in row:
|
| 129 |
-
row[field] = ""
|
| 130 |
-
metadata[row["file_path"]] = row
|
| 131 |
-
|
| 132 |
-
for i, audio_archive in enumerate(archives):
|
| 133 |
-
for filename, file in audio_archive:
|
| 134 |
-
_, filename = os.path.split(filename)
|
| 135 |
-
if filename in metadata:
|
| 136 |
-
result = dict(metadata[filename])
|
| 137 |
-
# set the audio feature and the path to the extracted file
|
| 138 |
-
path = os.path.join(local_extracted_archive_paths[i], filename) if local_extracted_archive_paths else filename
|
| 139 |
-
|
| 140 |
-
try:
|
| 141 |
-
result["audio"] = {"path": path, "bytes": file.read()}
|
| 142 |
-
except ReadError as e:
|
| 143 |
-
# Handle the ReadError
|
| 144 |
-
print("An error occurred while reading the data:", str(e))
|
| 145 |
-
continue
|
| 146 |
-
# set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
|
| 147 |
-
result["file_path"] = path if local_extracted_archive_paths else filename
|
| 148 |
-
yield path, result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
DELETED
|
@@ -1,166 +0,0 @@
|
|
| 1 |
-
---
|
| 2 |
-
license:
|
| 3 |
-
- cc-by-4.0
|
| 4 |
-
size_categories:
|
| 5 |
-
ar:
|
| 6 |
-
- n==1k
|
| 7 |
-
task_categories:
|
| 8 |
-
- automatic-speech-recognition
|
| 9 |
-
task_ids: []
|
| 10 |
-
pretty_name: MASC dataset
|
| 11 |
-
extra_gated_prompt: >-
|
| 12 |
-
By clicking on “Access repository” below, you also agree to not attempt to
|
| 13 |
-
determine the identity of speakers in the MASC dataset.
|
| 14 |
-
language:
|
| 15 |
-
- ar
|
| 16 |
-
---
|
| 17 |
-
|
| 18 |
-
# Dataset Card for MASC: Massive Arabic Speech Corpus
|
| 19 |
-
|
| 20 |
-
## Table of Contents
|
| 21 |
-
- [Dataset Description](#dataset-description)
|
| 22 |
-
- [Dataset Summary](#dataset-summary)
|
| 23 |
-
- [Languages](#languages)
|
| 24 |
-
- [How to use](#how-to-use)
|
| 25 |
-
- [Dataset Structure](#dataset-structure)
|
| 26 |
-
- [Data Instances](#data-instances)
|
| 27 |
-
- [Data Fields](#data-fields)
|
| 28 |
-
- [Data Splits](#data-splits)
|
| 29 |
-
- [Additional Information](#additional-information)
|
| 30 |
-
- [Citation Information](#citation-information)
|
| 31 |
-
|
| 32 |
-
## Dataset Description
|
| 33 |
-
|
| 34 |
-
- **Homepage:** https://ieee-dataport.org/open-access/masc-massive-arabic-speech-corpus
|
| 35 |
-
- **Paper:** https://ieeexplore.ieee.org/document/10022652
|
| 36 |
-
|
| 37 |
-
### Dataset Summary
|
| 38 |
-
|
| 39 |
-
MASC is a dataset that contains 1,000 hours of speech sampled at 16 kHz and crawled from over 700 YouTube channels.
|
| 40 |
-
The dataset is multi-regional, multi-genre, and multi-dialect intended to advance the research and development of Arabic speech technology with a special emphasis on Arabic speech recognition.
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
### Supported Tasks
|
| 44 |
-
|
| 45 |
-
- Automatic Speech Recognition
|
| 46 |
-
|
| 47 |
-
### Languages
|
| 48 |
-
|
| 49 |
-
```
|
| 50 |
-
Arabic
|
| 51 |
-
```
|
| 52 |
-
|
| 53 |
-
## How to use
|
| 54 |
-
|
| 55 |
-
The `datasets` library allows you to load and pre-process your dataset in pure Python, at scale. The dataset can be downloaded and prepared in one call to your local drive by using the `load_dataset` function.
|
| 56 |
-
|
| 57 |
-
```python
|
| 58 |
-
from datasets import load_dataset
|
| 59 |
-
|
| 60 |
-
masc = load_dataset("pain/MASC", split="train")
|
| 61 |
-
```
|
| 62 |
-
|
| 63 |
-
Using the datasets library, you can also stream the dataset on-the-fly by adding a `streaming=True` argument to the `load_dataset` function call. Loading a dataset in streaming mode loads individual samples of the dataset at a time, rather than downloading the entire dataset to disk.
|
| 64 |
-
```python
|
| 65 |
-
from datasets import load_dataset
|
| 66 |
-
|
| 67 |
-
masc = load_dataset("pain/MASC", split="train", streaming=True)
|
| 68 |
-
|
| 69 |
-
print(next(iter(masc)))
|
| 70 |
-
```
|
| 71 |
-
|
| 72 |
-
*Bonus*: create a [PyTorch dataloader](https://huggingface.co/docs/datasets/use_with_pytorch) directly with your own datasets (local/streamed).
|
| 73 |
-
|
| 74 |
-
### Local
|
| 75 |
-
|
| 76 |
-
```python
|
| 77 |
-
from datasets import load_dataset
|
| 78 |
-
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
|
| 79 |
-
|
| 80 |
-
masc = load_dataset("pain/MASC", split="train")
|
| 81 |
-
batch_sampler = BatchSampler(RandomSampler(masc), batch_size=32, drop_last=False)
|
| 82 |
-
dataloader = DataLoader(masc, batch_sampler=batch_sampler)
|
| 83 |
-
```
|
| 84 |
-
|
| 85 |
-
### Streaming
|
| 86 |
-
|
| 87 |
-
```python
|
| 88 |
-
from datasets import load_dataset
|
| 89 |
-
from torch.utils.data import DataLoader
|
| 90 |
-
|
| 91 |
-
masc = load_dataset("pain/MASC", split="train", streaming=True)
|
| 92 |
-
dataloader = DataLoader(masc, batch_size=32)
|
| 93 |
-
```
|
| 94 |
-
|
| 95 |
-
To find out more about loading and preparing audio datasets, head over to [hf.co/blog/audio-datasets](https://huggingface.co/blog/audio-datasets).
|
| 96 |
-
|
| 97 |
-
### Example scripts
|
| 98 |
-
|
| 99 |
-
Train your own CTC or Seq2Seq Automatic Speech Recognition models on MASC with `transformers` - [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition).
|
| 100 |
-
|
| 101 |
-
## Dataset Structure
|
| 102 |
-
|
| 103 |
-
### Data Instances
|
| 104 |
-
|
| 105 |
-
A typical data point comprises the `path` to the audio file and its `sentence`.
|
| 106 |
-
|
| 107 |
-
```python
|
| 108 |
-
{'video_id': 'OGqz9G-JO0E', 'start': 770.6, 'end': 781.835, 'duration': 11.24,
|
| 109 |
-
'text': 'اللهم من ارادنا وبلادنا وبلاد المسلمين بسوء اللهم فاشغله في نفسه ورد كيده في نحره واجعل تدبيره تدميره يا رب العالمين',
|
| 110 |
-
'type': 'c', 'file_path': '87edeceb-5349-4210-89ad-8c3e91e54062_OGqz9G-JO0E.wav',
|
| 111 |
-
'audio': {'path': None,
|
| 112 |
-
'array': array([
|
| 113 |
-
0.05938721,
|
| 114 |
-
0.0539856,
|
| 115 |
-
0.03460693, ...,
|
| 116 |
-
0.00393677,
|
| 117 |
-
0.01745605,
|
| 118 |
-
0.03045654
|
| 119 |
-
]), 'sampling_rate': 16000
|
| 120 |
-
}
|
| 121 |
-
}
|
| 122 |
-
```
|
| 123 |
-
|
| 124 |
-
### Data Fields
|
| 125 |
-
|
| 126 |
-
`video_id` (`string`): An id for the video that the voice has been created from
|
| 127 |
-
|
| 128 |
-
`start` (`float64`): The start of the audio's chunk
|
| 129 |
-
|
| 130 |
-
`end` (`float64`): The end of the audio's chunk
|
| 131 |
-
|
| 132 |
-
`duration` (`float64`): The duration of the chunk
|
| 133 |
-
|
| 134 |
-
`text` (`string`): The text of the chunk
|
| 135 |
-
|
| 136 |
-
`audio` (`dict`): A dictionary containing the path to the downloaded audio file, the decoded audio array, and the sampling rate. Note that when accessing the audio column: `dataset[0]["audio"]` the audio file is automatically decoded and resampled to `dataset.features["audio"].sampling_rate`. Decoding and resampling of a large number of audio files might take a significant amount of time. Thus it is important to first query the sample index before the `"audio"` column, *i.e.* `dataset[0]["audio"]` should **always** be preferred over `dataset["audio"][0]`.
|
| 137 |
-
|
| 138 |
-
`type` (`string`): It refers to the data set type, either clean or noisy where "c: clean and n: noisy"
|
| 139 |
-
|
| 140 |
-
`file_path` (`string`): A path for the audio chunk
|
| 141 |
-
|
| 142 |
-
`audio` (`audio`): Audio for the chunk
|
| 143 |
-
|
| 144 |
-
### Data Splits
|
| 145 |
-
|
| 146 |
-
The speech material has been subdivided into portions for train, dev, test.
|
| 147 |
-
|
| 148 |
-
The dataset splits contain clean and noisy data that can be distinguished by the `type` field.
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
### Citation Information
|
| 154 |
-
|
| 155 |
-
```
|
| 156 |
-
@INPROCEEDINGS{10022652,
|
| 157 |
-
author={Al-Fetyani, Mohammad and Al-Barham, Muhammad and Abandah, Gheith and Alsharkawi, Adham and Dawas, Maha},
|
| 158 |
-
booktitle={2022 IEEE Spoken Language Technology Workshop (SLT)},
|
| 159 |
-
title={MASC: Massive Arabic Speech Corpus},
|
| 160 |
-
year={2023},
|
| 161 |
-
volume={},
|
| 162 |
-
number={},
|
| 163 |
-
pages={1006-1013},
|
| 164 |
-
doi={10.1109/SLT54892.2023.10022652}
|
| 165 |
-
}
|
| 166 |
-
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
audio/dev/dev_01.tar.gz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:d2e84aa9f375c096e905ae061a6262d68a67f84cab821cab50e662fc260e1c72
|
| 3 |
-
size 2743579097
|
|
|
|
|
|
|
|
|
|
|
|
audio/test/test_01.tar.gz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:248d3c9b5183772fcfce29b1ed97d72a0a87b40c4ff1a30bf6ce6fa9ebe7a260
|
| 3 |
-
size 2521285897
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_01.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:bea6242cf6fe0b323d0a15e69d6f9bc621751c6db6c91ad3b515f9ad34f27d13
|
| 3 |
-
size 10300045596
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_02.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:b7b854058e35837c14c7e909b818e75cd99f33c16867c4917365bf464141d093
|
| 3 |
-
size 10305029516
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_03.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:32b6eb36ea8bb197f15bc482e961fc97d4ebad77f1f5d3681564a00ea293a0c7
|
| 3 |
-
size 10224391500
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_04.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:63ce61cde337d6cb371be93fd0a160be1606b7496a331aaf7b0b6bd924213115
|
| 3 |
-
size 10303764044
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_05.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:b845dcbd7adfc16cbd2d2767ee3f8a9672891fd821808996f734642dc4dcb0df
|
| 3 |
-
size 10309230992
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_06.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:497fa98f5b1375bf84462f6b535ced11514d452cfa369cbbfd6620c867910f5b
|
| 3 |
-
size 10284059276
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_07.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:3cd1c977bc57a365fc0841916d8afe239be540def4f46b9da3149acafe2c1a96
|
| 3 |
-
size 10306431080
|
|
|
|
|
|
|
|
|
|
|
|
audio/train/train_08.tar.xz
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:3d4b24430bfb74039aa5594606e99ece3ac27e2b2dc9ed464bb3911b355afcc1
|
| 3 |
-
size 10261959148
|
|
|
|
|
|
|
|
|
|
|
|
transcript/dev/dev.csv
DELETED
|
The diff for this file is too large to render.
See raw diff
|
|
|
transcript/test/test.csv
DELETED
|
The diff for this file is too large to render.
See raw diff
|
|
|
transcript/train/train.csv
DELETED
|
@@ -1,3 +0,0 @@
|
|
| 1 |
-
version https://git-lfs.github.com/spec/v1
|
| 2 |
-
oid sha256:dc843ac611b1b3aedf80b31fc424ad73644b6a2400a837264d5fdda8fda30067
|
| 3 |
-
size 137182699
|
|
|
|
|
|
|
|
|
|
|
|