rishi70612 commited on
Commit
e6f9ed2
·
verified ·
1 Parent(s): b5e49d3

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -1,59 +1,53 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
- *.model filter=lfs diff=lfs merge=lfs -text
15
- *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
- *.onnx filter=lfs diff=lfs merge=lfs -text
19
- *.ot filter=lfs diff=lfs merge=lfs -text
20
- *.parquet filter=lfs diff=lfs merge=lfs -text
21
- *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
- *.pt filter=lfs diff=lfs merge=lfs -text
25
- *.pth filter=lfs diff=lfs merge=lfs -text
26
- *.rar filter=lfs diff=lfs merge=lfs -text
27
- *.safetensors filter=lfs diff=lfs merge=lfs -text
28
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
- *.tar.* filter=lfs diff=lfs merge=lfs -text
30
- *.tar filter=lfs diff=lfs merge=lfs -text
31
- *.tflite filter=lfs diff=lfs merge=lfs -text
32
- *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
- *.xz filter=lfs diff=lfs merge=lfs -text
35
- *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
- *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
- *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.npy filter=lfs diff=lfs merge=lfs -text
14
+ *.npz filter=lfs diff=lfs merge=lfs -text
15
+ *.onnx filter=lfs diff=lfs merge=lfs -text
16
+ *.ot filter=lfs diff=lfs merge=lfs -text
17
+ *.parquet filter=lfs diff=lfs merge=lfs -text
18
+ *.pb filter=lfs diff=lfs merge=lfs -text
19
+ *.pickle filter=lfs diff=lfs merge=lfs -text
20
+ *.pkl filter=lfs diff=lfs merge=lfs -text
21
+ *.pt filter=lfs diff=lfs merge=lfs -text
22
+ *.pth filter=lfs diff=lfs merge=lfs -text
23
+ *.rar filter=lfs diff=lfs merge=lfs -text
24
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
25
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
26
+ *.tflite filter=lfs diff=lfs merge=lfs -text
27
+ *.tgz filter=lfs diff=lfs merge=lfs -text
28
+ *.wasm filter=lfs diff=lfs merge=lfs -text
29
+ *.xz filter=lfs diff=lfs merge=lfs -text
30
+ *.zip filter=lfs diff=lfs merge=lfs -text
31
+ *.zst filter=lfs diff=lfs merge=lfs -text
32
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
33
+ # Audio files - uncompressed
34
+ *.pcm filter=lfs diff=lfs merge=lfs -text
35
+ *.sam filter=lfs diff=lfs merge=lfs -text
36
+ *.raw filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - compressed
38
+ *.aac filter=lfs diff=lfs merge=lfs -text
39
+ *.flac filter=lfs diff=lfs merge=lfs -text
40
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
41
+ *.ogg filter=lfs diff=lfs merge=lfs -text
42
+ *.wav filter=lfs diff=lfs merge=lfs -text
43
+ # Image files - uncompressed
44
+ *.bmp filter=lfs diff=lfs merge=lfs -text
45
+ *.gif filter=lfs diff=lfs merge=lfs -text
46
+ *.png filter=lfs diff=lfs merge=lfs -text
47
+ *.tiff filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - compressed
49
+ *.jpg filter=lfs diff=lfs merge=lfs -text
50
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
51
+ *.webp filter=lfs diff=lfs merge=lfs -text
52
+ *.tsv filter=lfs diff=lfs merge=lfs -text
53
+ data/*.tsv filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
README.md CHANGED
@@ -1,3 +1,149 @@
1
  ---
2
- license: mit
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ license: cc-by-sa-4.0
3
+ dataset_info:
4
+ - config_name: original
5
+ features:
6
+ - name: utterance_id
7
+ dtype: string
8
+ - name: speaker_id
9
+ dtype: string
10
+ - name: utterance
11
+ dtype:
12
+ audio:
13
+ sampling_rate: 16000
14
+ - name: transcription
15
+ dtype: string
16
+ - name: num_frames
17
+ dtype: int32
18
+ splits:
19
+ - name: train
20
+ num_bytes: 40925646
21
+ num_examples: 157905
22
+ download_size: 9340083067
23
+ dataset_size: 40925646
24
+ - config_name: cleaned
25
+ features:
26
+ - name: utterance_id
27
+ dtype: string
28
+ - name: speaker_id
29
+ dtype: string
30
+ - name: utterance
31
+ dtype:
32
+ audio:
33
+ sampling_rate: 16000
34
+ - name: transcription
35
+ dtype: string
36
+ - name: num_frames
37
+ dtype: int32
38
+ splits:
39
+ - name: train
40
+ num_bytes: 40925646
41
+ num_examples: 157905
42
+ download_size: 5978669282
43
+ dataset_size: 40925646
44
  ---
45
+
46
+ # Dataset Card for OpenSLR Nepali Large ASR Cleaned
47
+
48
+ ## Table of Contents
49
+ - [Dataset Card for OpenSLR Nepali Large ASR Cleaned](#dataset-card-for-openslr-nepali-large-asr-cleaned)
50
+ - [Table of Contents](#table-of-contents)
51
+ - [Dataset Description](#dataset-description)
52
+ - [Dataset Summary](#dataset-summary)
53
+ - [How to use?](#how-to-use)
54
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
55
+ - [Languages](#languages)
56
+ - [Dataset Structure](#dataset-structure)
57
+ - [Data Instances](#data-instances)
58
+ - [Data Fields](#data-fields)
59
+ - [Data Splits](#data-splits)
60
+
61
+
62
+ ## Dataset Description
63
+
64
+ - **Homepage:** [Original OpenSLR Large Nepali ASR Dataset link](https://www.openslr.org/54/)
65
+ - **Repository:** [Needs More Information]
66
+ - **Paper:** [Needs More Information]
67
+ - **Leaderboard:** [Needs More Information]
68
+ - **Point of Contact:** [Sagar Sapkota](mailto:spkt.sagar@gmail.com)
69
+
70
+ ### Dataset Summary
71
+
72
+ This data set contains transcribed audio data for Nepali. The data set consists of FLAC audio files and a TSV index file. The file utt_spk_text.tsv contains a FileID, an anonymized UserID, and the transcription of the audio in the file.
73
+ The data set has been manually quality-checked, but there might still be errors.
74
+
75
+ The audio files are sampled at a rate of 16KHz, and leading and trailing silences are trimmed using torchaudio's voice activity detection.
76
+
77
+ For your reference, following was the function applied on each of the original openslr utterances.
78
+ ```python
79
+ import torchaudio
80
+
81
+ SAMPLING_RATE = 16000
82
+
83
+ def process_audio_file(orig_path, new_path):
84
+ """Read and process file in `orig_path` and save it to `new_path`"""
85
+ waveform, sampling_rate = torchaudio.load(orig_path)
86
+ if sampling_rate != SAMPLING_RATE:
87
+ waveform = torchaudio.functional.resample(waveform, sampling_rate, SAMPLING_RATE)
88
+ # trim end silences with Voice Activity Detection
89
+ waveform = torchaudio.functional.vad(waveform, sample_rate=SAMPLING_RATE)
90
+ torchaudio.save(new_path, waveform, sample_rate=SAMPLING_RATE)
91
+ ```
92
+
93
+ ### How to use?
94
+
95
+ There are two configurations for the data: one to download the original data and the other to download the preprocessed data as described above.
96
+ 1. First, to download the original dataset with HuggingFace's [Dataset](https://huggingface.co/docs/datasets/) API:
97
+ ```python
98
+ from datasets import load_dataset
99
+
100
+ dataset = load_dataset("spktsagar/openslr-nepali-asr-cleaned", name="original", split='train')
101
+ ```
102
+
103
+ 2. To download the preprocessed dataset:
104
+ ```python
105
+ from datasets import load_dataset
106
+
107
+ dataset = load_dataset("spktsagar/openslr-nepali-asr-cleaned", name="cleaned", split='train')
108
+ ```
109
+
110
+ ### Supported Tasks and Leaderboards
111
+
112
+ - `automatic-speech-recognition`: The dataset can be used to train a model for Automatic Speech Recognition.
113
+
114
+ ### Languages
115
+
116
+ Nepali
117
+
118
+ ## Dataset Structure
119
+
120
+ ### Data Instances
121
+
122
+ ```js
123
+ {
124
+ 'utterance_id': 'e1c4d414df',
125
+ 'speaker_id': '09da0',
126
+ 'utterance': {
127
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/e3cf9a618900289ecfd4a65356633d7438317f71c500cbed122960ab908e1e8a/cleaned/asr_nepali/data/e1/e1c4d414df.flac',
128
+ 'array': array([-0.00192261, -0.00204468, -0.00158691, ..., 0.00323486, 0.00256348, 0.00262451], dtype=float32),
129
+ 'sampling_rate': 16000
130
+ },
131
+ 'transcription': '२००५ मा बिते',
132
+ 'num_frames': 42300
133
+ }
134
+ ```
135
+
136
+ ### Data Fields
137
+
138
+ - utterance_id: a string identifying the utterance
139
+ - speaker_id: obfuscated unique id of the speaker whose utterance is in the current instance
140
+ - utterance:
141
+ - path: path to the utterance .flac file
142
+ - array: numpy array of the utterance
143
+ - sampling_rate: sample rate of the utterance
144
+ - transcription: the Nepali text spoken in the utterance
145
+ - num_frames: length of waveform array
146
+
147
+ ### Data Splits
148
+
149
+ The dataset is not split. The consumer should split it as per their requirements.
data/asr_nepali_0.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de2deb469abc4b52ee75b074d573b7465a9a0f71052726fcf26bcf65b0230896
3
+ size 378723044
data/asr_nepali_1.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:202d8be53030fcbf0dd8e2ea38649d7d7080e8e3670026fac136cd61e0fb14a5
3
+ size 371802027
data/asr_nepali_2.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe04e2744214cf8f2119e8e23e9c832c89570fabc1a4f311de9bc2e0031f0cda
3
+ size 376239355
data/asr_nepali_3.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b321bc9463ff685888967835c58a41987f7454c8e249d989da1e299ebece469d
3
+ size 367029923
data/asr_nepali_4.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a7175f7bdf2a2d88e4d4a6c258fe6dcb630ae19ff63b698dac8c735746d2a80
3
+ size 372089464
data/asr_nepali_5.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1563a65297a6d8be29243cb4db6dad77474a9ecaa9022f6ca0040af5b415232d
3
+ size 365536369
data/asr_nepali_6.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bc48de39f00c4ec496209877631d71e0b951829d36c7c6c2ed8a8ed1f2d8d5b
3
+ size 375917941
data/asr_nepali_7.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f715d2cbb3a61e82aae5be388972c43c2c7a8aa01872e06aa3924646b9fa051
3
+ size 376755969
data/asr_nepali_8.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3ef0b72c7cbbb4192340b4ec6952f4ab15c92dd4bf49ba2a6d8a287e6b4160d
3
+ size 375456610
data/asr_nepali_9.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a2547e0ccba5d6d6900ce804838b1b80bc513faec9dbeb94a73420ee4b5806e
3
+ size 371440796
data/asr_nepali_a.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc26716d97be13afbfdee2335611fd87cd4ea3726e6edb9356948070f0f4b598
3
+ size 376212879
data/asr_nepali_b.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:128748056db0c617b6c68f354e688b76bb219670b6a32fa9ffb7db1db79f0572
3
+ size 373268942
data/asr_nepali_c.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ddbf992f87427e94daa6b59efcc0eee0411f02fad670c740e4f06c369681c0b6
3
+ size 369528653
data/asr_nepali_d.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18c97612e97d904583fe6778f47e712a7a84d17d3ab85fffdcca6e807f259c9f
3
+ size 377083831
data/asr_nepali_e.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59a7e22a425b7417b15d14129cd1c5570ceafddc6ae9d32288fe4cbc3cbb99c1
3
+ size 371259866
data/asr_nepali_f.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cf2d6e231f38dad0bcecf921c95ecf664cc830ea337d2b14500178ea79b1bad
3
+ size 368333456
data/utt_spk_text_clean.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e92021d65a5f050e50bd9d5dcacdbb5e233c125bd6537fbc19d7e09a893770f
3
+ size 11990157
data/utt_spk_text_orig.tsv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51d7b57bf2a7b439b246039cd5d491396ee259c46687bb89f9e2fca6d24a3917
3
+ size 11835705
openslr-nepali-asr-cleaned.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Leading and Trailing Silences Removed Large Nepali ASR Dataset"""
15
+
16
+ import os
17
+ import csv
18
+
19
+ import datasets
20
+
21
+
22
# BibTeX citation for the upstream OpenSLR corpus (SLTU 2018 paper);
# surfaced via DatasetInfo.citation in _info().
_CITATION = """\
@inproceedings{kjartansson-etal-sltu2018,
    title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
    author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
    booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
    year = {2018},
    address = {Gurugram, India},
    month = aug,
    pages = {52--55},
    URL = {http://dx.doi.org/10.21437/SLTU.2018-11}
}
"""

# Human-readable summary shown on the dataset page and in
# DatasetInfo.description; kept verbatim from the dataset card.
_DESCRIPTION = """\
This data set contains transcribed audio data for Nepali. The data set consists of flac files, and a TSV file. The file utt_spk_text.tsv contains a FileID, anonymized UserID and the transcription of audio in the file.
The data set has been manually quality checked, but there might still be errors.

The audio files are sampled at rate of 16KHz, and leading and trailing silences are trimmed using torchaudio's voice activity detection.
"""
41
+
42
+ # Official homepage for the dataset
43
+ _HOMEPAGE = "https://www.openslr.org/54/"
44
+
45
+ # The licence for the dataset
46
+ _LICENSE = "license:cc-by-sa-4.0"
47
+
48
+ # TODO: Add link to the official dataset URLs here
49
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
50
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
51
+ _URLS = {
52
+ 'cleaned': {
53
+ "index_file": "https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/utt_spk_text_clean.tsv",
54
+ "zipfiles": [
55
+ f"https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/asr_nepali_{k}.zip"
56
+ for k in [*range(10), *'abcdef']
57
+ ],
58
+ },
59
+ 'original': {
60
+ "index_file": "https://huggingface.co/datasets/spktsagar/openslr-nepali-asr-cleaned/resolve/main/data/utt_spk_text_orig.tsv",
61
+ "zipfiles": [
62
+ f"https://www.openslr.org/resources/54/asr_nepali_{k}.zip"
63
+ for k in [*range(10), *'abcdef']
64
+ ],
65
+ },
66
+ }
67
+
68
+
69
# TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
class OpenslrNepaliAsrCleaned(datasets.GeneratorBasedBuilder):
    """End Silences Removed Large Nepali ASR Dataset.

    Loading script with two configs sharing one schema:
      * "original" - utterances as published on OpenSLR (zips from openslr.org)
      * "cleaned"  - 16 kHz utterances with end silences trimmed (zips from this repo)
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="original", version=VERSION,
                               description="All original utterances, speaker id and transcription from Openslr Large Nepali ASR Dataset"),
        datasets.BuilderConfig(name="cleaned", version=VERSION,
                               description="All cleaned utterances, speaker id and transcription from Openslr Large Nepali ASR Dataset"),
    ]

    # It's not mandatory to have a default configuration. Just use one if it make sense.
    DEFAULT_CONFIG_NAME = "original"

    def _info(self):
        """Return the DatasetInfo (schema, homepage, license, citation) for the active config."""
        # Both configs expose the same five fields; only the audio payload differs.
        features = datasets.Features(
            {
                "utterance_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                # Decoded lazily from the .flac path by the Audio feature.
                "utterance": datasets.Audio(sampling_rate=16000),
                "transcription": datasets.Value("string"),
                "num_frames": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the index TSV and all 16 audio zip shards; return a single TRAIN split."""
        index_file = dl_manager.download(_URLS[self.config.name]['index_file'])
        # Download the shard zips in batches of 4, then flatten the resulting
        # list-of-lists of local paths back into a single flat list.
        zip_paths = [item for sublist in [
            dl_manager.download(
                _URLS[self.config.name]['zipfiles'][i:i+4]
            ) for i in range(0, len(_URLS[self.config.name]['zipfiles']), 4)
        ] for item in sublist]
        # Map shard key -> extraction directory. url[-5] is the character just
        # before ".zip", i.e. the hex digit <k> of asr_nepali_<k>.zip; per the
        # lookup in _generate_examples it is presumably also the first character
        # of every utterance_id in that shard — TODO confirm against the TSV.
        audio_paths = dict(zip([url[-5] for url in _URLS[self.config.name]["zipfiles"]],
                               dl_manager.extract(zip_paths)))
        # Delete the downloaded archives once extracted, to free disk space.
        # NOTE(review): this removes files from the download manager's cache,
        # so a re-run will likely re-download all shards — confirm intended.
        for path in zip_paths:
            if os.path.exists(path):
                os.remove(path)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "index_file": index_file,
                    "audio_paths": audio_paths,
                },
            ),
        ]

    def _generate_examples(self, index_file, audio_paths):
        """Yield (key, example) pairs by joining the TSV index to extracted audio.

        `index_file` is a tab-separated file whose header row must provide the
        columns read below: utterance_id, speaker_id, transcription, num_frames.
        `audio_paths` maps the first character of an utterance_id to the
        extraction directory of the shard containing it.
        """
        with open(index_file, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter='\t')
            for key, row in enumerate(reader):
                # The cleaned archives carry an extra top-level "cleaned/"
                # directory; both layouts then continue with
                # asr_nepali/data/<first two id chars>/<id>.flac.
                if self.config.name == 'cleaned':
                    path = os.path.join(
                        audio_paths[row['utterance_id'][0]], 'cleaned',
                        'asr_nepali', 'data', row['utterance_id'][:2],
                        f"{row['utterance_id']}.flac"
                    )
                else:
                    path = os.path.join(
                        audio_paths[row['utterance_id'][0]],
                        'asr_nepali', 'data', row['utterance_id'][:2],
                        f"{row['utterance_id']}.flac"
                    )
                yield key, {
                    "utterance_id": row['utterance_id'],
                    "speaker_id": row['speaker_id'],
                    # Path string; the Audio feature decodes it on access.
                    "utterance": path,
                    "transcription": row['transcription'],
                    "num_frames": int(row['num_frames']),
                }