sahilkadge committed on
Commit
399e131
·
verified ·
1 Parent(s): ef5d203

Update audio_data.py

Browse files
Files changed (1) hide show
  1. audio_data.py +22 -39
audio_data.py CHANGED
@@ -14,7 +14,7 @@
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
- """TIMIT automatic speech recognition dataset."""
18
 
19
 
20
  import os
@@ -26,54 +26,48 @@ from datasets.tasks import AutomaticSpeechRecognition
26
 
27
  _CITATION = """\
28
  @inproceedings{
29
- title={TIMIT Acoustic-Phonetic Continuous Speech Corpus},
30
- author={Garofolo, John S., et al},
31
- ldc_catalog_no={LDC93S1},
32
- DOI={https://doi.org/10.35111/17gk-bn40},
33
- journal={Linguistic Data Consortium, Philadelphia},
34
- year={1983}
35
  }
36
  """
37
 
38
  _DESCRIPTION = """\
39
- The TIMIT corpus of reading speech has been developed to provide speech data for acoustic-phonetic research studies
40
  and for the evaluation of automatic speech recognition systems.
41
- TIMIT contains high quality recordings of 630 individuals/speakers with 8 different American English dialects,
42
- with each individual reading up to 10 phonetically rich sentences.
43
- More info on TIMIT dataset can be understood from the "README" which can be found here:
44
- https://catalog.ldc.upenn.edu/docs/LDC93S1/readme.txt
45
  """
46
 
47
- _HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC93S1"
48
 
49
 
50
- class TimitASRConfig(datasets.BuilderConfig):
51
- """BuilderConfig for TimitASR."""
52
 
53
  def __init__(self, **kwargs):
54
  """
55
  Args:
56
- data_dir: `string`, the path to the folder containing the files in the
57
- downloaded .tar
58
  citation: `string`, citation for the data set
59
  url: `string`, url for information about the data set
60
  **kwargs: keyword arguments forwarded to super.
61
  """
62
- super(TimitASRConfig, self).__init__(version=datasets.Version("2.0.1", ""), **kwargs)
63
 
64
 
65
- class TimitASR(datasets.GeneratorBasedBuilder):
66
- """TimitASR dataset."""
67
 
68
- BUILDER_CONFIGS = [TimitASRConfig(name="clean", description="'Clean' speech.")]
69
 
70
  @property
71
  def manual_download_instructions(self):
72
  return (
73
- "To use TIMIT you have to download it manually. "
74
- "Please create an account and download the dataset from https://catalog.ldc.upenn.edu/LDC93S1 \n"
75
- "Then extract all files in one folder and load the dataset with: "
76
- "`datasets.load_dataset('timit_asr', data_dir='path/to/folder/folder_name')`"
77
  )
78
 
79
  def _info(self):
@@ -84,12 +78,10 @@ class TimitASR(datasets.GeneratorBasedBuilder):
84
  "file": datasets.Value("string"),
85
  "audio": datasets.Audio(sampling_rate=16_000),
86
  "text": datasets.Value("string"),
87
-
88
-
89
  }
90
  ),
91
  supervised_keys=("file", "text"),
92
- homepage=_HOMEPAGE,
93
  citation=_CITATION,
94
  task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
95
  )
@@ -100,7 +92,7 @@ class TimitASR(datasets.GeneratorBasedBuilder):
100
 
101
  if not os.path.exists(data_dir):
102
  raise FileNotFoundError(
103
- f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('timit_asr', data_dir=...)` that includes files unzipped from the TIMIT zip. Manual download instructions: {self.manual_download_instructions}"
104
  )
105
 
106
  return [
@@ -109,10 +101,9 @@ class TimitASR(datasets.GeneratorBasedBuilder):
109
  ]
110
 
111
  def _generate_examples(self, split, data_dir):
112
- """Generate examples from TIMIT archive_path based on the test/train csv information."""
113
  # Iterating the contents of the data to extract the relevant information
114
  wav_paths = sorted(Path(data_dir).glob(f"**/{split}/**/*.wav"))
115
- wav_paths = wav_paths if wav_paths else sorted(Path(data_dir).glob(f"**/{split.upper()}/**/*.WAV"))
116
  for key, wav_path in enumerate(wav_paths):
117
 
118
  # extract transcript
@@ -120,18 +111,10 @@ class TimitASR(datasets.GeneratorBasedBuilder):
120
  with txt_path.open(encoding="utf-8") as op:
121
  transcript = " ".join(op.readlines()[0].split()[2:]) # first two items are sample number
122
 
123
- # extract phonemes
124
-
125
- # extract words
126
-
127
-
128
-
129
-
130
  example = {
131
  "file": str(wav_path),
132
  "audio": str(wav_path),
133
  "text": transcript,
134
-
135
  }
136
 
137
  yield key, example
 
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
+ """AudioData dataset."""
18
 
19
 
20
  import os
 
26
 
27
  _CITATION = """\
28
  @inproceedings{
29
+ title={AudioData Speech Corpus},
30
+ author={Your Name},
31
+ year={Year}
 
 
 
32
  }
33
  """
34
 
35
  _DESCRIPTION = """\
36
+ The AudioData corpus of reading speech has been developed to provide speech data for acoustic-phonetic research studies
37
  and for the evaluation of automatic speech recognition systems.
38
+ More info on the AudioData dataset can be found in the "README" available here:
39
+ https://example.com/path/to/readme.txt
 
 
40
  """
41
 
42
+ # _HOMEPAGE = "https://example.com/path/to/dataset"
43
 
44
 
45
+ class AudioDataConfig(datasets.BuilderConfig):
46
+ """BuilderConfig for AudioData."""
47
 
48
  def __init__(self, **kwargs):
49
  """
50
  Args:
51
+ data_dir: `string`, the path to the folder containing the audio files
 
52
  citation: `string`, citation for the data set
53
  url: `string`, url for information about the data set
54
  **kwargs: keyword arguments forwarded to super.
55
  """
56
+ super(AudioDataConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
57
 
58
 
59
+ class AudioData(datasets.GeneratorBasedBuilder):
60
+ """AudioData dataset."""
61
 
62
+ BUILDER_CONFIGS = [AudioDataConfig(name="clean", description="'Clean' speech.")]
63
 
64
  @property
65
  def manual_download_instructions(self):
66
  return (
67
+ "To use AudioData you have to download it manually. "
68
+ "Please download the dataset from https://example.com/path/to/dataset \n"
69
+ "Then extract all audio files in one folder and load the dataset with: "
70
+ "`datasets.load_dataset('audio_data', data_dir='path/to/audio/folder')`"
71
  )
72
 
73
  def _info(self):
 
78
  "file": datasets.Value("string"),
79
  "audio": datasets.Audio(sampling_rate=16_000),
80
  "text": datasets.Value("string"),
 
 
81
  }
82
  ),
83
  supervised_keys=("file", "text"),
84
+ # homepage=_HOMEPAGE,
85
  citation=_CITATION,
86
  task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
87
  )
 
92
 
93
  if not os.path.exists(data_dir):
94
  raise FileNotFoundError(
95
+ f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('audio_data', data_dir=...)` that includes audio files. Manual download instructions: {self.manual_download_instructions}"
96
  )
97
 
98
  return [
 
101
  ]
102
 
103
  def _generate_examples(self, split, data_dir):
104
+ """Generate examples from AudioData based on the test/train csv information."""
105
  # Iterating the contents of the data to extract the relevant information
106
  wav_paths = sorted(Path(data_dir).glob(f"**/{split}/**/*.wav"))
 
107
  for key, wav_path in enumerate(wav_paths):
108
 
109
  # extract transcript
 
111
  with txt_path.open(encoding="utf-8") as op:
112
  transcript = " ".join(op.readlines()[0].split()[2:]) # first two items are sample number
113
 
 
 
 
 
 
 
 
114
  example = {
115
  "file": str(wav_path),
116
  "audio": str(wav_path),
117
  "text": transcript,
 
118
  }
119
 
120
  yield key, example