|
|
import datasets |
|
|
import os |
|
|
import glob |
|
|
import pandas as pd |
|
|
from pydub import AudioSegment |
|
|
import textgrid |
|
|
|
|
|
_DESCRIPTION = """ |
|
|
A custom dataset combining L2-ARCTIC and SpeechOcean for L2 English speech analysis. |
|
|
It includes non-native English speech from various L1s, transcripts, and phoneme alignments (from TextGrid). |
|
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://example.com/your_dataset_homepage" |
|
|
|
|
|
_LICENSE = "Creative Commons Attribution 4.0 International Public License (CC-BY-4.0)" |
|
|
|
|
|
_L2_ARCTIC_SPEAKERS = ["ABA", "HJK", "MBMPS", "TXHC", "YBAA"] |
|
|
_L2_ARCTIC_L1_MAP = { |
|
|
"ABA": "Hindi", "HJK": "Korean", "MBMPS": "Mandarin", "TXHC": "Spanish", "YBAA": "Arabic" |
|
|
} |
|
|
|
|
|
class MyL2SpeechDataset(datasets.GeneratorBasedBuilder):
    """Builder that merges L2-ARCTIC and SpeechOcean into a single TRAIN split.

    L2-ARCTIC contributes audio, transcripts, and phoneme alignments parsed
    from per-utterance TextGrid files (no pronunciation scores, so -1.0 is
    used as a sentinel). SpeechOcean contributes audio, transcripts, and a
    sentence-level pronunciation score read from ``train_scores.csv`` (no
    alignments, so the alignment list is empty).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the DatasetInfo describing the unified feature schema."""
        features = datasets.Features({
            "audio": datasets.Audio(sampling_rate=16_000),
            "text": datasets.Value("string"),
            "speaker_id": datasets.Value("string"),
            "l1": datasets.Value("string"),
            "dataset_source": datasets.Value("string"),
            "phoneme_alignment": datasets.Sequence(
                {
                    "phoneme": datasets.Value("string"),
                    "start_time": datasets.Value("float"),
                    "end_time": datasets.Value("float"),
                }
            ),
            # -1.0 is the sentinel for examples without a score (L2-ARCTIC).
            "pronunciation_score": datasets.Value("float"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Point the single TRAIN split at the two local corpus directories.

        The data is expected to already exist locally under ``./l2_arctic``
        and ``./speechocean`` (relative to the working directory), so
        ``dl_manager`` is intentionally unused — nothing is downloaded.
        """
        l2_arctic_path = os.path.join("l2_arctic")
        speechocean_path = os.path.join("speechocean")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "l2_arctic_path": l2_arctic_path,
                    "speechocean_path": speechocean_path,
                },
            )
        ]

    def _generate_examples(self, l2_arctic_path, speechocean_path):
        """Yield ``(key, example)`` pairs from both corpora.

        Keys are made globally unique across the split by prefixing with the
        speaker id (L2-ARCTIC) or ``so_`` (SpeechOcean).

        Args:
            l2_arctic_path: Root directory of the L2-ARCTIC corpus, with
                per-speaker ``wav/``, ``transcript/``, ``textgrid/`` subdirs.
            speechocean_path: Root directory of the SpeechOcean corpus, with
                a ``wavs/`` subdir and a ``train_scores.csv`` file.
        """
        # --- L2-ARCTIC: audio + transcript + TextGrid phoneme alignment ---
        for speaker_id in _L2_ARCTIC_SPEAKERS:
            speaker_dir = os.path.join(l2_arctic_path, speaker_id)
            wav_dir = os.path.join(speaker_dir, "wav")
            transcript_dir = os.path.join(speaker_dir, "transcript")
            textgrid_dir = os.path.join(speaker_dir, "textgrid")

            # Sort for a deterministic example order across runs/filesystems.
            wav_files = sorted(glob.glob(os.path.join(wav_dir, "*.wav")))
            for wav_file in wav_files:
                file_id = os.path.splitext(os.path.basename(wav_file))[0]
                transcript_file = os.path.join(transcript_dir, f"{file_id}.txt")
                textgrid_file = os.path.join(textgrid_dir, f"{file_id}.TextGrid")

                # Skip (with a warning) utterances missing either sidecar file.
                if not os.path.exists(transcript_file):
                    print(f"Warning: Transcript file not found for {file_id}")
                    continue
                if not os.path.exists(textgrid_file):
                    print(f"Warning: TextGrid file not found for {file_id}")
                    continue

                with open(transcript_file, "r", encoding="utf-8") as f:
                    text = f.read().strip()

                phoneme_alignment_data = []
                try:
                    tg = textgrid.TextGrid.fromFile(textgrid_file)
                    # BUG FIX: the textgrid package's TextGrid exposes
                    # getNames(), not tierNames(); the old call raised
                    # AttributeError and the except branch below silently
                    # emptied every alignment.
                    for tier_name in tg.getNames():
                        if tier_name == 'phones':
                            phone_tier = tg.getFirst(tier_name)
                            for interval in phone_tier:
                                phoneme_alignment_data.append({
                                    "phoneme": interval.mark,
                                    "start_time": interval.minTime,
                                    "end_time": interval.maxTime,
                                })
                            break
                except Exception as e:
                    # Best-effort: a malformed TextGrid costs only the
                    # alignment, not the whole example.
                    print(f"Error parsing TextGrid {textgrid_file}: {e}")
                    phoneme_alignment_data = []

                # BUG FIX: prefix the key with speaker_id — ARCTIC prompt ids
                # (e.g. "arctic_a0001") repeat across speakers, and duplicate
                # keys make the datasets library raise DuplicatedKeysError.
                yield f"{speaker_id}_{file_id}", {
                    "audio": wav_file,
                    "text": text,
                    "speaker_id": speaker_id,
                    "l1": _L2_ARCTIC_L1_MAP.get(speaker_id, "unknown"),
                    "dataset_source": "l2_arctic",
                    "phoneme_alignment": phoneme_alignment_data,
                    "pronunciation_score": -1.0,
                }

        # --- SpeechOcean: audio + transcript + sentence-level score ---
        speechocean_wav_dir = os.path.join(speechocean_path, "wavs")
        scores_df = pd.read_csv(os.path.join(speechocean_path, "train_scores.csv"))

        for _, row in scores_df.iterrows():
            file_id = os.path.splitext(row['file_name'])[0]
            wav_file = os.path.join(speechocean_wav_dir, row['file_name'])

            yield f"so_{file_id}", {
                "audio": wav_file,
                "text": row['text'],
                "speaker_id": str(row['speaker_id']),
                # Hard-coded: assumes all SpeechOcean speakers here are
                # Mandarin-L1 — TODO confirm against the CSV/corpus docs.
                "l1": "Mandarin",
                "dataset_source": "speechocean",
                "phoneme_alignment": [],
                # Cast so the feature dtype is consistent even when the CSV
                # column parses as int.
                "pronunciation_score": float(row['score']),
            }
|
|
|