| """ |
Script to prepare and upload the Surah Al-Ikhlas Error Detection Dataset to Hugging Face.

Dataset: Audio recordings of Quran recitations with error labels
- 1506 WAV files
- Labels encoded in the filename: ID{participant}V{verse}{T/F},
  e.g. ID100V1F.wav -> participant 100, verse 1, contains an error
- T = True (correct recitation)
- F = False (contains error)
| """ |
|
|
import os
import re
import pandas as pd
from pathlib import Path
from datasets import Dataset, Audio, ClassLabel
|
|
| |
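# Paths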
| DATASET_PATH = "/Users/muaz/Downloads/Surah Al-Ikhlas of the Holy Quran Error Detection Dataset/Dataset and Sounds" |
| EXCEL_PATH = os.path.join(DATASET_PATH, "Dataset.xlsx") |
| AUDIO_PATH = os.path.join(DATASET_PATH, "Sound recordings") |
| OUTPUT_PATH = "/Users/muaz/cursor/IkhlasDataset" |
|
|
| |
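# Arabic text of the four verses of Surah Al-Ikhlas (Quran 112)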
| VERSE_TEXTS = { |
| 1: "قُلْ هُوَ اللَّهُ أَحَدٌ", |
| 2: "اللَّهُ الصَّمَدُ", |
| 3: "لَمْ يَلِدْ وَلَمْ يُولَدْ", |
| 4: "وَلَمْ يَكُن لَّهُ كُفُوًا أَحَدٌ" |
| } |
|
|
| def parse_filename(filename): |
| """Parse filename like 'ID100V1F.wav' to extract components.""" |
| match = re.match(r'ID(\d+)V(\d)([TF])\.wav', filename) |
| if match: |
| return { |
| 'participant_id': int(match.group(1)), |
| 'verse': int(match.group(2)), |
| 'is_correct': match.group(3) == 'T', |
| 'label': 1 if match.group(3) == 'T' else 0, |
| 'label_text': 'correct' if match.group(3) == 'T' else 'error' |
| } |
| return None |
|
|
| def load_and_prepare_data(): |
| """Load audio files and Excel metadata.""" |
| print("=" * 60) |
| print("Loading and preparing dataset...") |
| print("=" * 60) |
| |
| |
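    # Collect all WAV recordings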
| audio_files = list(Path(AUDIO_PATH).glob("*.wav")) |
| print(f"\nFound {len(audio_files)} audio files") |
| |
| |
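    # Parse participant, verse, and label from each filename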
| data = [] |
| for audio_file in audio_files: |
| parsed = parse_filename(audio_file.name) |
| if parsed: |
| parsed['filename'] = audio_file.name |
| parsed['audio_path'] = str(audio_file) |
| data.append(parsed) |
| |
| df = pd.DataFrame(data) |
| print(f"Parsed {len(df)} files successfully") |
| |
| |
| print("\nLoading Excel metadata...") |
| excel_df = pd.read_excel(EXCEL_PATH, sheet_name='Sheet1') |
| excel_df.columns = [col.strip() for col in excel_df.columns] |
| |
| |
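    # Sort by participant and verse so rows line up with the Excel sheet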
| df = df.sort_values(['participant_id', 'verse']).reset_index(drop=True) |
| |
| |
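    # Attach the Arabic verse text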
| df['verse_text'] = df['verse'].map(VERSE_TEXTS) |
| |
| |
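    # Merge the Excel annotations; this assumes the Excel rows follow the same
    # participant/verse order as the sorted audio files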
| if len(df) == len(excel_df): |
| excel_df = excel_df.reset_index(drop=True) |
| |
| |
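        # Excel uses 0 / NaN for recordings without errors; normalise these to empty strings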
| df['error_type'] = excel_df['Error type'].apply(lambda x: '' if x == 0 or pd.isna(x) else str(x)) |
| df['error_location'] = excel_df['Error location'].apply(lambda x: '' if x == 0 or pd.isna(x) else str(x)) |
| df['error_explanation'] = excel_df['Error explanation'].apply(lambda x: '' if x == 0 or pd.isna(x) else str(x)) |
| df['error_count'] = excel_df['Error number'].fillna(0).astype(int) |
| |
| print("Merged Excel metadata successfully!") |
| else: |
| df['error_type'] = '' |
| df['error_location'] = '' |
| df['error_explanation'] = '' |
| df['error_count'] = 0 |
| print(f"Warning: Excel rows ({len(excel_df)}) don't match audio files ({len(df)})") |
| |
| print(f"\nLabel distribution:") |
| print(df['label_text'].value_counts()) |
| |
| print(f"\nVerse distribution:") |
| print(df['verse'].value_counts().sort_index()) |
| |
| print(f"\nUnique participants: {df['participant_id'].nunique()}") |
| |
| return df |
|
|
| def create_hf_dataset(df): |
| """Create Hugging Face dataset from DataFrame.""" |
| print("\n" + "=" * 60) |
| print("Creating Hugging Face dataset...") |
| print("=" * 60) |
| |
| |
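    # Assemble the columns for the Hugging Face dataset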
| data = { |
| 'audio': df['audio_path'].tolist(), |
| 'label': df['label'].tolist(), |
| 'participant_id': df['participant_id'].tolist(), |
| 'verse_number': df['verse'].tolist(), |
| 'verse_text': df['verse_text'].tolist(), |
| 'error_type': df['error_type'].tolist(), |
| 'error_location': df['error_location'].tolist(), |
| 'error_explanation': df['error_explanation'].tolist(), |
| 'error_count': df['error_count'].tolist(), |
| } |
| |
| |
| dataset = Dataset.from_dict(data) |
| |
| |
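    # Encode the label column as a ClassLabel (0 = error, 1 = correct)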
| dataset = dataset.cast_column('label', ClassLabel(names=['error', 'correct'])) |
| |
| |
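    # Decode audio on access and resample to 16 kHz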
| dataset = dataset.cast_column('audio', Audio(sampling_rate=16000)) |
| |
| |
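    # Stratified 80/20 train/test split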
| dataset = dataset.train_test_split(test_size=0.2, seed=42, stratify_by_column='label') |
| |
| print(f"\nDataset created:") |
| print(f" Train: {len(dataset['train'])} samples") |
| print(f" Test: {len(dataset['test'])} samples") |
| |
| |
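    # Report the label balance of each split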
| train_labels = dataset['train']['label'] |
| test_labels = dataset['test']['label'] |
| print(f"\n Train label distribution: error={train_labels.count(0)}, correct={train_labels.count(1)}") |
| print(f" Test label distribution: error={test_labels.count(0)}, correct={test_labels.count(1)}") |
| |
| return dataset |
|
|
| def create_dataset_card(): |
| """Create README.md for the dataset.""" |
| readme_content = """--- |
| license: cc-by-4.0 |
| task_categories: |
| - audio-classification |
| language: |
| - ar |
| tags: |
| - quran |
| - tajweed |
| - recitation |
| - error-detection |
| - arabic |
| - audio |
| - speech |
| - islam |
| pretty_name: Surah Al-Ikhlas Quran Recitation Error Detection Dataset |
| size_categories: |
| - 1K<n<10K |
| dataset_info: |
| features: |
| - name: audio |
| dtype: audio |
| - name: label |
| dtype: |
| class_label: |
| names: |
| '0': error |
| '1': correct |
| - name: participant_id |
| dtype: int32 |
| - name: verse_number |
| dtype: int32 |
| - name: verse_text |
| dtype: string |
| - name: error_type |
| dtype: string |
| - name: error_location |
| dtype: string |
| - name: error_explanation |
| dtype: string |
| - name: error_count |
| dtype: int32 |
| splits: |
| - name: train |
| num_examples: 1204 |
| - name: test |
| num_examples: 302 |
| --- |
| |
| # Surah Al-Ikhlas Quran Recitation Error Detection Dataset |
| |
| ## Dataset Description |
| |
| This dataset contains audio recordings of Quran recitations of **Surah Al-Ikhlas** (Chapter 112 - The Sincerity) with labels indicating whether each recitation contains errors in Tajweed (Quran recitation rules). |
| |
| ### Dataset Summary |
| |
| | Statistic | Value | |
| |-----------|-------| |
| | **Total Samples** | 1,506 | |
| | **Correct Recitations** | 655 (43.5%) | |
| | **Error Recitations** | 851 (56.5%) | |
| | **Unique Participants** | 384 | |
| | **Verses** | 4 | |
| | **Audio Format** | WAV | |
| | **Language** | Arabic | |
| |
| ### Surah Al-Ikhlas Text |
| |
| | Verse | Arabic | Transliteration | Translation | |
| |-------|--------|-----------------|-------------| |
| | 1 | قُلْ هُوَ اللَّهُ أَحَدٌ | Qul huwa Allahu ahad | Say, "He is Allah, [who is] One" | |
| | 2 | اللَّهُ الصَّمَدُ | Allahu assamad | "Allah, the Eternal Refuge" | |
| | 3 | لَمْ يَلِدْ وَلَمْ يُولَدْ | Lam yalid walam yulad | "He neither begets nor is born" | |
| | 4 | وَلَمْ يَكُن لَّهُ كُفُوًا أَحَدٌ | Walam yakun lahu kufuwan ahad | "Nor is there to Him any equivalent" | |
| |
| ## Dataset Structure |
| |
| ### Data Fields |
| |
| | Field | Type | Description | |
| |-------|------|-------------| |
| | `audio` | Audio | Audio file (WAV format, 16kHz) | |
| | `label` | ClassLabel | 0 = error, 1 = correct | |
| | `participant_id` | int32 | Unique identifier for the reciter | |
| | `verse_number` | int32 | Verse number (1-4) | |
| | `verse_text` | string | Arabic text of the verse | |
| | `error_type` | string | Type of Tajweed error (Arabic, if applicable) | |
| | `error_location` | string | Location of error in the verse | |
| | `error_explanation` | string | Explanation of the error (Arabic) | |
| | `error_count` | int32 | Number of errors in the recitation | |
| |
| ### Data Splits |
| |
| | Split | Samples | Error | Correct | |
| |-------|---------|-------|---------| |
| | Train | 1,204 | 680 | 524 | |
| | Test | 302 | 171 | 131 | |
| |
| ## Usage |
| |
| ```python |
| from datasets import load_dataset |
| |
| # Load the dataset |
| dataset = load_dataset("YOUR_USERNAME/surah-al-ikhlas-error-detection") |
| |
| # Access training data |
| train_data = dataset['train'] |
| |
| # Example: Get first sample |
| sample = train_data[0] |
| print(f"Label: {'Correct' if sample['label'] == 1 else 'Error'}") |
| print(f"Verse {sample['verse_number']}: {sample['verse_text']}") |
| print(f"Audio sampling rate: {sample['audio']['sampling_rate']} Hz") |
| |
| # Filter by label |
| correct_samples = train_data.filter(lambda x: x['label'] == 1) |
| error_samples = train_data.filter(lambda x: x['label'] == 0) |
| ``` |
| |
| ### Training Example |
| |
| ```python |
| from datasets import load_dataset |
| from transformers import AutoFeatureExtractor, AutoModelForAudioClassification, TrainingArguments, Trainer |
| |
| # Load dataset |
| dataset = load_dataset("YOUR_USERNAME/surah-al-ikhlas-error-detection") |
| |
| # Load model and feature extractor |
| model_name = "facebook/wav2vec2-base" |
| feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) |
| model = AutoModelForAudioClassification.from_pretrained( |
| model_name, |
| num_labels=2, |
| label2id={"error": 0, "correct": 1}, |
| id2label={0: "error", 1: "correct"} |
| ) |
| |
| # Preprocess |
| def preprocess(examples): |
| audio_arrays = [x["array"] for x in examples["audio"]] |
| inputs = feature_extractor( |
| audio_arrays, |
| sampling_rate=16000, |
| padding=True, |
| return_tensors="pt" |
| ) |
| inputs["labels"] = examples["label"] |
| return inputs |
| |
| dataset = dataset.map(preprocess, batched=True, remove_columns=["audio"]) |
| |
| # Train |
| training_args = TrainingArguments( |
| output_dir="./results", |
| evaluation_strategy="epoch", |
| num_train_epochs=5, |
| per_device_train_batch_size=8, |
| ) |
| |
| trainer = Trainer( |
| model=model, |
| args=training_args, |
| train_dataset=dataset["train"], |
| eval_dataset=dataset["test"], |
| ) |
| |
| trainer.train() |
| ``` |
| |
| ## Applications |
| |
| This dataset can be used for: |
| |
| - 🎯 Training audio classification models for Tajweed error detection |
| - 📱 Building Quran recitation assessment applications |
| - 🔬 Research in Arabic speech processing |
| - 📚 Educational tools for learning proper Quran recitation |
| - 🤖 Developing AI-assisted Quran tutoring systems |
| |
| ## Error Types |
| |
| The dataset includes various Tajweed errors such as: |
| - Errors in Qalqalah (قلقلة) - echoing sounds |
| - Errors in letter pronunciation |
| - Errors in elongation (Madd) |
| - And other Tajweed rule violations |
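
The `error_type`, `error_location`, and `error_explanation` fields carry the original Arabic annotations for recordings labelled as errors. A quick way to inspect them:

```python
from datasets import load_dataset

dataset = load_dataset("YOUR_USERNAME/surah-al-ikhlas-error-detection")

# Look at the annotations of a few error recordings
errors = dataset["train"].filter(lambda x: x["label"] == 0)
for row in errors.select(range(3)):
    print(row["verse_number"], row["error_type"], row["error_location"])
```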
| |
| ## Citation |
| |
| If you use this dataset in your research, please cite it appropriately. |
| |
| ## License |
| |
| This dataset is released under the [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/) license. |
| """ |
| |
    os.makedirs(OUTPUT_PATH, exist_ok=True)
    readme_path = os.path.join(OUTPUT_PATH, "README.md")
| with open(readme_path, 'w', encoding='utf-8') as f: |
| f.write(readme_content) |
| |
| print(f"\nDataset card created: {readme_path}") |
| return readme_path |
|
|
| def main(): |
| |
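    # Load audio files and Excel metadata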
| df = load_and_prepare_data() |
| |
| |
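    # Build the train/test DatasetDict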
| dataset = create_hf_dataset(df) |
| |
| |
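    # Write the dataset card (README.md)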
| create_dataset_card() |
| |
| |
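    # Save a flat CSV copy of the metadata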
| metadata_path = os.path.join(OUTPUT_PATH, "metadata.csv") |
| df.to_csv(metadata_path, index=False) |
| print(f"\nMetadata saved to: {metadata_path}") |
| |
| |
| print("\n" + "=" * 60) |
| print("READY TO UPLOAD TO HUGGING FACE") |
| print("=" * 60) |
| print(""" |
| Your dataset is prepared! To upload to Hugging Face: |
| |
| 1. First, login to Hugging Face CLI: |
| |
| huggingface-cli login |
| |
2. Then run the generated upload script (saved in the output folder):
| |
| python3 upload_to_hf.py |
| |
| Make sure to edit upload_to_hf.py and replace YOUR_USERNAME with your |
| actual Hugging Face username before running! |
| """) |
| |
| |
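    # Write a standalone upload script next to the dataset card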
| upload_script = '''"""Upload dataset to Hugging Face Hub""" |
| import os |
| import re |
| import pandas as pd |
| from pathlib import Path |
| from datasets import Dataset, Audio, ClassLabel |
| |
| # Configuration - CHANGE THIS TO YOUR USERNAME |
| HF_USERNAME = "YOUR_USERNAME" # <-- Change this! |
| REPO_NAME = "surah-al-ikhlas-error-detection" |
| |
| # Paths |
| DATASET_PATH = "/Users/muaz/Downloads/Surah Al-Ikhlas of the Holy Quran Error Detection Dataset/Dataset and Sounds" |
| EXCEL_PATH = os.path.join(DATASET_PATH, "Dataset.xlsx") |
| AUDIO_PATH = os.path.join(DATASET_PATH, "Sound recordings") |
| |
| VERSE_TEXTS = { |
| 1: "قُلْ هُوَ اللَّهُ أَحَدٌ", |
| 2: "اللَّهُ الصَّمَدُ", |
| 3: "لَمْ يَلِدْ وَلَمْ يُولَدْ", |
| 4: "وَلَمْ يَكُن لَّهُ كُفُوًا أَحَدٌ" |
| } |
| |
| def parse_filename(filename): |
| match = re.match(r'ID(\\d+)V(\\d)([TF])\\.wav', filename) |
| if match: |
| return { |
| 'participant_id': int(match.group(1)), |
| 'verse': int(match.group(2)), |
| 'label': 1 if match.group(3) == 'T' else 0, |
| } |
| return None |
| |
| print("Loading dataset...") |
| |
| # Get all audio files |
| audio_files = list(Path(AUDIO_PATH).glob("*.wav")) |
| |
| # Parse all filenames |
| data = [] |
| for audio_file in audio_files: |
| parsed = parse_filename(audio_file.name) |
| if parsed: |
| parsed['audio_path'] = str(audio_file) |
| data.append(parsed) |
| |
| df = pd.DataFrame(data) |
| df = df.sort_values(['participant_id', 'verse']).reset_index(drop=True) |
| df['verse_text'] = df['verse'].map(VERSE_TEXTS) |
| |
| # Load Excel metadata |
| excel_df = pd.read_excel(EXCEL_PATH, sheet_name='Sheet1') |
| excel_df.columns = [col.strip() for col in excel_df.columns] |
| excel_df = excel_df.reset_index(drop=True) |
| |
| df['error_type'] = excel_df['Error type'].apply(lambda x: '' if x == 0 or pd.isna(x) else str(x)) |
| df['error_location'] = excel_df['Error location'].apply(lambda x: '' if x == 0 or pd.isna(x) else str(x)) |
| df['error_explanation'] = excel_df['Error explanation'].apply(lambda x: '' if x == 0 or pd.isna(x) else str(x)) |
| df['error_count'] = excel_df['Error number'].fillna(0).astype(int) |
| |
| print(f"Loaded {len(df)} samples") |
| |
| # Create dataset |
| dataset_dict = { |
| 'audio': df['audio_path'].tolist(), |
| 'label': df['label'].tolist(), |
| 'participant_id': df['participant_id'].tolist(), |
| 'verse_number': df['verse'].tolist(), |
| 'verse_text': df['verse_text'].tolist(), |
| 'error_type': df['error_type'].tolist(), |
| 'error_location': df['error_location'].tolist(), |
| 'error_explanation': df['error_explanation'].tolist(), |
| 'error_count': df['error_count'].tolist(), |
| } |
| |
| dataset = Dataset.from_dict(dataset_dict) |
| dataset = dataset.cast_column('label', ClassLabel(names=['error', 'correct'])) |
| dataset = dataset.cast_column('audio', Audio(sampling_rate=16000)) |
| |
| # Split |
| dataset = dataset.train_test_split(test_size=0.2, seed=42, stratify_by_column='label') |
| |
| print(f"Train: {len(dataset['train'])}, Test: {len(dataset['test'])}") |
| |
| # Upload |
| print(f"\\nUploading to {HF_USERNAME}/{REPO_NAME}...") |
| dataset.push_to_hub( |
| f"{HF_USERNAME}/{REPO_NAME}", |
| private=False |
| ) |
| |
| print(f"\\n✅ Upload complete!") |
| print(f"View your dataset at: https://huggingface.co/datasets/{HF_USERNAME}/{REPO_NAME}") |
| ''' |
| |
| upload_path = os.path.join(OUTPUT_PATH, "upload_to_hf.py") |
    with open(upload_path, 'w', encoding='utf-8') as f:
| f.write(upload_script) |
| |
| print(f"Upload script created: {upload_path}") |
| |
| return dataset |
|
|
| if __name__ == "__main__": |
| dataset = main() |
|
|