File size: 2,041 Bytes
f7c8e84
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
# Dataset Loading Script
# Save this as load_dataset.py to use

import csv
import json
import os

from datasets import Dataset, Audio, Value, Features

def load_dataset(jsonl_path="data.jsonl"):
    """Load an audio dataset from a JSONL manifest into a `datasets.Dataset`.

    Each line of the manifest is a JSON object with a required "audio" key
    (a path to the audio file, relative to the repo) plus optional metadata
    fields; missing optional fields fall back to sensible defaults.

    Args:
        jsonl_path: Path to the JSONL manifest file. Defaults to
            "data.jsonl" (the previous hard-coded behavior).

    Returns:
        A `datasets.Dataset` with a typed feature schema.

    Raises:
        FileNotFoundError: If `jsonl_path` does not exist.
        json.JSONDecodeError: If a non-empty manifest line is not valid JSON.
        KeyError: If a record lacks the required "audio" key.
    """
    # Typed schema. Audio(sampling_rate=None) preserves each clip's native
    # sampling rate instead of resampling to a fixed one.
    features = Features({
        "audio": Audio(sampling_rate=None),
        "text": Value("string"),
        "speaker_id": Value("string"),
        "language": Value("string"),
        "emotion": Value("string"),
        "original_dataset": Value("string"),
        "original_filename": Value("string"),
        "start_time": Value("float32"),
        "end_time": Value("float32"),
        "duration": Value("float32")
    })

    # Defaults used when a record omits an optional field. Driving the
    # per-record loop off this dict keeps the columns and the schema from
    # drifting apart (the old version had ten hand-written append lines).
    defaults = {
        "text": "",
        "speaker_id": "",
        "language": "en",
        "emotion": "neutral",
        "original_dataset": "",
        "original_filename": "",
        "start_time": 0.0,
        "end_time": 0.0,
        "duration": 0.0,
    }

    # Column-oriented accumulator matching the feature schema.
    data = {"audio": []}
    data.update({key: [] for key in defaults})

    # Read the JSONL manifest: one JSON object per line.
    with open(jsonl_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank lines (e.g. a trailing newline).
                continue
            obj = json.loads(line)
            data["audio"].append(obj["audio"])  # required: relative path within repo
            for key, default in defaults.items():
                data[key].append(obj.get(key, default))

    # Create dataset
    dataset = Dataset.from_dict(data, features=features)
    return dataset

# Allow running this file directly as a quick sanity check.
if __name__ == "__main__":
    ds = load_dataset()
    print(f"Dataset loaded with {len(ds)} examples")