nambh34 commited on
Commit
e9f552f
·
1 Parent(s): aec47fc

Initial dataset upload with LFS tracking for audio files

Browse files
Files changed (1) hide show
  1. generate_hf_meta.py +78 -0
generate_hf_meta.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import soundfile as sf
3
+ from tqdm import tqdm
4
+
5
def create_hf_metafile(dataset_type, base_data_path, output_meta_path):
    """Create a pipe-delimited Hugging Face metadata file for one dataset split.

    Reads ``<base_data_path>/<dataset_type>/prompts.txt`` (one
    ``<file_id> <transcription>`` pair per line) and scans
    ``<base_data_path>/<dataset_type>/waves/<speaker>/*.wav``. For every wav
    whose basename has a transcription, writes one line to
    *output_meta_path*::

        <split>/waves/<speaker>/<file>.wav|<UPPERCASE transcription>|<duration>

    Args:
        dataset_type: Split name, e.g. "train" or "test".
        base_data_path: Root directory containing the split directories.
        output_meta_path: Destination path for the metadata file.

    Returns:
        None. Prints an error and returns early if the prompts file or the
        waves directory is missing.
    """
    subset_path = os.path.join(base_data_path, dataset_type)  # Path to the train or test directory
    prompts_file = os.path.join(subset_path, "prompts.txt")
    waves_base_dir = os.path.join(subset_path, "waves")

    if not os.path.exists(prompts_file):
        print(f"Error: Cannot find file {prompts_file}")
        return
    if not os.path.exists(waves_base_dir):
        print(f"Error: Cannot find directory {waves_base_dir}")
        return

    print(f"Processing {dataset_type}...")

    # Read prompts.txt: "<file_id> <transcription>" per line.
    prompt_data = {}
    with open(prompts_file, "r", encoding="utf-8") as pf:
        for line in pf:
            parts = line.strip().split(" ", 1)
            if len(parts) == 2:
                file_id, transcription = parts
                prompt_data[file_id] = transcription.upper()  # Convert to uppercase for consistency
            else:
                # Fix: the original guarded this with `except ValueError`,
                # which is unreachable (split/unpack here never raises), so
                # malformed lines were silently dropped.
                print(f"Ignoring line with incorrect format in {prompts_file}: {line.strip()}")

    with open(output_meta_path, "w", encoding="utf-8") as meta_f:
        # Iterate through speaker directories; sorted() makes the output
        # deterministic across filesystems.
        for speaker_dir in tqdm(sorted(os.listdir(waves_base_dir))):
            speaker_path = os.path.join(waves_base_dir, speaker_dir)
            if not os.path.isdir(speaker_path):
                continue
            for wav_filename in sorted(os.listdir(speaker_path)):
                if not wav_filename.endswith(".wav"):
                    continue
                file_id_without_ext = os.path.splitext(wav_filename)[0]
                if file_id_without_ext not in prompt_data:
                    # No transcription for this wav; skip it silently
                    # (matches the original's commented-out branch).
                    continue

                transcription = prompt_data[file_id_without_ext]
                full_wav_path = os.path.join(speaker_path, wav_filename)

                try:
                    # Fix: the original opened TWO SoundFile handles per wav
                    # and closed neither (a file-handle leak over a large
                    # corpus). sf.info() reads the header once and closes
                    # the file itself.
                    info = sf.info(full_wav_path)
                    duration = info.frames / info.samplerate
                except Exception as e:
                    print(f"Error reading file {full_wav_path}: {e}. Skipping.")
                    continue

                # Create relative path for Hugging Face Hub
                # Example: train/waves/SPEAKER01/SPEAKER01_001.wav
                relative_path = os.path.join(dataset_type, "waves", speaker_dir, wav_filename).replace(os.sep, '/')

                meta_f.write(f"{relative_path}|{transcription}|{duration:.4f}\n")
    print(f"Meta file created: {output_meta_path}")
61
+
62
if __name__ == "__main__":
    # The dataset root is the directory containing this script.
    current_script_dir = os.path.dirname(os.path.abspath(__file__))

    # Generate one metadata file per split (train_meta.txt, test_meta.txt).
    for split in ("train", "test"):
        create_hf_metafile(
            dataset_type=split,
            base_data_path=current_script_dir,
            output_meta_path=os.path.join(current_script_dir, f"{split}_meta.txt"),
        )
    print("Meta file creation completed.")