Sin2pi committed on
Commit
4073240
·
verified ·
1 Parent(s): f544df3

Create dataset_download.py

Browse files
Files changed (1) hide show
  1. dataset_download.py +122 -0
dataset_download.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import os
import re
import shutil

import librosa
import neologdn
import numpy as np
import soundfile as sf
from datasets import load_dataset, Audio
from tqdm import tqdm
6
+
7
def have(a):
    """Return True when *a* is an actual value, i.e. not None."""
    if a is None:
        return False
    return True
9
+
10
def aorb(a, b):
    """Return *a* unless it is None, in which case return *b*."""
    return b if a is None else a
12
+
13
# Load the corpus and drop rows with an empty transcript.
# BUG FIX: the original fallback aorb("text", "transcription") always
# returned "text" (a non-None literal), so "transcription" was unreachable.
# The chain below checks which column actually exists in the sample.
dataset = load_dataset(
    "Sin2pi/JA_audio_JA_text_180k_samples", trust_remote_code=True
)["train"].filter(
    lambda sample: bool(
        sample[
            "sentence"
            if "sentence" in sample
            else ("text" if "text" in sample else "transcription")
        ]
    )
)
name = "JA_audio_JA_text_180k"

output_root = "./datasets/"  # base directory for all exported datasets (was misspelled "ouput_dir")
out_file = 'metadata.csv'    # final filename -> transcript mapping
folder_path = os.path.join(output_root, name)
os.makedirs(folder_path, exist_ok=True)

# Default silence threshold (dB below peak) shared by the trimming helpers.
top_db = 30
22
+
23
def is_silent(mp3_file, threshold=0.025):
    """Return True if *mp3_file* is missing or contains no audible frame.

    A file counts as silent when every short-time RMS value stays below
    *threshold*.  A missing file is treated as silent so callers can
    uniformly discard it.
    """
    if not os.path.exists(mp3_file):
        return True
    # Sample rate is not needed for the energy test; load at native rate.
    y, _ = librosa.load(mp3_file, sr=None)
    rms = librosa.feature.rms(y=y)[0]
    # Vectorised equivalent of all(v < threshold for v in rms).  A
    # zero-length clip yields an empty rms array; treat it as silent
    # (the original's all() over an empty iterable did the same).
    return rms.size == 0 or float(rms.max()) < threshold
29
+
30
def remove_silence(input_file, output_file, top_db=top_db):
    """Cut the silent stretches out of *input_file* and write the kept
    audio, concatenated back-to-back, to *output_file*.

    Newly written files are appended to the global ``csv_file2`` manifest
    so the metadata pass can find them.  Existing output files are left
    untouched and NOT logged (the caller handles that case).
    """
    y, sr = sf.read(input_file)
    intervals = librosa.effects.split(y, top_db=top_db)
    if len(intervals):
        # Slice-and-concatenate keeps any multi-channel layout intact and
        # avoids the original's per-sample Python-level list.extend().
        trimmed = np.concatenate([y[start:end] for start, end in intervals])
    else:
        # Everything was silent: write a zero-length clip, matching the
        # original's behaviour of writing the empty list.
        trimmed = y[:0]
    if not os.path.exists(output_file):
        sf.write(output_file, trimmed, sr)
        with open(csv_file2, "a", encoding='utf-8') as f:
            f.write(os.path.basename(output_file) + "\n")
41
+
42
def process_directory(input_dir, output_dir, top_db=top_db):
    """Silence-trim every ``.mp3`` in *input_dir* into *output_dir*.

    Fully silent results are logged to the global ``csv_file`` and moved
    to ``removed_dir``; kept results are logged to ``csv_file2``.  Source
    files are DELETED after processing.  Both manifests are rebuilt from
    scratch on every call.
    """
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(removed_dir, exist_ok=True)
    # Rebuild both manifests from scratch for this run.
    open(csv_file, 'w', encoding='utf-8').close()
    open(csv_file2, 'w', encoding='utf-8').close()

    for filename in os.listdir(input_dir):
        if not filename.endswith(".mp3"):
            continue
        input_file = os.path.join(input_dir, filename)
        output_file = os.path.join(output_dir, filename)
        removed_file = os.path.join(removed_dir, filename)

        if not os.path.exists(output_file):
            remove_silence(input_file, output_file, top_db)
        else:
            # BUG FIX: the file was already trimmed in a previous run.
            # The manifest was just truncated above, so re-record it here
            # or the clip would silently vanish from the final metadata.
            with open(csv_file2, "a", encoding='utf-8') as f:
                f.write(filename + "\n")

        # Quarantine clips that came out fully silent after trimming.
        if os.path.exists(output_file) and is_silent(output_file):
            with open(csv_file, "a", encoding='utf-8') as f:
                f.write(os.path.basename(output_file) + "\n")
            shutil.move(output_file, removed_file)

        # The untrimmed original is no longer needed once processed.
        if os.path.exists(input_file):
            os.remove(input_file)
66
+
67
# Paths used by the trimming pass; the helper functions above read these
# as module-level globals, so they must be defined before process_directory
# is called.
input_dir = folder_path
output_dir = folder_path + "/trimmed/"
removed_dir = folder_path + "/removed/"
csv_file = folder_path + "/removed.csv"        # manifest of silent, discarded clips
csv_file2 = folder_path + "/not_removed.csv"   # manifest of kept, trimmed clips

# Filtering thresholds for the export loop below.
min_char = 4   # minimum transcript length in characters
# NOTE(review): these two names shadow the builtins max()/min(); they hold
# the allowed audio duration range in seconds.  Rename with care — they are
# read further down in the export loop.
max = 20.0
min = 1.0

# Any transcript still matching this class after cleanup is rejected
# (ASCII digits, Latin letters — ranges are redundantly repeated — and
# music-note symbols; a leading space is also in the class).
char = '[ 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890♬♪♩♫]'
# Punctuation/symbols stripped from transcripts before normalisation.
special_characters = '[“%‘”~゛#$%&()*+:;〈=〉@^_{|}~"█』『.;:<>_()*&^$#@`, ]'
79
+
80
# Decode all audio at a uniform 16 kHz for export.
dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
sentence_map = {}  # audio filename -> cleaned transcript

# Start the metadata file from scratch.
open(os.path.join(folder_path, out_file), 'w', encoding='utf-8').close()

# Text-cleanup substitutions, applied in order.  Hoisted out of the loop —
# the original rebuilt this list on every iteration.
_patterns = [(r"…", '。'), (r"!!", '!'), (special_characters, ""), (r"\s+", "")]

for i, sample in tqdm(enumerate(dataset)):
    # NOTE(review): assumes the transcript column is literally "sentence";
    # the filter at load time also tolerated "text"/"transcription" —
    # confirm against the dataset schema.
    if sample["sentence"] == "":
        continue
    audio_sample_name = name + f'_{i}.mp3'
    audio_path_original = os.path.join(folder_path, audio_sample_name)

    text = sample["sentence"]
    for pattern, replacement in _patterns:
        text = re.sub(pattern, replacement, text)
    text = neologdn.normalize(text, repeat=1)
    # BUG FIX: cleanup can empty the string; the original then crashed
    # with an IndexError on text[-1].  Skip such rows instead.
    if not text:
        continue
    if text[-1] not in ["!", "?", "。"]:
        text += "。"  # ensure a sentence-final punctuation mark
    sample["sentence"] = text

    # Hoist the audio-column lookup — the original recomputed the
    # 'file_url'/'audio' conditional four times per sample.
    audio = sample["file_url" if "file_url" in sample else "audio"]
    audio_length = len(audio["array"]) / audio["sampling_rate"]

    # `max`/`min` are the duration bounds defined above (they shadow the
    # builtins); `char` rejects Latin/digit/music-symbol transcripts.
    # len(text) > min_char subsumes the original's bool(text) check.
    if max > audio_length > min and not re.search(char, text) and len(text) > min_char:
        if not os.path.exists(audio_path_original):
            sf.write(audio_path_original, audio["array"], audio["sampling_rate"])
        sentence_map[audio_sample_name] = text
106
+
107
+ print(f"Downloaded {len(sentence_map)} audio files to {folder_path}. Starting silence trimming...")
108
+ process_directory(input_dir, output_dir)
109
+ print(f"Silence trimming complete. Trimmed files are in {output_dir}, silent files moved to {removed_dir}.")
110
+
111
# Emit the final metadata.csv: one "filename,transcript" row per kept clip.
print(f"Generating final metadata.csv in {folder_path}...")
metadata_path = os.path.join(folder_path, out_file)
# Open both files once — the original reopened metadata.csv in append
# mode for every matching row.
with open(csv_file2, 'r', encoding='utf-8') as f_not_removed, \
        open(metadata_path, 'a', encoding='utf-8') as transcription_file:
    for line in f_not_removed:
        trimmed_filename = line.strip()
        if trimmed_filename not in sentence_map:
            continue
        # BUG FIX: a clip can be logged in not_removed.csv and then be
        # detected as silent and moved to removed_dir; the original still
        # emitted a metadata row pointing at audio that no longer exists.
        if not os.path.exists(os.path.join(output_dir, trimmed_filename)):
            continue
        transcription_file.write(f"{trimmed_filename},{sentence_map[trimmed_filename]}\n")
print(f"Metadata.csv generated for {os.path.join(folder_path, out_file)}.")