| | import numpy as np |
| | import librosa |
| | import random |
| | import pickle |
| | import os |
| | import torch |
| |
|
| | from torch.utils.data import Dataset, DataLoader |
| | from speechbrain.processing.signal_processing import reverberate |
| | from torch.nn.utils.rnn import pad_sequence |
| |
|
| |
|
def changed_index(ind, step = 0):
    """Mark run boundaries in a 1-D tensor of sorted indices.

    Arguments
    ---------
    ind : long (Tensor)
        1-D tensor of indices (e.g. one output of ``torch.where``).
    step : int
        0  -> True where the NEXT element differs from the current one
              (end of a run of equal values; last element always True).
        1  -> True where the next element is not current+1
              (end of a consecutive run; last element always True).
        -1 -> True where the previous element is not current-1
              (start of a consecutive run; first element always True).

    Returns
    -------
    bool (Tensor)
        Boolean mask of the same length as ``ind``.
    """
    # Start from an all-False mask of matching shape.
    mask = torch.zeros_like(ind, dtype=torch.bool)
    if step == -1:
        # Position i continues a run when ind[i] == ind[i-1] + 1.
        mask[1:] = ind[1:] == ind[:-1] + 1
    else:
        # Position i continues a run when ind[i] == ind[i+1] - step.
        mask[:-1] = ind[:-1] == ind[1:] - step
    # Invert: boundaries are where the run does NOT continue.
    return ~mask
| |
|
| |
|
def post_processing_VAD(vad_out, goal = 1, len_frame_ms = 20, sensitivity_ms = 200):
    """Post-processing of VAD models to change 0 labels to 1 labels according to a sensitivity.

    Any gap of non-`goal` frames lying between two `goal` runs in the same row
    is overwritten with `goal` when the gap is at most `sensitivity_ms` long
    (e.g. short pauses between speech segments are merged into speech).

    Arguments
    ---------
    vad_out : float (Tensor)
        Output of the VAD model, shape (batch, frames).
    goal : int (Tensor)
        The goal of change.
    len_frame_ms : float
        Length of decision frame.
    sensitivity_ms : float
        Threshold to change labels that are less than it.

    Returns
    -------
    vad_out : float (ndarray)
        The post-processed output as a NumPy array.

    """
    vad_out = torch.tensor(vad_out)
    # Maximum fillable gap, expressed in frames; never below one frame.
    Th = max(int(sensitivity_ms // len_frame_ms), 1)
    # Row / column coordinates of every frame already labelled `goal`.
    ind0,ind1 = torch.where(vad_out== goal)

    if len(ind0) != 0:
        ind1_max = vad_out.shape[-1] - 1
        # True at the last coordinate belonging to each row (row id changes next).
        ind0_last_bool = changed_index(ind0.clone())

        # First/last flat positions of each row's span of coordinates.
        ind0_last = torch.where(ind0_last_bool)[0]
        ind0_first = torch.zeros_like(ind0_last)
        ind0_first[1:] = ind0_last[:-1] + 1
        ind0_first[0] = 0

        # End of each consecutive run of goal-columns (step=1); a row's final
        # run has no gap after it, so it is excluded.
        ind1_l1_bool = changed_index(ind1.clone(), step = 1)
        ind1_l1_bool[ind0_last] = False

        # Start of each consecutive run (step=-1); a row's first run has no
        # gap before it, so it is excluded. Run-ends and next run-starts now
        # pair up one-to-one, each pair bracketing one gap.
        ind1_f1_bool = changed_index(ind1.clone(), step = -1)
        ind1_f1_bool[ind0_first] = False

        # Drop pairs whose gap (start - end - 1) exceeds Th frames.
        dif_bool = ind1[ind1_f1_bool] - ind1[ind1_l1_bool] > Th + 1
        l1_bool_temp = ind1_l1_bool[ind1_l1_bool].clone()
        l1_bool_temp[dif_bool] = False
        ind1_l1_bool[ind1_l1_bool.clone()] = l1_bool_temp

        f1_bool_temp = ind1_f1_bool[ind1_f1_bool].clone()
        f1_bool_temp[dif_bool] = False
        ind1_f1_bool[ind1_f1_bool.clone()] = f1_bool_temp

        second_ind = ind1[ind1_l1_bool].clone()
        # Walk each retained gap frame-by-frame (offset i from the run end),
        # writing `goal` only while still strictly before the next run start.
        for i in range(1,Th+1):
            second_ind = torch.clip(ind1[ind1_l1_bool]+i,0,ind1_max)
            desired_out = (second_ind < ind1[ind1_f1_bool])
            temp_b = vad_out[ind0[ind1_l1_bool], second_ind].clone()
            temp_b[desired_out] = goal
            vad_out[ind0[ind1_l1_bool], second_ind] = temp_b.clone()
    # NOTE(review): conversion placed at function level so the return type is
    # always ndarray, including when no `goal` frames exist — confirm against
    # the original (un-mangled) indentation.
    vad_out = vad_out.numpy()
    return vad_out
| |
|
def creat_data_pathes(base_path,
                      groups,
                      g_scale,
                      filenames = "train_files_path.txt"):
    """Gather relative sample paths for every group, replicate, and shuffle.

    Arguments
    ---------
    base_path : str
        Root directory containing one sub-directory per group.
    groups : list of str
        Group sub-directory names.
    g_scale : list of int
        Per-group replication factor (oversampling).
    filenames : str
        Name of the pickled path-list file inside each group directory.

    Returns
    -------
    list of str
        Paths of the form ``group/relative_path``, deterministically
        shuffled (fixed seed, shuffled twice).
    """
    total_paths = []
    for group_name, scale in zip(groups, g_scale):
        list_file = os.path.join(base_path, group_name, filenames)
        with open(list_file, 'rb') as fp:
            group_paths = pickle.load(fp) * scale
        total_paths.extend(os.path.join(group_name, p) for p in group_paths)

    # Fixed seed keeps the ordering reproducible across runs.
    random.seed(12)
    random.shuffle(total_paths)
    random.shuffle(total_paths)
    return total_paths
| |
|
def pyannote_frame(len_inp,sinc_len):
    """Compute the decision-frame count of a PyanNet-style front end.

    Mirrors a SincNet-like first layer (kernel 251, stride ``sinc_len``)
    followed by three conv/max-pool stages (pool 3; the last two convs
    shrink the length by 4 each).

    Arguments
    ---------
    len_inp : int
        Number of input waveform samples.
    sinc_len : int
        Stride of the first (sinc) convolution.

    Returns
    -------
    tuple of int
        (number of output frames, samples per output frame).
    """
    frames = (len_inp - 251 + sinc_len) // sinc_len
    # Three pool-by-3 stages; the last two are preceded by a length-4 shrink.
    for shrink in (0, 4, 4):
        frames = (frames - shrink) // 3
    return frames, len_inp // frames
| |
|
| |
|
def frame_target(target, num_frame, frame_shift):
    """Downsample a sample-level binary target to frame-level by majority vote.

    Arguments
    ---------
    target : Tensor
        Sample-level labels, shape (batch, samples).
    num_frame : int
        Number of output frames.
    frame_shift : int
        Samples per frame.

    Returns
    -------
    Tensor (float)
        Frame labels, shape (batch, num_frame, 1); 1 where more than half
        of the frame's samples are active, else 0.
    """
    usable = num_frame * frame_shift
    framed = target[:, :usable].reshape(target.shape[0], num_frame, frame_shift)
    # Per-frame mean (keepdim), then hard majority threshold at 0.5.
    averaged = framed.float().mean(-1, True)
    return (averaged > 0.5).float()
| | |
| |
|
class VAD_DATASET(Dataset):
    """On-the-fly noisy-speech dataset for VAD training.

    Each item mixes one clean utterance with one or two noise recordings at
    a random SNR, optionally applying reverberation, and returns the noisy
    waveform together with sample-level and frame-level VAD targets.

    Arguments
    ---------
    base_clean_path : str
        Root directory of the clean speech files.
    base_noise_path : str
        Root directory of the noise files.
    base_rever_path : str
        Root directory of the room impulse responses.
    base_lbl : str
        Directory containing the pickled frame-level label files.
    clean_paths, noise_paths, reverb_paths : list of str
        Relative paths under the corresponding base directories.
    sampling_rate : int
        Target sampling rate for all loaded audio.
    max_length : int
        Maximum clip length in samples (longer clips are randomly cropped).
    max_noise_n : int
        Maximum number of noise sources mixed in (1 or 2 supported).
    t_reverb : float
        Probability of applying reverberation; <= 0 disables it.
    min_snr : int
        Lower bound of the SNR grid (dB); grid is range(min_snr, 31, 2).
    is_post_process : bool
        Whether to smooth the frame labels with post_processing_VAD.
    sens_ms : float
        Sensitivity (ms) passed to post_processing_VAD.
    """

    def __init__(self,
                 base_clean_path,
                 base_noise_path,
                 base_rever_path,
                 base_lbl,
                 clean_paths,
                 noise_paths,
                 reverb_paths,
                 sampling_rate = 16000,
                 max_length = 10 * 16000,
                 max_noise_n = 2,
                 t_reverb = -1,
                 min_snr = -10,
                 is_post_process = False,
                 sens_ms = 100
                 ):

        self.base_clean_path = base_clean_path
        self.base_noise_path = base_noise_path
        self.base_rever_path = base_rever_path
        self.base_lbl = base_lbl
        self.clean_paths = clean_paths
        self.noise_paths = noise_paths
        self.reverb_paths = reverb_paths
        self.is_post_process = is_post_process
        self.sens_ms = sens_ms
        self.len_clean = len(clean_paths)
        self.len_noise = len(noise_paths)
        self.len_reverb = len(reverb_paths)

        self.sampling_rate = sampling_rate
        self.max_length = max_length
        self.max_noise_n = max_noise_n
        self.t_reverb = t_reverb

        # Discrete SNR grid in 2 dB steps, e.g. -10, -8, ..., 30.
        self.len_snr = len(range(min_snr, 31, 2))
        self.SNR_amount = range(min_snr, 31, 2)

        print("Dataset is ready.")

    def create_reverb(self, sig, reverb_filename):
        """Convolve `sig` with the RIR at `reverb_filename` (peak-rescaled)."""
        reverb_ = torch.from_numpy(self.load_sample(reverb_filename))
        reverb_sig = reverberate(sig.unsqueeze(dim=0), reverb_, rescale_amp='peak')
        return reverb_sig.squeeze()

    def load_sample(self, path):
        """Load `path` as mono float waveform at `self.sampling_rate`."""
        waveform, _ = librosa.load(path, sr=self.sampling_rate)
        return waveform

    def crop_noise(self, noise, len_x):
        """Randomly crop `noise` to `len_x` samples (no-op if not longer)."""
        len_n = len(noise)
        extra = len_n - len_x
        if extra > 0:
            first_ind = random.randint(0, extra - 1)
            noise = noise[first_ind:first_ind + len_x]

        return noise

    def crop_audio(self, x):
        """Randomly crop `x` to at most `self.max_length` samples.

        Returns
        -------
        (ndarray, int)
            The (possibly cropped) waveform and its length.
        """
        len_x = len(x)
        extra = len_x - self.max_length
        if extra > 0:
            first_ind = random.randint(0, extra - 1)
            x = x[first_ind:first_ind + self.max_length]
            len_x = self.max_length

        return x, len_x

    def creat_noisy_data(self, x_clean, noise, SNR):
        """Mix `noise` into `x_clean` at the requested `SNR` (dB)."""
        sp_ener = torch.sum(x_clean**2)
        noi_ener = torch.sum(noise**2)
        # Scale factor so that energy ratio matches the target SNR.
        a = (sp_ener / noi_ener)**0.5 * 10**(-SNR / 20)
        x_noisy = x_clean + a * noise
        return x_noisy

    def prepare_noise(self, path, len_x):
        """Load a noise file and tile/crop it to exactly cover `len_x` samples."""
        noise = self.load_sample(path)
        len_n = len(noise)
        if len_n < len_x:
            # Repeat the clip enough times, then crop to length below.
            repeat = len_x // len_n + 1
            noise = [noise for _ in range(repeat)]
            noise = np.concatenate(noise, axis=0)

        noise = self.crop_noise(noise, len_x)
        return noise

    def creat_target(self, clean_flnm, len_x):
        """Build sample-level and frame-level targets for a clean file.

        The label file name is derived from the clean file name (text before
        the "SPLIT" marker) + ".txt"; it holds pickled frame labels, one per
        20 ms frame (320 samples at 16 kHz).
        """
        label_flnm = os.path.basename(clean_flnm).split("SPLIT")[0] + ".txt"
        with open(os.path.join(self.base_lbl, label_flnm), 'rb') as handle:
            framed_label = np.array(pickle.load(handle))

        if self.is_post_process:
            framed_label = framed_label[None, ...]
            framed_label = post_processing_VAD(framed_label,
                                               goal = 1,
                                               len_frame_ms = 20,
                                               sensitivity_ms = self.sens_ms).squeeze()
        # Expand frame labels to sample resolution (320 samples per frame).
        label = np.repeat(framed_label, 320, axis=0)

        if label.shape[-1] > len_x:
            label = label[:len_x]

        return label, framed_label

    def __len__(self):
        return len(self.clean_paths)

    def __getitem__(self, index):

        x_clean = self.load_sample(os.path.join(self.base_clean_path,
                                                self.clean_paths[index]))
        x_clean, len_x = self.crop_audio(x_clean)
        # Random gain augmentation.
        x_clean = x_clean * np.random.uniform(0.7, 1, 1)
        noise = self.prepare_noise(os.path.join(self.base_noise_path,
                                                self.noise_paths[random.sample(range(self.len_noise),
                                                                               1)[0]]),
                                   len_x)

        x_clean = torch.from_numpy(x_clean)
        noise = torch.from_numpy(noise)

        is_reverb = torch.rand(1) < self.t_reverb

        if is_reverb:
            x_clean = self.create_reverb(x_clean,
                                         os.path.join(self.base_rever_path,
                                                      self.reverb_paths[random.sample(range(self.len_reverb),
                                                                                      1)[0]]))
            noise = self.create_reverb(noise,
                                       os.path.join(self.base_rever_path,
                                                    self.reverb_paths[random.sample(range(self.len_reverb),
                                                                                    1)[0]]))

        n_o_n = random.randint(1, self.max_noise_n)
        if n_o_n == 2:
            noise_2 = self.prepare_noise(os.path.join(self.base_noise_path,
                                                      self.noise_paths[random.sample(range(self.len_noise),
                                                                                     1)[0]]),
                                         len_x)

            noise_2 = torch.from_numpy(noise_2)

            if is_reverb:
                # Bug fix: previously reverberated `noise` again and discarded
                # the freshly drawn `noise_2` waveform.
                noise_2 = self.create_reverb(noise_2,
                                             os.path.join(self.base_rever_path,
                                                          self.reverb_paths[random.sample(range(self.len_reverb),
                                                                                          1)[0]]))
            noise = noise + noise_2

        snr = self.SNR_amount[random.sample(range(self.len_snr), 1)[0]]
        x_noisy = self.creat_noisy_data(x_clean, noise, snr)

        target, framed_target = self.creat_target(self.clean_paths[index], len_x)
        target = torch.from_numpy(target)
        framed_target = torch.from_numpy(framed_target)

        return x_noisy, target, framed_target, is_reverb, n_o_n, snr
| |
|
| | |
| | |
def collate_fn(batch):
    """Collate dataset items into padded batch tensors.

    Arguments
    ---------
    batch : list of tuple
        Items as returned by VAD_DATASET.__getitem__:
        (noisy_input, target, framed_target, is_reverb, n_o_n, snr);
        the last three fields are dropped here.

    Returns
    -------
    tuple of Tensor
        (inputs, targets, framed_targets, length_ratio) where the first three
        are zero-padded to the batch maximum and length_ratio is each input's
        true length divided by the padded length.
    """
    # Bug fix: a `framed_targets` list was never initialized; the loop called
    # .append on the framed_target *tensor* itself (AttributeError at runtime).
    inputs, targets, framed_targets, lengths = [], [], [], []
    for noisy_input, target, framed_target, _, _, _ in batch:
        inputs.append(noisy_input)
        targets.append(target)
        framed_targets.append(framed_target)
        lengths.append(len(noisy_input))

    inputs = pad_sequence(inputs, batch_first=True, padding_value=0.0)
    targets = pad_sequence(targets, batch_first=True, padding_value=0.0)
    framed_targets = pad_sequence(framed_targets, batch_first=True, padding_value=0.0)
    length_ratio = torch.tensor(lengths, dtype=torch.long) / inputs.shape[1]

    return inputs, targets, framed_targets, length_ratio
| | |
| | |
| | |
def audio_data_loader(base_clean_path,
                      base_noise_path,
                      base_rever_path,
                      clean_paths,
                      noise_paths,
                      reverb_paths,
                      sampling_rate,
                      max_length,
                      max_noise_n,
                      t_reverb,
                      min_snr,
                      batch_size,
                      num_workers,
                      pin_memory,
                      training,
                      base_lbl = None
                      ):
    """Build a DataLoader over a VAD_DATASET.

    Arguments mirror VAD_DATASET plus the usual DataLoader knobs; `training`
    controls shuffling. `base_lbl` (directory of the pickled label files) is
    appended with a default to stay backward-compatible with positional calls.

    Returns
    -------
    torch.utils.data.DataLoader
        Loader with padding collation and drop_last=True.
    """
    # Bug fix: previously instantiated the undefined `Enhancement_DATASET`
    # and omitted the dataset's required `base_lbl` argument, shifting every
    # following positional argument by one.
    dataset = VAD_DATASET(base_clean_path,
                          base_noise_path,
                          base_rever_path,
                          base_lbl,
                          clean_paths,
                          noise_paths,
                          reverb_paths,
                          sampling_rate,
                          max_length,
                          max_noise_n,
                          t_reverb,
                          min_snr
                          )

    loader = DataLoader(dataset,
                        batch_size = batch_size,
                        shuffle = training,
                        drop_last = True,
                        collate_fn = collate_fn,
                        num_workers = num_workers,
                        pin_memory = pin_memory
                        )

    return loader