code
stringlengths
17
6.64M
def get_Adam(model_params, lr=0.0002, **kwargs):
    """Build an Adam optimizer over the parameters of all modules in *model_params*.

    :param model_params: iterable of torch modules whose ``.parameters()`` are optimized
    :param lr: learning rate (default 0.0002)
    :param kwargs: accepted for interface compatibility; not used here
    :return: a configured ``Adam`` optimizer with betas fixed at (0.9, 0.999)
    """
    collected = [p for module in model_params for p in module.parameters()]
    return Adam(collected, lr=lr, betas=(0.9, 0.999))
def get_AdamW(model_params, lr=0.0002, **kwargs):
    """Build an AdamW optimizer over the parameters of all modules in *model_params*.

    :param model_params: iterable of torch modules whose ``.parameters()`` are optimized
    :param lr: learning rate (default 0.0002)
    :param kwargs: accepted for interface compatibility; not used here
    :return: a configured ``AdamW`` optimizer
    """
    collected = [p for module in model_params for p in module.parameters()]
    return AdamW(collected, lr=lr)
def get_TorchOptim(model_params, torch_optim_name, **kwargs):
    """Instantiate any ``torch.optim`` optimizer by name.

    :param model_params: iterable of torch modules whose ``.parameters()`` are optimized
    :param torch_optim_name: attribute name on ``torch.optim`` (e.g. ``'SGD'``, ``'Adam'``)
    :param kwargs: forwarded to the optimizer constructor
    :return: the instantiated optimizer
    """
    params = []
    for m in model_params:
        params += list(m.parameters())
    Opt_class = getattr(torch.optim, torch_optim_name)
    # 'total_steps' is a scheduler-side hint, not an optimizer kwarg; drop it if present.
    # Bug fix: the original kwargs.pop('total_steps') raised KeyError when the
    # caller did not pass total_steps — use a default so it is optional.
    kwargs.pop('total_steps', None)
    optim = Opt_class(params, **kwargs)
    return optim
class AdamW(Optimizer):
    """
    Implements Adam with decoupled weight decay, as introduced in
    `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.

    Parameters:
        params (:obj:`Iterable[torch.nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (:obj:`float`, `optional`, defaults to 1e-3):
            The learning rate to use.
        betas (:obj:`Tuple[float, float]`, `optional`, defaults to (0.9, 0.999)):
            Adam's betas parameters (b1, b2).
        eps (:obj:`float`, `optional`, defaults to 1e-7):
            Adam's epsilon for numerical stability.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            Decoupled weight decay to apply.
        correct_bias (:obj:`bool`, `optional`, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in Bert TF repository they use :obj:`False`).
    """

    def __init__(self, params: Iterable[torch.nn.parameter.Parameter], lr: float = 0.001,
                 betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-07,
                 weight_decay: float = 0.0, correct_bias: bool = True):
        # Validate hyper-parameters up front so a bad config fails loudly.
        if lr < 0.0:
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if not (0.0 <= betas[0] < 1.0):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[0]))
        if not (0.0 <= betas[1] < 1.0):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[1]))
        if not (0.0 <= eps):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
        super().__init__(params, defaults)

    def step(self, closure: Callable = None):
        """
        Performs a single optimization step.

        Arguments:
            closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # Lazy state initialization on the first step for this parameter.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # First and second moment running averages (in place).
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']:
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
                # Decoupled weight decay: applied to the weights directly,
                # scaled by the raw lr (not the bias-corrected step size).
                if group['weight_decay'] > 0.0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])
        return loss

    def get_lr(self):
        """Return the current lr of every parameter that already has optimizer state."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if len(self.state[p]) != 0:
                    lr.append(group['lr'])
        return lr
class _LRSchedule(ABC):
    """Parent of all LRSchedules here."""

    # Subclasses that decay set this True so get_lr() warns past t_total.
    warn_t_total = False

    def __init__(self, warmup=0.002, t_total=-1, **kw):
        """
        :param warmup: what fraction of t_total steps will be used for linear warmup
        :param t_total: how many training steps (updates) are planned
        :param kw:
        """
        super(_LRSchedule, self).__init__(**kw)
        if t_total < 0:
            logger.warning('t_total value of {} results in schedule not being applied'.format(t_total))
        if not (0.0 <= warmup < 1.0) and warmup != -1:
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
        # warmup == -1 means "no warmup"; clamp it to 0.
        warmup = max(warmup, 0.0)
        self.warmup = float(warmup)
        self.t_total = float(t_total)
        self.warned_for_t_total_at_progress = -1

    def get_lr(self, step, nowarn=False):
        """
        :param step: which of t_total steps we're on
        :param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps
        :return: learning rate multiplier for current update
        """
        if self.t_total < 0:
            # Schedule disabled: constant multiplier.
            return 1.0
        progress = float(step) / self.t_total
        ret = self.get_lr_(progress)
        if not nowarn and self.warn_t_total and progress > 1.0 and progress > self.warned_for_t_total_at_progress:
            logger.warning("Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly.".format(ret, self.__class__.__name__))
            # Only warn once per progress value.
            self.warned_for_t_total_at_progress = progress
        return ret

    @abc.abstractmethod
    def get_lr_(self, progress):
        """
        :param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress
        :return: learning rate multiplier for current update
        """
        return 1.0
class ConstantLR(_LRSchedule):
    """Schedule whose learning-rate multiplier is always 1, regardless of progress."""

    def get_lr_(self, progress):
        return 1.0
class WarmupCosineSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
    If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
    """

    warn_t_total = True

    def __init__(self, warmup=0.002, t_total=-1, cycles=0.5, **kw):
        """
        :param warmup: see LRSchedule
        :param t_total: see LRSchedule
        :param cycles: number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
        :param kw:
        """
        super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
        self.cycles = cycles

    def get_lr_(self, progress):
        if progress < self.warmup:
            # Linear warmup phase.
            return progress / self.warmup
        # Re-normalize progress to [0, 1] over the post-warmup span.
        progress = (progress - self.warmup) / (1 - self.warmup)
        return 0.5 * (1.0 + math.cos(math.pi * self.cycles * 2 * progress))
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
    learning rate (with hard restarts).
    """

    def __init__(self, warmup=0.002, t_total=-1, cycles=1.0, **kw):
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
        assert cycles >= 1.0

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        progress = (progress - self.warmup) / (1 - self.warmup)
        # The `% 1` makes the cosine restart hard at every cycle boundary.
        return 0.5 * (1.0 + math.cos(math.pi * ((self.cycles * progress) % 1)))
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
    """
    All training progress is divided in `cycles` (default=1.) parts of equal length.
    Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
    followed by a learning rate decreasing from 1. to 0. following a cosine curve.
    """

    def __init__(self, warmup=0.002, t_total=-1, cycles=1.0, **kw):
        # The per-cycle warmup fraction must leave room for the decay portion.
        assert warmup * cycles < 1.0
        warmup = warmup * cycles if warmup >= 0 else warmup
        super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)

    def get_lr_(self, progress):
        # Fold overall progress into the current cycle's [0, 1) span.
        progress = (progress * self.cycles) % 1.0
        if progress < self.warmup:
            return progress / self.warmup
        progress = (progress - self.warmup) / (1 - self.warmup)
        return 0.5 * (1.0 + math.cos(math.pi * progress))
class WarmupConstantSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Keeps learning rate equal to 1. after warmup.
    """

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        return 1.0
class WarmupLinearSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
    """

    warn_t_total = True

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        # Linear decay to 0 at progress==1; clamped at 0 beyond t_total.
        return max((progress - 1.0) / (self.warmup - 1.0), 0.0)
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
        schedule: schedule to use for the warmup (see above).
            Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
            If `None` or `'none'`, learning rate is always kept constant.
            Default : `'warmup_linear'`
        betas: Adams betas. Default: (0.9, 0.999)
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params=None, lr='required', warmup=-1, t_total=-1, schedule='warmup_linear',
                 betas=(0.9, 0.999), e=1e-06, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
        if lr == 'required' or lr < 0.0:
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if not (0.0 <= betas[0] < 1.0):
            raise ValueError('Invalid beta parameter at index 0: {} - should be in [0.0, 1.0['.format(betas[0]))
        if not (0.0 <= betas[1] < 1.0):
            raise ValueError('Invalid beta parameter at index 1: {} - should be in [0.0, 1.0['.format(betas[1]))
        if not (e >= 0.0):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        # A schedule name is turned into an _LRSchedule instance; an explicit
        # _LRSchedule instance takes precedence over warmup/t_total arguments.
        if not isinstance(schedule, _LRSchedule):
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif warmup != -1 or t_total != -1:
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, schedule=schedule, betas=betas, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Return the scheduled lr for every parameter that already has optimizer state."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    continue
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['betas']
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Bug fix: the positional (scalar, tensor) overloads of add_/addcmul_
                # were deprecated and removed from modern PyTorch; use the keyword
                # alpha=/value= forms (same math, matches the AdamW class above).
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                update = next_m / (next_v.sqrt() + group['e'])
                # L2 regularization added to the update (BERT-style, not decoupled).
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                state['step'] += 1
        return loss
class Lamb(Optimizer):
    """Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        warmup (float, optional): portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total (int, optional): total number of training steps for the learning
            rate schedule, -1 means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
        schedule (string, optional): schedule to use for the warmup (see above).
            Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
            If `None` or `'none'`, learning rate is always kept constant.
            Default : `'warmup_linear'`
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes. Set to True for AdamW.
        correct_bias (bool, optional): adam-correction, no bias correction for Bert. Set to True for AdamW.

    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(self, params, lr=0.001, warmup=-1, t_total=-1, schedule='warmup_linear',
                 betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, adam=False, correct_bias=False):
        if not (0.0 <= lr):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not (0.0 <= eps):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not (0.0 <= betas[0] < 1.0):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not (0.0 <= betas[1] < 1.0):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not isinstance(schedule, _LRSchedule):
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif warmup != -1 or t_total != -1:
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, betas=betas, eps=eps, schedule=schedule, weight_decay=weight_decay, correct_bias=correct_bias)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def get_lr(self):
        """Return the scheduled lr for every parameter that already has optimizer state."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    continue
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.')
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Bug fix: the positional (scalar, tensor) overloads of add_/addcmul_
                # were deprecated and removed from modern PyTorch; use the keyword
                # alpha=/value= forms (same math, matches the AdamW class above).
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                step_size = group['lr']
                if group['correct_bias']:
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                lr_scheduled = step_size * group['schedule'].get_lr(state['step'])
                weight_norm = p.data.pow(2).sum().sqrt()
                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])
                adam_norm = adam_step.pow(2).sum().sqrt()
                # LAMB trust ratio: layer-wise scaling of the Adam update.
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    trust_ratio = 1
                # alpha must be a Python number; trust_ratio may be a 0-dim tensor.
                p.data.add_(adam_step, alpha=-lr_scheduled * float(trust_ratio))
        return loss
def main():
    """Convert kaldi cmvn ark features of LibriSpeech into per-utterance .npy files
    plus one length-sorted csv per split, under OUTPUT_DIR.

    Relies on module-level KALDI_ROOT, LIBRI_PATH, OUTPUT_DIR, SETS, DATA_TYPE.
    """
    if not os.path.isdir(KALDI_ROOT):
        print('CHANGE THIS TO YOUR OWN KALDI ROOT: ', KALDI_ROOT)
        exit()
    if not os.path.isdir(LIBRI_PATH):
        print('Invalid path for the kaldi librispeech dataset: ', LIBRI_PATH)
        print('Please run the kaldi scripts first! More information are described in the README file and Wiki page.')
    if not os.path.isdir(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    for s in SETS:
        ark_path = 'ark:' + LIBRI_PATH + s + '/' + DATA_TYPE + '_cmvn.ark'
        with ReadHelper(ark_path) as reader:
            output = {}
            print('Preprocessing', s, 'data...')
            cur_dir = os.path.join(OUTPUT_DIR, s.replace('_', '-'))
            if not os.path.isdir(cur_dir):
                os.mkdir(cur_dir)
            for key, array in tqdm(reader):
                array = np.asarray(array).astype('float32')
                np.save(os.path.join(cur_dir, key), array)
                output[os.path.join(s.replace('_', '-'), key + '.npy')] = len(array)
            # Sort by length, longest first, for bucketed loading downstream.
            output = sorted(output.items(), key=operator.itemgetter(1), reverse=True)
            df = pd.DataFrame(data={'file_path': [fp for fp, l in output],
                                    'length': [l for fp, l in output],
                                    'label': 'None'})
            df.to_csv(os.path.join(OUTPUT_DIR, s.replace('_', '-') + '.csv'))
    print("[ARK-TO-LIBRI] - All done, saved at '" + str(OUTPUT_DIR) + "', exit.")
    exit()
def main():
    """Convert kaldi fmllr ark features of TIMIT (10 shards per split) into
    per-utterance .npy files plus one length-sorted csv per split, under OUTPUT_DIR.

    Relies on module-level KALDI_ROOT, TIMIT_PATH, OUTPUT_DIR, SETS.
    """
    if not os.path.isdir(KALDI_ROOT):
        print('CHANGE THIS TO YOUR OWN KALDI ROOT: ', KALDI_ROOT)
        exit()
    if not os.path.isdir(TIMIT_PATH):
        print('Invalid path for the kaldi TIMIT dataset: ', TIMIT_PATH)
        print('Please run the kaldi scripts first! More information are described in the README file and Wiki page.')
    if not os.path.isdir(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    for s in SETS:
        output = {}
        print('Preprocessing', s, 'data...')
        cur_dir = os.path.join(OUTPUT_DIR, s.replace('_', '-'))
        if not os.path.isdir(cur_dir):
            os.mkdir(cur_dir)
        # The kaldi recipe shards each split into 10 ark files (1-indexed).
        for i in range(10):
            ark_path = 'ark:' + TIMIT_PATH + s + '/data/feats_fmllr_' + s + '.' + str(i + 1) + '.ark'
            with ReadHelper(ark_path) as reader:
                for key, array in tqdm(reader):
                    array = np.asarray(array).astype('float32')
                    np.save(os.path.join(cur_dir, key), array)
                    output[os.path.join(s.replace('_', '-'), key + '.npy')] = len(array)
        # Sort by length, longest first, for bucketed loading downstream.
        output = sorted(output.items(), key=operator.itemgetter(1), reverse=True)
        df = pd.DataFrame(data={'file_path': [fp for fp, l in output],
                                'length': [l for fp, l in output],
                                'label': 'None'})
        df.to_csv(os.path.join(OUTPUT_DIR, s.replace('_', '-') + '.csv'))
    print("[ARK-TO-TIMIT] - All done, saved at '" + str(OUTPUT_DIR) + "', exit.")
    exit()
def main():
    """Convert kaldi feats.scp matrices into per-utterance .npy files plus one
    length-sorted csv per split, under OUTPUT_DIR.

    Relies on module-level KALDI_PATH, OUTPUT_DIR, SETS.
    """
    if not os.path.isdir(KALDI_PATH):
        print('CHANGE THIS TO YOUR OWN KALDI PATH: ', KALDI_PATH)
        print('Please run the kaldi scripts first to generate kaldi data directory.')
        exit()
    if not os.path.isdir(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    for s in SETS:
        print('Preprocessing', s, 'data...')
        output = {}
        cur_dir = os.path.join(OUTPUT_DIR, s)
        if not os.path.isdir(cur_dir):
            os.mkdir(cur_dir)
        path = os.path.join(KALDI_PATH, s + '/feats.scp')
        for key, mat in tqdm(kaldi_io.read_mat_scp(path)):
            array = np.asarray(mat).astype('float32')
            np.save(os.path.join(cur_dir, key), array)
            output[os.path.join(s, key + '.npy')] = len(array)
        # Sort by length, longest first, for bucketed loading downstream.
        output = sorted(output.items(), key=operator.itemgetter(1), reverse=True)
        df = pd.DataFrame(data={'file_path': [fp for fp, l in output],
                                'length': [l for fp, l in output],
                                'label': 'None'})
        df.to_csv(os.path.join(OUTPUT_DIR, s + '.csv'))
    print("[ARK-TO-VOXCELEB] - All done, saved at '" + str(OUTPUT_DIR) + "', exit.")
    exit()
def get_preprocess_args():
    """Parse command-line options for length extraction over any dataset.

    :return: argparse.Namespace with input_data, output_path, audio_extension,
             name, and n_jobs attributes.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    parser.add_argument('-i', '--input_data', default='../LibriSpeech/', type=str, help='Path to your LibriSpeech directory', required=False)
    parser.add_argument('-o', '--output_path', default='./data/', type=str, help='Path to store output', required=False)
    parser.add_argument('-a', '--audio_extension', default='.flac', type=str, help='audio file type (.wav / .flac / .mp3 / etc)', required=False)
    parser.add_argument('-n', '--name', default='len_for_bucket', type=str, help='Name of the output directory', required=False)
    parser.add_argument('--n_jobs', default=-1, type=int, help='Number of jobs used for feature extraction', required=False)
    return parser.parse_args()
def extract_length(input_file):
    """Return the number of audio frames in *input_file* via torchaudio metadata."""
    torchaudio.set_audio_backend('sox_io')
    info = torchaudio.info(input_file)
    return info.num_frames
def generate_length(args, tr_set, audio_extension):
    """For each split in *tr_set*, measure every audio file's length (in parallel)
    and save a csv sorted by length, longest first, under args.output_path/args.name.

    :param args: parsed namespace with input_data, output_path, name, n_jobs
    :param tr_set: list of split names (case resolved against the filesystem)
    :param audio_extension: e.g. '.flac'
    :raises NotImplementedError: when a split directory is found in neither case
    """
    for i, s in enumerate(tr_set):
        if os.path.isdir(os.path.join(args.input_data, s.lower())):
            s = s.lower()
        elif os.path.isdir(os.path.join(args.input_data, s.upper())):
            s = s.upper()
        else:
            # Bug fix: the original `assert NotImplementedError` always passes,
            # because a class object is truthy — the error must be raised.
            raise NotImplementedError
        print('')
        todo = list(Path(os.path.join(args.input_data, s)).rglob('*' + audio_extension))
        print(f'Preprocessing data in: {s}, {len(todo)} audio files found.')
        output_dir = os.path.join(args.output_path, args.name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        print('Extracting audio length...', flush=True)
        tr_x = Parallel(n_jobs=args.n_jobs)(delayed(extract_length)(str(file)) for file in tqdm(todo))
        # Longest files first: reversed argsort pairs with reversed(sorted(...)).
        sorted_todo = [os.path.join(s, str(todo[idx]).split(s + '/')[-1]) for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo],
                                'length': list(reversed(sorted(tr_x))),
                                'label': None})
        df.to_csv(os.path.join(output_dir, tr_set[i] + '.csv'))
    print('All done, saved at', output_dir, 'exit.')
def main():
    """Interactive entry point: infer the dataset from the input path, let the
    user pick splits by index, then run generate_length on them."""
    args = get_preprocess_args()
    lowered = args.input_data.lower()
    if 'librilight' in lowered:
        SETS = ['small', 'medium', 'large'] + ['small-splitted', 'medium-splitted', 'large-splitted']
    elif 'librispeech' in lowered:
        SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
    elif 'timit' in lowered:
        SETS = ['TRAIN', 'TEST']
    else:
        raise NotImplementedError
    for idx, s in enumerate(SETS):
        print('\t', idx, ':', s)
    tr_set = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [SETS[int(t)] for t in tr_set.split(' ')]
    generate_length(args, tr_set, args.audio_extension)
def locate_txt(flac):
    """Given a LibriSpeech-style flac path 'speaker-chapter-utt.flac', return the
    path of the sibling transcript file 'speaker-chapter.trans.txt'."""
    filename = os.path.basename(flac)
    tags = filename.split('.')[0].split('-')
    transcript = f'{tags[0]}-{tags[1]}.trans.txt'
    return os.path.join(os.path.dirname(flac), transcript)
def get_preprocess_args():
    """Parse command-line options for LibriSpeech phone-alignment preprocessing.

    :return: argparse.Namespace with data_path and output_path attributes.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='./data/libri_alignment', type=str, help='Path to raw LibriSpeech alignment')
    parser.add_argument('--output_path', default='./data/libri_phone', type=str, help='Path to store output', required=False)
    return parser.parse_args()
def phone_preprocess(data_path, output_path, sets, unaligned):
    """Two-pass phone-alignment preprocessing.

    Pass 1 scans every alignment .txt in the chosen sets to build a phone -> index
    mapping (saved as phone2idx.pkl). Pass 2 converts each utterance's alignment
    into per-frame phone indices and pickles them alongside the input layout.

    :param data_path: root of the raw alignment .txt files
    :param output_path: root where phone2idx.pkl and per-utterance .pkl files go
    :param sets: candidate split names (the user picks a subset interactively)
    :param unaligned: utterance names to skip entirely
    """
    print('Data sets :')
    for idx, s in enumerate(sets):
        print('\t', idx, ':', s)
    todo_sets = input('Please enter the index for preprocessing sets (seperate w/ space): ')
    sets = [sets[int(s)] for s in todo_sets.split(' ')]
    # Pass 1: collect the phone vocabulary across all selected sets.
    idx = 0
    phone2idx = {}
    for s in sets:
        print('')
        print('Computing', s, 'data...')
        for path in tqdm(list(Path(os.path.join(data_path, s)).rglob('*.txt'))):
            check_name = path.as_posix().split('/')[-1].split('.')[0]
            if check_name not in unaligned and check_name != 'unaligned':
                for line in open(path).readlines():
                    phone = line.strip('\n').split(' ')[-1]
                    if phone not in phone2idx:
                        phone2idx[phone] = idx
                        idx += 1
    print('Phone set:')
    print(phone2idx)
    print(len(phone2idx), 'distinct phones found in', sets)
    with open(os.path.join(output_path, 'phone2idx.pkl'), 'wb') as fp:
        pickle.dump(phone2idx, fp)
    # Pass 2: expand each alignment into frame-level phone indices.
    for s in sets:
        print('')
        print('Preprocessing', s, 'data...')
        todo = list(Path(os.path.join(data_path, s)).rglob('*.txt'))
        print(len(todo), 'audio files found in', s)
        if not os.path.exists(os.path.join(output_path, s)):
            os.makedirs(os.path.join(output_path, s))
        print('Preprocessing phone alignments...', flush=True)
        for path in tqdm(todo):
            check_name = path.as_posix().split('/')[-1].split('.')[0]
            if check_name not in unaligned and check_name != 'unaligned':
                x = []
                file = open(path).readlines()
                for line in file:
                    line = line.strip('\n').split(' ')
                    x += time_to_frame(start_time=float(line[0]), end_time=float(line[1]), phone=phone2idx[line[2]])
                x = np.asarray(x)
                # Mirror the input tree under output_path, swapping the extension.
                path_to_save = str(path).replace(data_path.split('/')[-1], output_path.split('/')[-1]).replace('txt', 'pkl')
                with open(path_to_save, 'wb') as fp:
                    pickle.dump(x, fp)
    print('Phone preprocessing complete!')
def time_to_frame(start_time, end_time, phone):
    """Convert a phone segment given in seconds into a per-STFT-frame label list.

    Uses the module-level `sample_rate` and `_stft_parameters` to derive hop and
    window sizes; both ends are shifted back by half a window (clamped at 0) and
    the frame count is taken from hop-aligned boundaries.

    :param start_time: segment start in seconds
    :param end_time: segment end in seconds
    :param phone: integer phone index to repeat
    :return: list containing *phone* once per covered frame
    """
    start_sample = int(start_time * sample_rate)
    end_sample = int(end_time * sample_rate)
    _, hop_length, win_length = _stft_parameters(sample_rate=sample_rate)
    half_window = win_length * 0.5
    start_sample = start_sample - half_window if start_sample >= half_window else 0
    end_sample = end_sample - half_window if end_sample >= half_window else 0
    frames = (end_sample // hop_length) - (start_sample // hop_length)
    # Boundary adjustment when either end lands exactly on a hop boundary.
    if start_sample % hop_length == 0:
        frames += 1
    if end_sample % hop_length == 0:
        frames -= 1
    return [phone] * int(frames)
def main():
    """Entry point for phone preprocessing: read the unaligned-utterance list,
    pickle it, then run phone_preprocess over the fixed LibriSpeech sets.

    :raises ValueError: when train-clean-360/unaligned.txt cannot be read
    """
    args = get_preprocess_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    # Bug fix: the original wrapped all of this in a bare `except:` that
    # re-raised everything (including KeyboardInterrupt and unrelated
    # processing errors) as "Did not find unaligned.txt!". Keep the try
    # minimal, catch only file errors, and chain the real cause.
    try:
        file = open(os.path.join(args.data_path, 'train-clean-360/unaligned.txt')).readlines()
    except OSError as err:
        raise ValueError('Did not find unaligned.txt!') from err
    unaligned = [str(line).split('\t')[0].split(' ')[0] for line in file]
    print('Unaligned list: ', unaligned)
    unaligned_pkl = ['train-clean-360/' + u + '.npy' for u in unaligned]
    with open(os.path.join(args.output_path, 'unaligned.pkl'), 'wb') as fp:
        pickle.dump(unaligned_pkl, fp)
    sets = ['train-clean-360', 'test-clean']
    phone_preprocess(args.data_path, args.output_path, sets, unaligned)
def boolean_string(s):
    """argparse type: accept exactly 'True' or 'False' and return the bool.

    :raises ValueError: for any other string
    """
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
def get_preprocess_args():
    """Parse command-line options for acoustic-feature extraction on any dataset.

    :return: argparse.Namespace with output_path, audio_extention, feature_type,
             delta, delta_delta, apply_cmvn, n_jobs, and name attributes.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    parser.add_argument('--output_path', default='./data/', type=str, help='Path to store output', required=False)
    parser.add_argument('--audio_extention', default='.flac', type=str, help='audio file type (.wav / .flac / .mp3 / etc)', required=False)
    parser.add_argument('--feature_type', default='fbank', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
    parser.add_argument('--delta', default=False, type=boolean_string, help='Append Delta', required=False)
    parser.add_argument('--delta_delta', default=False, type=boolean_string, help='Append Delta Delta', required=False)
    parser.add_argument('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
    parser.add_argument('--n_jobs', default=-1, type=int, help='Number of jobs used for feature extraction', required=False)
    parser.add_argument('--name', default='None', type=str, help='Name of the output directory', required=False)
    return parser.parse_args()
def acoustic_preprocess(args, tr_set, dim, audio_extention):
    """Extract acoustic features (in parallel) for each split in *tr_set* and
    save per-split csvs sorted by feature length, longest first.

    :param args: parsed namespace (data_root, output_path, name, feature flags, n_jobs)
    :param dim: feature dimensionality, used only in the default output dir name
    :param audio_extention: e.g. '.flac'
    """
    for i, s in enumerate(tr_set):
        print('')
        print('Preprocessing data in: ', s, end='')
        todo = list(Path(os.path.join(args.data_root, s)).rglob('*' + audio_extention))
        print(len(todo), 'audio files found.')
        if args.name == 'None':
            output_dir = os.path.join(args.output_path, '_'.join(['NewData', str(args.feature_type) + str(dim)]))
        else:
            output_dir = os.path.join(args.output_path, args.name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        cur_path = os.path.join(output_dir, tr_set[i])
        if not os.path.exists(cur_path):
            os.makedirs(cur_path)
        print('Extracting acoustic feature...', flush=True)
        tr_x = Parallel(n_jobs=args.n_jobs)(
            delayed(extract_feature)(str(file), feature=args.feature_type, delta=args.delta,
                                     delta_delta=args.delta_delta, cmvn=args.apply_cmvn,
                                     save_feature=os.path.join(cur_path, str(file).split('/')[-1].replace(audio_extention, '')))
            for file in tqdm(todo))
        # Longest files first: reversed argsort pairs with reversed(sorted(...)).
        sorted_todo = [os.path.join(tr_set[i], str(todo[idx]).split('/')[-1].replace(audio_extention, '.npy'))
                       for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo],
                                'length': list(reversed(sorted(tr_x))),
                                'label': None})
        df.to_csv(os.path.join(output_dir, tr_set[i] + '.csv'))
    print('All done, saved at', output_dir, 'exit.')
def main():
    """Interactive entry point: compute feature dimensionality from the flags,
    let the user pick splits from the module-level SETS, then extract features."""
    args = get_preprocess_args()
    factor = 1 + int(args.delta) + int(args.delta_delta)
    mel_dim = num_mels * factor
    mfcc_dim = num_mfcc * factor
    if args.feature_type == 'linear':
        dim = num_freq
    elif args.feature_type == 'mfcc':
        dim = mfcc_dim
    else:
        dim = mel_dim
    print('Delta: ', args.delta, '. Delta Delta: ', args.delta_delta, '. Cmvn: ', args.apply_cmvn)
    for idx, s in enumerate(SETS):
        print('\t', idx, ':', s)
    tr_set = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [SETS[int(t)] for t in tr_set.split(' ')]
    acoustic_preprocess(args, tr_set, dim, args.audio_extention)
def boolean_string(s):
    """argparse type: accept exactly 'True' or 'False' and return the bool.

    :raises ValueError: for any other string
    """
    if s not in ('False', 'True'):
        raise ValueError('Not a valid boolean string')
    return s == 'True'
def get_preprocess_args():
    """Parse command-line options for LibriSpeech acoustic-feature extraction.

    :return: argparse.Namespace with data_path, output_path, feature_type,
             delta, delta_delta, apply_cmvn, n_jobs, and name attributes.
    """
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='./data/LibriSpeech', type=str, help='Path to raw LibriSpeech dataset')
    parser.add_argument('--output_path', default='./data/', type=str, help='Path to store output', required=False)
    parser.add_argument('--feature_type', default='mel', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
    parser.add_argument('--delta', default=True, type=boolean_string, help='Append Delta', required=False)
    parser.add_argument('--delta_delta', default=False, type=boolean_string, help='Append Delta Delta', required=False)
    parser.add_argument('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
    parser.add_argument('--n_jobs', default=-1, type=int, help='Number of jobs used for feature extraction', required=False)
    parser.add_argument('--name', default='None', type=str, help='Name of the output directory', required=False)
    return parser.parse_args()
def acoustic_preprocess(args, tr_set, dim):
    """Extract acoustic features (in parallel) for each LibriSpeech split in
    *tr_set* and save per-split csvs sorted by feature length, longest first.

    :param args: parsed namespace (data_path, output_path, name, feature flags, n_jobs)
    :param dim: feature dimensionality, used only in the default output dir name
    """
    for s in tr_set:
        print('')
        print('Preprocessing', s, 'data...', end='')
        todo = list(Path(os.path.join(args.data_path, s)).rglob('*.flac'))
        print(len(todo), 'audio files found in', s)
        if args.name == 'None':
            output_dir = os.path.join(args.output_path, '_'.join(['libri', str(args.feature_type) + str(dim)]))
        else:
            output_dir = os.path.join(args.output_path, args.name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        cur_path = os.path.join(output_dir, s)
        if not os.path.exists(cur_path):
            os.makedirs(cur_path)
        print('Extracting acoustic feature...', flush=True)
        tr_x = Parallel(n_jobs=args.n_jobs)(
            delayed(extract_feature)(str(file), feature=args.feature_type, delta=args.delta,
                                     delta_delta=args.delta_delta, cmvn=args.apply_cmvn,
                                     save_feature=os.path.join(cur_path, str(file).split('/')[-1].replace('.flac', '')))
            for file in tqdm(todo))
        # Longest files first: reversed argsort pairs with reversed(sorted(...)).
        sorted_todo = [os.path.join(s, str(todo[idx]).split('/')[-1].replace('.flac', '.npy'))
                       for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo],
                                'length': list(reversed(sorted(tr_x))),
                                'label': 'None'})
        df.to_csv(os.path.join(output_dir, s + '.csv'))
    print('All done, saved at', output_dir, 'exit.')
def main():
    """Interactive entry point: pick LibriSpeech splits, then preprocess them."""
    args = get_preprocess_args()
    # Feature dimension scales with the number of appended delta orders.
    orders = (1 + int(args.delta)) + int(args.delta_delta)
    mel_dim = num_mels * orders
    mfcc_dim = num_mfcc * orders
    if args.feature_type == 'linear':
        dim = num_freq
    elif args.feature_type == 'mfcc':
        dim = mfcc_dim
    else:
        dim = mel_dim
    print('Delta: ', args.delta, '. Delta Delta: ', args.delta_delta, '. Cmvn: ', args.apply_cmvn)
    print('Data sets :')
    sets = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
    for idx, split_name in enumerate(sets):
        print('\t', idx, ':', split_name)
    answer = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [sets[int(token)] for token in answer.split(' ')]
    acoustic_preprocess(args, tr_set, dim)
def boolean_string(s):
    """argparse type helper: map the exact strings 'True'/'False' to bools."""
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
def bracket_underscore(string):
    """Convert an SDK-style id 'name[i]' (0-based) to 'name_{i+1}' (1-based)."""
    name, _, tail = string.partition('[')
    index = int(tail.split(']')[0])
    return name + '_' + str(index + 1)
def underscore_bracket(string):
    """Convert 'name_{i}.ext' (1-based, 4-char extension) back to 'name[i-1]'."""
    pieces = string.split('_')
    # The last piece carries the index plus a 4-character extension like '.npy'.
    index = int(pieces[-1][:-4])
    name = '_'.join(pieces[:-1])
    return '{}[{}]'.format(name, index - 1)
def get_preprocess_args():
    """Build and parse the options for MOSEI segmented-FLAC feature extraction."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--flac_path', default='../../data/mosei/flac', type=str, help='Path to MOSEI segmented FLAC files')
    # Optional flags as (flag, default, type, help) rows.
    option_rows = [
        ('--output_path', '../../data/mosei', str, 'Path to store segmented npys'),
        ('--feature_type', 'mel', str, 'Feature type ( mfcc / fbank / mel / linear )'),
        ('--apply_cmvn', True, boolean_string, 'Apply CMVN on feature'),
        ('--n_jobs', -1, int, 'Number of jobs used for feature extraction'),
    ]
    for flag, default, flag_type, message in option_rows:
        parser.add_argument(flag, default=default, type=flag_type, help=message, required=False)
    return parser.parse_args()
def extract_mosei(args, dim):
    """Extract acoustic features for every segmented MOSEI FLAC clip.

    Saves one .npy per clip into ``<output_path>/<feature_type><dim>/``.
    Prompts interactively before overwriting an existing output directory.
    """
    assert os.path.exists(args.flac_path), f'{args.flac_path} not exists'
    todo = list(Path(args.flac_path).glob('*.flac'))
    print(len(todo), 'audio files found in MOSEI')
    assert (args.feature_type in ['mel', 'linear', 'fbank']), 'Feature type unsupported'
    if (not os.path.exists(args.output_path)):
        os.makedirs(args.output_path)
    # Output directory name encodes feature type and dimension, e.g. "mel160".
    npy_dir = os.path.join(args.output_path, (str(args.feature_type) + str(dim)))
    for target_dir in [npy_dir]:
        if os.path.exists(target_dir):
            decision = input(f'{target_dir} already exists. Remove it? [Y/N]: ')
            if (decision.upper() == 'Y'):
                print(f'Removing {target_dir}')
                shutil.rmtree(target_dir)
            else:
                print('Abort')
                exit(0)
        os.makedirs(target_dir)
    print('Extracting acoustic feature...', flush=True)
    # tr_x holds the per-file values returned by extract_feature (unused here).
    tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, cmvn=args.apply_cmvn, save_feature=os.path.join(npy_dir, str(file).split('/')[(- 1)].replace('.flac', ''))) for file in tqdm(todo)))
def main():
    """CLI entry: resolve the feature dimension, then run MOSEI extraction."""
    args = get_preprocess_args()
    if args.feature_type == 'linear':
        dim = num_freq
    else:
        dim = mel_dim
    extract_mosei(args, dim)
def get_preprocess_args():
    """Parse the paths needed to add a 'length' column to the MOSEI csv."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--npy_path', type=str, default='../../data/mosei/mel160',
                        help='Path to MOSEI segmented NPY files')
    parser.add_argument('--csv_path', type=str, default='../../data/mosei/mosei_no_semi.csv',
                        required=False, help='Path to mosei_no_semi.csv')
    return parser.parse_args()
def add_length(args):
    """Append each utterance's frame count as a 'length' column of the csv.

    Loads the .npy named by every row's ``key`` and records its first
    dimension, then rewrites the csv in place.
    """
    table = pd.read_csv(args.csv_path)
    frame_counts = [
        np.load(os.path.join(args.npy_path, row.key + '.npy')).shape[0]
        for _, row in table.iterrows()
    ]
    table['length'] = frame_counts
    table.to_csv(args.csv_path, index=False)
def main():
    """CLI entry: annotate the MOSEI csv with per-utterance lengths."""
    add_length(get_preprocess_args())
def bracket_underscore(string):
    """Turn 'name[i]' (0-based SDK segment id) into 'name_{i+1}'."""
    opening = string.index('[')
    closing = string.index(']')
    index = int(string[opening + 1:closing])
    return '{}_{}'.format(string[:opening], index + 1)
def underscore_bracket(string):
    """Turn 'name_{i}.ext' (1-based, 4-char extension) back into 'name[i-1]'."""
    name, _, tail = string.rpartition('_')
    index = int(tail[:-4])  # drop the 4-character extension, e.g. '.npy'
    return name + '[' + str(index - 1) + ']'
def get_preprocess_args():
    """Parse the paths needed to segment the raw MOSEI WAV files."""
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', type=str,
                        default='/home/leo/d/datasets/MOSEI/Raw/Audio/Full/WAV_16000',
                        help='Path to MOSEI non-segmented WAV files')
    parser.add_argument('--output_path', type=str, default='../../data/mosei', required=False,
                        help='Path to store segmented flac and npys. Should already contains mosei_no_semi.csv')
    return parser.parse_args()
def segment_mosei(args):
    """Cut full MOSEI session WAVs into per-utterance FLAC segments.

    Reads segment boundaries from ``<output_path>/mosei_no_semi.csv`` and
    writes mono 16 kHz/16-bit FLACs named by each row's ``key`` into
    ``<output_path>/flac``.  Prompts before overwriting an existing flac dir.
    """
    output_dir = args.output_path
    mosei_summary = os.path.join(output_dir, 'mosei_no_semi.csv')
    flac_dir = os.path.join(output_dir, 'flac')
    assert os.path.exists(mosei_summary), 'Output path should already be created with a mosei_no_semi.csv inside it'
    for target_dir in [flac_dir]:
        if os.path.exists(target_dir):
            decision = input(f'{target_dir} already exists. Remove it? [Y/N]: ')
            if (decision.upper() == 'Y'):
                shutil.rmtree(target_dir)
                print(f'{target_dir} removed')
            else:
                print('Abort')
                exit(0)
        os.makedirs(target_dir)
    df = pd.read_csv(mosei_summary)
    for (index, row) in df.iterrows():
        underscore = row.key
        wavname = f'{row.filename}.wav'
        wavpath = os.path.join(args.data_path, wavname)
        assert os.path.exists(wavpath), f'wav not exists: {wavpath}'
        wav = AudioSegment.from_wav(wavpath)
        # csv start/end are in seconds; pydub slicing is in milliseconds.
        start = int((row.start * 1000))
        end = int((row.end * 1000))
        assert (start >= 0), f'{underscore} has negative start time'
        assert (end >= 0), f'{underscore} has negative end time'
        seg_wav = wav[start:end]
        seg_flacpath = os.path.join(flac_dir, f'{underscore}.flac')
        # Force mono, signed 16-bit, 16 kHz on export.
        seg_wav.export(seg_flacpath, format='flac', parameters=['-ac', '1', '-sample_fmt', 's16', '-ar', '16000'])
def main():
    """CLI entry: segment the raw MOSEI WAVs into per-utterance FLACs."""
    segment_mosei(get_preprocess_args())
def boolean_string(s):
    """argparse type helper: accept only the literal strings 'True'/'False'."""
    mapping = {'True': True, 'False': False}
    try:
        return mapping[s]
    except KeyError:
        raise ValueError('Not a valid boolean string')
def sdk2npy(string):
    """Map an SDK segment id 'name[i]' to its npy file name 'name_{i+1}.npy'."""
    name, _, tail = string.partition('[')
    index = int(tail.split(']')[0])
    return f'{name}_{index + 1}.npy'
def npy2sdk(string):
    """Map an npy file name 'name_{i}.npy' back to the SDK id 'name[i-1]'."""
    *name_parts, tail = string.split('_')
    index = int(tail[:-4])  # drop the '.npy' suffix
    return '_'.join(name_parts) + '[' + str(index - 1) + ']'
def get_preprocess_args():
    """Build and parse the options for MOSI segmented-audio preprocessing."""
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='/home/leo/d/datasets/MOSI/Raw/Audio/WAV_16000/Segmented', type=str, help='Path to raw MOSI segmented audio dataset')
    # Optional flags as (flag, default, type, help) rows.
    option_rows = [
        ('--output_path', '../data/', str, 'Path to store output'),
        ('--feature_type', 'mel', str, 'Feature type ( mfcc / fbank / mel / linear )'),
        ('--apply_cmvn', True, boolean_string, 'Apply CMVN on feature'),
        ('--n_jobs', -1, int, 'Number of jobs used for feature extraction'),
        ('--n_tokens', 5000, int, 'Vocabulary size of target'),
    ]
    for flag, default, flag_type, message in option_rows:
        parser.add_argument(flag, default=default, type=flag_type, help=message, required=False)
    return parser.parse_args()
def acoustic_preprocess(args, dim):
    """Extract features for MOSI clips, attach CMU-SDK sentiment labels, and
    write train/dev/test csvs plus the labeled .npy files.

    Pipeline: (1) parallel feature extraction, (2) download/align the CMU-MOSI
    opinion labels, (3) sanity-check id round-tripping, (4) build a length-sorted
    DataFrame of labeled utterances, (5) split by the SDK standard folds, and
    (6) delete any unlabeled leftover .npy files.
    """
    todo = list(Path(args.data_path).glob('*.wav'))
    print(len(todo), 'audio files found in MOSI')
    assert (args.feature_type in ['mel', 'linear', 'fbank']), 'Feature type unsupported'
    output_dir = os.path.join(args.output_path, '_'.join(['mosi', (str(args.feature_type) + str(dim))]))
    if (not os.path.exists(output_dir)):
        os.makedirs(output_dir)
    print('Extracting acoustic feature...', flush=True)
    # tr_x holds per-file frame counts; used below for length sorting.
    tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, cmvn=args.apply_cmvn, save_feature=os.path.join(output_dir, str(file).split('/')[(- 1)].replace('.wav', ''))) for file in tqdm(todo)))
    DATASET = md.cmu_mosi
    try:
        md.mmdataset(DATASET.labels, args.data_path)
    except RuntimeError:
        # The SDK raises when the label csd already exists locally.
        print('Labels have been downloaded previously.')
    label_field = 'CMU_MOSI_Opinion_Labels'
    features = [label_field]
    recipe = {feat: (os.path.join(args.data_path, feat) + '.csd') for feat in features}
    dataset = md.mmdataset(recipe)
    dataset.align(label_field)
    utterances = os.listdir(output_dir)
    # Sanity checks: every labeled segment should have an extracted npy, and
    # sdk2npy/npy2sdk must be inverses; failures only print, they don't abort.
    for segment_sdk in dataset[label_field].keys():
        segment_npy = sdk2npy(segment_sdk)
        try:
            assert (segment_npy in utterances)
        except AssertionError:
            print('AssertionError: Cannot find corresponding utterance for given label')
        try:
            assert (npy2sdk(segment_npy) == segment_sdk)
        except AssertionError:
            print('AssertionError: npt2sdk funtion has bug')
    # Keep only utterances that have a label, ordered longest-first.
    sorted_xlen = []
    sorted_y = []
    sorted_todo = []
    for idx in reversed(np.argsort(tr_x)):
        filename = str(todo[idx]).split('/')[(- 1)].replace('.wav', '.npy')
        sdkname = npy2sdk(filename)
        if (sdkname in dataset[label_field].keys()):
            sorted_xlen.append(tr_x[idx])
            sorted_y.append(dataset[label_field][sdkname]['features'].reshape((- 1))[0])
            sorted_todo.append(filename)
    df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo], 'length': list(sorted_xlen), 'label': sorted_y})
    train_split = DATASET.standard_folds.standard_train_fold
    dev_split = DATASET.standard_folds.standard_dev_fold if False else DATASET.standard_folds.standard_valid_fold
    test_split = DATASET.standard_folds.standard_test_fold
    npy_dir = os.path.join(output_dir, 'npy')
    if (not os.path.exists(npy_dir)):
        os.mkdir(npy_dir)
    def classify(file_name):
        # Assign a fold from the video-id prefix; also moves the npy into npy_dir
        # as a side effect of the apply_along_axis call below.
        file_name = file_name[0]
        prefix = '_'.join(file_name.split('_')[:(- 1)])
        shutil.move(os.path.join(output_dir, file_name), os.path.join(npy_dir, file_name))
        if (prefix in train_split):
            return 'train'
        elif (prefix in dev_split):
            return 'dev'
        elif (prefix in test_split):
            return 'test'
        else:
            assert 0, 'Error in preprocess_mosi.py:146'
    belong = np.apply_along_axis((lambda file_name: classify(file_name)), 1, df['file_path'].values.reshape((- 1), 1))
    df.insert(len(df.columns), 'set', belong)
    train_frame = df[(df.set == 'train')]
    dev_frame = df[(df.set == 'dev')]
    test_frame = df[(df.set == 'test')]
    df.to_csv(os.path.join(output_dir, 'all.csv'))
    train_frame.to_csv(os.path.join(output_dir, 'train.csv'))
    dev_frame.to_csv(os.path.join(output_dir, 'dev.csv'))
    test_frame.to_csv(os.path.join(output_dir, 'test.csv'))
    # Anything left at the top level had no label — remove it.
    remain_npy = glob.glob(os.path.join(output_dir, '*.npy'))
    print((('Delete ' + str(len(remain_npy))) + ' unlabeled npy files:'))
    for npy in remain_npy:
        print(('delete ' + npy))
        os.remove(npy)
    print('All done, saved at', output_dir, 'exit.')
def main():
    """CLI entry: resolve the feature dimension, then run MOSI preprocessing."""
    args = get_preprocess_args()
    if args.feature_type == 'linear':
        dim = num_freq
    else:
        dim = mel_dim
    acoustic_preprocess(args, dim)
def boolean_string(s):
    """argparse type helper turning the strings 'True'/'False' into booleans."""
    for literal, value in (('True', True), ('False', False)):
        if s == literal:
            return value
    raise ValueError('Not a valid boolean string')
def get_preprocess_args():
    """Build and parse the command-line options for TIMIT preprocessing."""
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='./data/timit', type=str, help='Path to raw TIMIT dataset')
    # Optional flags as (flag, default, type, help) rows.
    option_rows = [
        ('--output_path', './data/', str, 'Path to store output'),
        ('--feature_type', 'mel', str, 'Feature type ( mfcc / fbank / mel / linear )'),
        ('--delta', True, boolean_string, 'Append Delta'),
        ('--delta_delta', False, boolean_string, 'Append Delta Delta'),
        ('--apply_cmvn', True, boolean_string, 'Apply CMVN on feature'),
        ('--n_jobs', -1, int, 'Number of jobs used for feature extraction'),
        ('--name', 'None', str, 'Name of the output directory'),
    ]
    for flag, default, flag_type, message in option_rows:
        parser.add_argument(flag, default=default, type=flag_type, help=message, required=False)
    return parser.parse_args()
def preprocess(args, dim):
    """Extract acoustic features for the TIMIT train/dev/test splits.

    Handles both upper- and lower-case split directories, extracts features in
    parallel, and writes a length-sorted csv per split (labels filled later).
    """
    for s in ('train', 'dev', 'test'):
        print('')
        print(f'Preprocessing {s} data...', end='')
        # TIMIT ships with upper-case directory names; fall back to lower-case.
        todo = list(Path(os.path.join(args.data_path, s.upper())).rglob('*.[wW][aA][vV]'))
        if (len(todo) == 0):
            todo = list(Path(os.path.join(args.data_path, s)).rglob('*.[wW][aA][vV]'))
        print(len(todo), f'audio files found in {s} set')
        # Output directory: explicit --name, or e.g. "timit_mel160".
        if (args.name == 'None'):
            output_dir = os.path.join(args.output_path, '_'.join(['timit', (str(args.feature_type) + str(dim))]))
        else:
            output_dir = os.path.join(args.output_path, args.name)
        if (not os.path.exists(output_dir)):
            os.makedirs(output_dir)
        cur_path = os.path.join(output_dir, s)
        if (not os.path.exists(cur_path)):
            os.makedirs(cur_path)
        print('Extracting acoustic feature...', flush=True)
        # tr_x collects per-file frame counts from extract_feature.
        tr_x = Parallel(n_jobs=args.n_jobs)((delayed(extract_feature)(str(file), feature=args.feature_type, delta=args.delta, delta_delta=args.delta_delta, cmvn=args.apply_cmvn, save_feature=os.path.join(cur_path, str(file).split('/')[(- 1)].split('.')[0])) for file in tqdm(todo)))
        # csv rows sorted longest-first for downstream bucketing.
        sorted_todo = [os.path.join(s, (str(todo[idx]).split('/')[(- 1)].split('.')[0] + '.npy')) for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo], 'length': list(reversed(sorted(tr_x))), 'label': None})
        df.to_csv(os.path.join(output_dir, (s + '.csv')))
    print('All done, saved at', output_dir, 'exit.')
def main():
    """CLI entry: compute the feature dimension and preprocess TIMIT."""
    args = get_preprocess_args()
    # Feature dimension scales with the number of appended delta orders.
    orders = (1 + int(args.delta)) + int(args.delta_delta)
    mel_dim = num_mels * orders
    mfcc_dim = num_mfcc * orders
    if args.feature_type == 'linear':
        dim = num_freq
    elif args.feature_type == 'mfcc':
        dim = mfcc_dim
    else:
        dim = mel_dim
    print('Delta: ', args.delta, '. Delta Delta: ', args.delta_delta, '. Cmvn: ', args.apply_cmvn)
    preprocess(args, dim)
def word_normalise(words):
    """Normalise each word: expand month abbreviations, apply the replacement
    table, strip the replace_vocab regexes, then drop punctuation."""
    normalised = []
    for word in words:
        if word.lower() in months:
            word = months[word.lower()]
        if word.lower() in replace_words:
            word = replace_words[word.lower()]
        for pattern in replace_vocab:
            word = re.sub(pattern, '', word)
        normalised.append(re.sub('[\\.\\,\\!\\?;\\/]', '', word))
    return normalised
def sent_normalise(text, slots_split=None):
    """Normalise a sentence word-by-word while keeping slot labels aligned.

    Each input word may expand to several output words (via ``reservations``
    or the ``normalise`` library); every expansion inherits the word's slot.
    Returns ``(norm_slots, norm_texts)`` of equal length.
    """
    (norm_slots, norm_texts) = ([], [])
    text_split = text.split(' ')
    if (slots_split is None):
        # Default: every word is outside any slot.
        slots_split = (['O'] * len(text_split))
    for idx in range(len(text_split)):
        # Skip pure punctuation tokens.
        if (text_split[idx] in '.,!?;/]'):
            continue
        # Hand-written expansions take priority over the normaliser.
        if (text_split[idx] in reservations):
            for word in reservations[text_split[idx]].split(' '):
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
            continue
        norm_text = normalise(word_normalise([text_split[idx]]), variety='AmE', verbose=False)
        for phrase in norm_text:
            if (phrase == ''):
                continue
            # A normalised phrase may contain several words; split on space/hyphen.
            for word in re.split(' |\\-', phrase):
                word = re.sub('[\\.\\,\\!\\?;\\/]', '', word)
                if (word == ''):
                    continue
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
    return (norm_slots, norm_texts)
def process_raw_snips_file(file, out_f):
    """Convert raw SNIPS 'word:slot ... <=> intent' lines to the pipe-separated
    'id | BOS text EOS | O slots | intent' format expected downstream."""
    with open(file) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    with open(out_f, 'w') as f:
        for (cnt, line) in enumerate(content):
            text = line.split(' <=> ')[0]
            intent = line.split(' <=> ')[1]
            # '::' escapes a literal ':' inside a token; a token without exactly
            # one word:slot pair maps to a blank word.
            text_split = [(x.replace('::', ':').split(':')[0] if (len(x.replace('::', ':').split(':')) == 2) else ' ') for x in text.split()]
            text_entities = ' '.join(text_split)
            slots_split = [x.replace('::', ':').split(':')[1] for x in text.split()]
            slots_entities = ' '.join(slots_split)
            assert (len(text_split) == len(slots_split)), (text_split, slots_split)
            f.write(('%d | BOS %s EOS | O %s | %s\n' % (cnt, text_entities, slots_entities, intent)))
def remove_IBO_from_snipt_vocab_slot(in_f, out_f):
    """Strip the IOB 'B-'/'I-' prefixes from a slot-vocabulary file and write
    the deduplicated slot names, one per line.

    NOTE(review): iterating a set makes the output line order
    non-deterministic across runs — confirm downstream does not rely on it.
    """
    with open(in_f) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    for (idx, line) in enumerate(content):
        if (line != 'O'):
            # 'B-xxx' and 'I-xxx' both lose their 2-character prefix.
            content[idx] = line[len('B-'):]
    content = set(content)
    with open(out_f, 'w') as f:
        for line in content:
            f.write(('%s\n' % line))
def process_daniel_snips_file(content):
    """Parse SNIPS tsv lines into per-split (utt2text, utt2slots, utt2intent).

    Each line is 'uttid\\ttext\\tslots\\tintent' with the split name embedded in
    the uttid ('...-valid-...', '...-test-...', '...-train-...').  Repairs a
    known text/slot length mismatch, strips the 'prefix/' from slot tags, and
    asserts the expected corpus sizes (7 intents, 700/700/13084 utterances,
    40 slot types per split).  Returns (train, valid, test) dict triples.
    """
    content = [x.strip() for x in content]
    utt_ids = [x.split('\t', 1)[0] for x in content]
    valid_uttids = [x for x in utt_ids if (x.split('-')[1] == 'valid')]
    test_uttids = [x for x in utt_ids if (x.split('-')[1] == 'test')]
    train_uttids = [x for x in utt_ids if (x.split('-')[1] == 'train')]
    (utt2text, utt2slots, utt2intent) = ({}, {}, {})
    assert (len(utt_ids) == len(set(utt_ids)))
    for line in content:
        (uttid, text, slots, intent) = line.split('\t')
        if (len(text.split()) != len(slots.split())):
            # A doubled space in the text produced one extra slot tag; drop it.
            assert (len(text.split(' ')) == 2)
            empty_idx = (text.split().index(text.split(' ')[0].split()[(- 1)]) + 1)
            slots_list = slots.split()
            del slots_list[empty_idx]
            cleaned_slots = ' '.join(slots_list)
            assert (len(text.split()) == len(slots_list))
            cleaned_text = ' '.join(text.split())
        else:
            (cleaned_text, cleaned_slots) = (text, slots)
        # Slot tags look like 'prefix/slotname'; keep only the slot name.
        cleaned_slots = ' '.join([(x.split('/')[1] if (x != 'O') else x) for x in cleaned_slots.split()])
        utt2text[uttid] = cleaned_text
        utt2slots[uttid] = cleaned_slots
        utt2intent[uttid] = intent
    (test_utt2text, test_utt2slots, test_utt2intent) = ({}, {}, {})
    (valid_utt2text, valid_utt2slots, valid_utt2intent) = ({}, {}, {})
    (train_utt2text, train_utt2slots, train_utt2intent) = ({}, {}, {})
    for utt in valid_uttids:
        valid_utt2text[utt] = utt2text[utt]
        valid_utt2slots[utt] = utt2slots[utt]
        valid_utt2intent[utt] = utt2intent[utt]
    for utt in test_uttids:
        test_utt2text[utt] = utt2text[utt]
        test_utt2slots[utt] = utt2slots[utt]
        test_utt2intent[utt] = utt2intent[utt]
    for utt in train_uttids:
        train_utt2text[utt] = utt2text[utt]
        train_utt2slots[utt] = utt2slots[utt]
        train_utt2intent[utt] = utt2intent[utt]
    # Corpus-level sanity checks on intents, sizes, and slot inventories.
    assert (len(set(valid_utt2intent.values())) == len(set(test_utt2intent.values())) == len(set(train_utt2intent.values())) == 7)
    assert (len(valid_utt2intent.keys()) == len(test_utt2intent.keys()) == 700)
    assert (len(train_utt2intent.keys()) == 13084)
    def __return_set_of_slots(utt2slots):
        # Collect the unique slot tags used across one split.
        all_slots = []
        for slot in utt2slots.values():
            all_slots.extend(slot.split())
        unique_slots = set(all_slots)
        return unique_slots
    assert (len(__return_set_of_slots(valid_utt2slots)) == len(__return_set_of_slots(test_utt2slots)) == len(__return_set_of_slots(train_utt2slots)) == 40)
    return ((train_utt2text, train_utt2slots, train_utt2intent), (valid_utt2text, valid_utt2slots, valid_utt2intent), (test_utt2text, test_utt2slots, test_utt2intent))
def map_and_link_snips_audio(snips_audio_dir, link_dir):
    """Copy SNIPS mp3s into ``link_dir/<partition>/`` renamed with the speaker.

    NOTE(review): ``path.split('/')[8]`` hard-codes the directory depth of the
    speaker component — TODO confirm it matches the actual dataset layout.
    """
    result = [y for x in os.walk(snips_audio_dir) for y in glob(os.path.join(x[0], '*.mp3'))]
    for path in result:
        person = path.split('/')[8].split('_')[1]
        filename = path.split('/')[(- 1)]
        # Only files named 'snips-<partition>-<n>.mp3' are mapped.
        if (filename[:5] != 'snips'):
            continue
        uttid = filename.split('.')[0]
        new_uttid = ((person + '-') + filename)
        partition = uttid.split('-')[1]
        destination = os.path.join(link_dir, partition, new_uttid)
        shutil.copyfile(path, destination)
def create_multispk_for_snips(output_dir):
    """Expand the single-speaker SNIPS splits into all 16 TTS speakers.

    For every (split, utterance) pair, emit one line per speaker into
    ``all.iob.snips.txt`` in 'uttid BOS TEXT EOS\\tO slots intent' format,
    reading the per-split 'single-matched-snips.*.w-intent' files.
    """
    speakers = 'Aditi Amy Brian Emma Geraint Ivy Joanna Joey Justin Kendra Kimberly Matthew Nicole Raveena Russell Salli'.split(' ')
    dataset_info = [{'split': 'test', 'num_utts': 700}, {'split': 'valid', 'num_utts': 700}, {'split': 'train', 'num_utts': 13084}]
    test_out_f = open(os.path.join(output_dir, 'all.iob.snips.txt'), 'w')
    for data in dataset_info:
        num_utts = data['num_utts']
        split = data['split']
        with open(os.path.join(output_dir, ('single-matched-snips.%s.w-intent' % split))) as f:
            content = f.readlines()
        # Index each line by its utterance id (first whitespace token).
        utt2line = {x.strip().split()[0]: x.strip() for x in content}
        for spk in speakers:
            for num in range(num_utts):
                uttid = ('%s-snips-%s-%d' % (spk, split, num))
                line = utt2line[('snips-%s-%d' % (split, num))]
                text = line.split('\t')[1].upper()
                slots = line.split('\t')[2]
                intent = line.split('\t')[3]
                test_out_f.write(('%s BOS %s EOS\tO %s %s\n' % (uttid, text, slots, intent)))
    test_out_f.close()
def _normalise_split(split_dirs, out_path, desc_name, vocab_slot):
    """Normalise one split's utterances, write its w-intent TSV, and
    accumulate slot counts into ``vocab_slot`` in place.

    ``split_dirs`` is a ``(utt2text, utt2slots, utt2intent)`` triple as
    returned by ``process_daniel_snips_file``.
    """
    (utt2text, utt2slots, utt2intent) = split_dirs
    with open(out_path, 'w') as out_file:
        for uttid in tqdm.tqdm(utt2text.keys(), desc=('Text Normalising on %s set' % desc_name)):
            text = utt2text[uttid]
            slots_split = utt2slots[uttid].split()
            intent = utt2intent[uttid]
            for s in slots_split:
                vocab_slot.setdefault(s, 0)
                vocab_slot[s] += 1
            (norm_slots, norm_texts) = sent_normalise(text, slots_split)
            # Normalisation must keep text and slot sequences aligned.
            assert (len(norm_texts) == len(norm_slots)), (norm_texts, norm_slots)
            out_file.write(('%s\t%s\t%s\t%s\n' % (uttid, ' '.join(norm_texts).upper(), ' '.join(norm_slots), intent)))

def apply_text_norm_and_modify_slots(all_tsv, output_dir):
    """Text-normalise the SNIPS test/valid/train splits and write them out.

    Produces one 'single-matched-snips.<split>.w-intent' TSV per split plus a
    'slots.txt' vocabulary sorted by descending frequency.  The three
    previously duplicated per-split loops are factored into _normalise_split;
    the split order (test, valid, train) is preserved so the accumulated slot
    counts — and therefore the vocabulary file — are unchanged.  Also fixes
    the original's unclosed vocab file handle.
    """
    (train_dirs, valid_dirs, test_dirs) = process_daniel_snips_file(all_tsv)
    vocab_slot = {}
    _normalise_split(test_dirs, os.path.join(output_dir, 'single-matched-snips.test.w-intent'), 'testing', vocab_slot)
    _normalise_split(valid_dirs, os.path.join(output_dir, 'single-matched-snips.valid.w-intent'), 'validation', vocab_slot)
    _normalise_split(train_dirs, os.path.join(output_dir, 'single-matched-snips.train.w-intent'), 'training', vocab_slot)
    # Slot vocabulary, most frequent first.  The original opened this file and
    # never closed it, relying on interpreter shutdown to flush.
    with open(os.path.join(output_dir, 'slots.txt'), 'w') as vocab_file:
        vocab_file.write('\n'.join(sorted(list(vocab_slot.keys()), key=(lambda x: vocab_slot[x]), reverse=True)))
def sox_func(inputs):
    """Worker: convert one speaker's mp3 files to mono 16 kHz 16-bit WAVs.

    ``inputs`` is a (files, root, out_root, speaker) tuple so the function can
    be dispatched through ``Pool.map`` with a single argument.
    NOTE(review): the sox command is built as a shell string via os.popen —
    paths containing spaces or shell metacharacters will break or be unsafe;
    consider subprocess.run with an argument list.
    """
    (files, root, out_root, speaker) = inputs
    for name in tqdm.tqdm(files, desc=('Process for speaker: ' + speaker)):
        if name.endswith('.mp3'):
            # File names look like 'snips-<split>-<n>.mp3'; bucket by split.
            split = name.split('-')[1]
            out_dir = os.path.join(out_root, split)
            os.makedirs(out_dir, exist_ok=True)
            orig_file = os.path.join(root, name)
            new_file = os.path.join(out_dir, (((speaker + '-') + name.split('/')[(- 1)].split('.')[0]) + '.wav'))
            bashCommand = ((('sox ' + orig_file) + ' -t wav -c 1 -r 16000 -b 16 -e signed-integer ') + new_file)
            r = os.popen(bashCommand).read()
def sox_mp3_to_wav(in_root, out_root):
    """Walk ``in_root`` and convert every speaker directory's mp3s to WAV
    using a 16-process pool (one ``sox_func`` task per directory).

    NOTE(review): the Pool is never closed/joined — acceptable for a one-shot
    script, but confirm if this is called repeatedly.
    """
    os.makedirs(out_root, exist_ok=True)
    pool = Pool(16)
    inputs = []
    for (root, dirs, files) in os.walk(in_root):
        print(('[Processing] enter directory %s' % root))
        if (not len(files)):
            continue
        # Speaker name comes from the parent directory, e.g. '.../Audio_Amy/...'.
        speaker = root.split('/')[(- 2)].split('_')[1]
        print(('[Processing] process %d audio files from speaker %s' % (len(files), speaker)))
        inputs.append((files, root, out_root, speaker))
    pool.map(sox_func, inputs)
def get_preprocess_args():
    """Parse options for splitting long audio files into fixed-size chunks."""
    parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    # (flags, type, default, help) rows; all options are optional.
    option_rows = [
        (('-i', '--input_path'), str, '/livingrooms/public/LibriLight/', 'Path to your LibriSpeech directory'),
        (('-o', '--output_path'), str, '/livingrooms/public/LibriLight/', 'Path to store output'),
        (('-s', '--split_size'), int, 60, 'Split size in seconds'),
        (('-a', '--audio_extension'), str, '.flac', 'audio file type (.wav / .flac / .mp3 / etc)'),
        (('-n', '--name'), str, '-splitted', 'Name to append on the original directory'),
        (('--n_jobs',), int, -1, 'Number of jobs used for computation'),
    ]
    for flags, flag_type, default, message in option_rows:
        parser.add_argument(*flags, default=default, type=flag_type, help=message, required=False)
    return parser.parse_args()
def split_and_save(input_file, current_split, args):
    """Split one audio file into chunks of ``args.split_size`` seconds each and
    save them next to the originals under '<split><args.name>/...', appending
    '-<i>' to each chunk's file name.

    Fix: when the waveform length was an exact multiple of the chunk size the
    original appended a zero-length remainder section, producing an empty
    trailing chunk (and an empty output file); the remainder is now only kept
    when non-empty.
    """
    (wav, sr) = torchaudio.load(input_file)
    chunk_size = (args.split_size * sr)
    (quotient, remainder) = divmod(wav.size(1), chunk_size)
    sections = [chunk_size for _ in range(quotient)]
    if remainder > 0:
        # Only keep a trailing section when there is actual audio left.
        sections.append(remainder)
    splitted_wav = torch.split(wav, split_size_or_sections=sections, dim=1)
    check_sum = 0
    for (i, w) in enumerate(splitted_wav):
        check_sum += w.size(1)
        file_name = os.path.basename(input_file).split('.')[0]
        new_file_name = file_name.replace(file_name, ((file_name + '-') + str(i)))
        # Mirror the source layout under '<split><name>' (and the output root
        # when it differs from the input root).
        # NOTE(review): str.replace on the whole path assumes file_name does not
        # also occur in a directory component — TODO confirm.
        new_file_path = input_file.replace(current_split, (current_split + args.name))
        new_file_path = new_file_path.replace(file_name, new_file_name)
        if (args.input_path != args.output_path):
            new_file_path = new_file_path.replace(args.input_path, args.output_path)
        os.makedirs(os.path.dirname(new_file_path), exist_ok=True)
        torchaudio.save(new_file_path, w, sr)
    # All chunks together must account for every input sample.
    assert (check_sum == wav.size(1))
def generate_splits(args, tr_set, audio_extension):
    """Split every audio file of the chosen dataset subsets in parallel.

    Resolves each subset directory case-insensitively, then runs
    ``split_and_save`` over all matching audio files.

    Fix: the original guarded the missing-directory case with
    ``assert NotImplementedError`` — a no-op, since it asserts the (truthy)
    exception class itself — so an absent directory fell through silently.
    It now raises.
    """
    for (i, s) in enumerate(tr_set):
        # Dataset directories may be lower- or upper-case on disk.
        if os.path.isdir(os.path.join(args.input_path, s.lower())):
            s = s.lower()
        elif os.path.isdir(os.path.join(args.input_path, s.upper())):
            s = s.upper()
        else:
            raise NotImplementedError('split directory not found: ' + s)
        print('')
        todo = list(Path(os.path.join(args.input_path, s)).rglob(('*' + audio_extension)))
        print(f'Preprocessing data in: {s}, {len(todo)} audio files found.')
        print('Splitting audio to shorter length...', flush=True)
        Parallel(n_jobs=args.n_jobs)((delayed(split_and_save)(str(file), s, args) for file in tqdm(todo)))
    print('All done, saved at', args.output_path, 'exit.')
def main():
    """Interactive entry point: choose dataset splits, then generate chunks."""
    args = get_preprocess_args()
    lowered = args.input_path.lower()
    if 'librilight' in lowered:
        SETS = ['small', 'medium', 'large']
    elif 'librispeech' in lowered:
        SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
    elif 'timit' in lowered:
        SETS = ['TRAIN', 'TEST']
    else:
        raise NotImplementedError
    for idx, split_name in enumerate(SETS):
        print('\t', idx, ':', split_name)
    answer = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [SETS[int(token)] for token in answer.split(' ')]
    generate_splits(args, tr_set, args.audio_extension)
def main():
    """Convert preprocessed TIMIT pickle features into Kaldi ark/scp files.

    Loads the pickled feature/id lists, keys them as 'SPEAKER_UTT', then for
    each output set writes a compressed ark plus feats.scp via a copy-feats
    pipe, keeping only utterances listed in the source set's feats.scp.
    """
    if (not os.path.isdir(KALDI_ROOT)):
        print('CHANGE THIS TO YOUR OWN KALDI ROOT: ', KALDI_ROOT)
        exit()
    if (not os.path.isdir(INPUT_PATH)):
        print('Invalid path for the preprocessed timit dataset: ', INPUT_PATH)
        print("Please run 'preprocess_timit.py' first!")
        exit()
    if (not os.path.isdir(SOURCE_DIR)):
        print('Invalid path for the source directory: ', SOURCE_DIR)
        print('Please read the Wiki page for instructions!')
        exit()
    if (not os.path.isdir(OUTPUT_PATH)):
        os.mkdir(OUTPUT_PATH)
    (x, ids) = ([], [])
    for s in INPUT_SETS:
        with open(os.path.join(INPUT_PATH, (s + '_x.pkl')), 'rb') as fp:
            x += pickle.load(fp)
        with open(os.path.join(INPUT_PATH, (s + '_id.pkl')), 'rb') as fp:
            ids += pickle.load(fp)
    assert (len(x) == len(ids))
    print('[TIMIT-to-ARK] - ', 'Total Dataset len:', len(x))
    all_inputs = {}
    for (idx, i) in enumerate(ids):
        # Build a 'SPEAKER_UTTERANCE' key from the last two path components.
        # NOTE(review): strip('.wav') removes any of the characters '.','w','a','v'
        # from both ends, not the literal suffix — ids starting/ending with those
        # characters would be mangled; confirm against the actual id format.
        i = str(i).strip('.wav').split('/')
        i = ((i[(- 2)].upper() + '_') + i[(- 1)].upper())
        all_inputs[i] = np.asarray(x[idx])
    for s in OUTPUT_SETS:
        if (not os.path.isdir(SOURCE_DIR)):
            raise NotADirectoryError('Source directory does not exist!', SOURCE_DIR)
        if (not os.path.isdir(((OUTPUT_PATH + '/') + str(s)))):
            os.mkdir(((OUTPUT_PATH + '/') + str(s)))
        partial_outputs = {}
        with open(os.path.join(SOURCE_DIR, (s + '/feats.scp')), 'r') as f:
            lines = f.readlines()
        for line in lines:
            # feats.scp lines start with the utterance key.
            line = line.split(' ')[0]
            if (line in all_inputs):
                partial_outputs[line] = all_inputs[line]
        assert (len(lines) == len(partial_outputs))
        # Pipe matrices through Kaldi's copy-feats to produce ark + scp.
        ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:{}/raw_mel_{}.ark,{}/{}/feats.scp'.format(OUTPUT_PATH, str(s), OUTPUT_PATH, str(s))
        with kaldi_io.open_or_fd(ark_scp_output, 'wb') as f:
            for (key, mat) in tqdm(partial_outputs.items()):
                kaldi_io.write_mat(f, mat, key=key)
    print((("[TIMIT-to-ARK] - All done, saved at '" + str(OUTPUT_PATH)) + "' exit."))
class ApcAudioDataset(FeatDataset):
    """FeatDataset variant for APC pretraining: batches are padded feature
    tensors returned together with the per-utterance lengths."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        super(ApcAudioDataset, self).__init__(extracter, task_config, bucket_size, file_path, sets, max_timestep, libri_root, **kwargs)

    def _load_feat(self, feat_path):
        """Load one utterance's features: pre-extracted .npy when no
        LibriSpeech root is set, otherwise extract on the fly from audio."""
        if (self.libri_root is None):
            return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
        else:
            (wav, _) = torchaudio.load(os.path.join(self.libri_root, feat_path))
            feat = self.extracter(wav)
            return feat

    def __getitem__(self, index):
        """Return (padded_batch, lengths) for bucket ``index``.

        ``self.X[index]`` is a bucket (list) of file paths; each loaded
        feature is optionally cropped to ``sample_length`` via ``_sample``.
        """
        x_batch = [self._sample(self._load_feat(x_file)) for x_file in self.X[index]]
        x_len = [len(x_b) for x_b in x_batch]
        x_pad_batch = pad_sequence(x_batch, batch_first=True)
        return (x_pad_batch, x_len)
class FeatDataset(Dataset):
    'Base On-the-fly feature dataset by Andy T. Liu'

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        """Build length-sorted buckets of file paths from the sets' csvs.

        max_timestep > 0 drops utterances longer than it; max_timestep < 0
        drops utterances shorter than its absolute value.
        """
        super(FeatDataset, self).__init__()
        self.extracter = extracter
        self.task_config = task_config
        self.libri_root = libri_root
        self.sample_length = task_config['sequence_length']
        if (self.sample_length > 0):
            print('[Dataset] - Sampling random segments for training, sample length:', self.sample_length)
        self.root = file_path
        tables = [pd.read_csv(os.path.join(file_path, (s + '.csv'))) for s in sets]
        # Longest-first ordering keeps each bucket's utterances similar in length.
        self.table = pd.concat(tables, ignore_index=True).sort_values(by=['length'], ascending=False)
        print('[Dataset] - Training data from these sets:', str(sets))
        if (max_timestep > 0):
            self.table = self.table[(self.table.length < max_timestep)]
        if (max_timestep < 0):
            self.table = self.table[(self.table.length > ((- 1) * max_timestep))]
        X = self.table['file_path'].tolist()
        X_lens = self.table['length'].tolist()
        self.num_samples = len(X)
        print('[Dataset] - Number of individual training instances:', self.num_samples)
        # Group paths into buckets of bucket_size; a bucket containing very long
        # utterances (and no segment sampling) is halved to bound memory.
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            batch_x.append(x)
            batch_len.append(x_len)
            if (len(batch_x) == bucket_size):
                if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME) and (self.sample_length == 0)):
                    self.X.append(batch_x[:(bucket_size // 2)])
                    self.X.append(batch_x[(bucket_size // 2):])
                else:
                    self.X.append(batch_x)
                (batch_x, batch_len) = ([], [])
        # Keep the remainder only if it has more than one utterance.
        if (len(batch_x) > 1):
            self.X.append(batch_x)

    def _sample(self, x):
        """Crop a random fixed-length segment when sampling is enabled."""
        if (self.sample_length <= 0):
            return x
        if (len(x) < self.sample_length):
            return x
        idx = random.randint(0, (len(x) - self.sample_length))
        return x[idx:(idx + self.sample_length)]

    def __len__(self):
        # One item per bucket, not per utterance.
        return len(self.X)

    def collate_fn(self, items):
        # The DataLoader uses batch_size=1 over buckets; unwrap that layer.
        items = items[0]
        return items
class WaveDataset(Dataset):
    'Base waveform dataset for Disiller by Heng-Jui Chang'

    def __init__(self, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        """Build length-sorted buckets of file paths from the sets' csvs.

        max_timestep > 0 drops utterances longer than it; max_timestep < 0
        drops utterances shorter than its absolute value.
        """
        super().__init__()
        self.task_config = task_config
        self.libri_root = libri_root
        self.sample_length = task_config['sequence_length']
        if (self.sample_length > 0):
            print('[Dataset] - Sampling random segments for training, sample length:', self.sample_length)
        self.root = file_path
        tables = [pd.read_csv(os.path.join(file_path, (s + '.csv'))) for s in sets]
        # Longest-first ordering keeps each bucket's utterances similar in length.
        self.table = pd.concat(tables, ignore_index=True).sort_values(by=['length'], ascending=False)
        print('[Dataset] - Training data from these sets:', str(sets))
        if (max_timestep > 0):
            self.table = self.table[(self.table.length < max_timestep)]
        if (max_timestep < 0):
            self.table = self.table[(self.table.length > ((- 1) * max_timestep))]
        X = self.table['file_path'].tolist()
        X_lens = self.table['length'].tolist()
        self.num_samples = len(X)
        print('[Dataset] - Number of individual training instances:', self.num_samples)
        # Group paths into buckets of bucket_size; a bucket containing very long
        # utterances (and no segment sampling) is halved to bound memory.
        self.X = []
        (batch_x, batch_len) = ([], [])
        for (x, x_len) in zip(X, X_lens):
            batch_x.append(x)
            batch_len.append(x_len)
            if (len(batch_x) == bucket_size):
                if ((bucket_size >= 2) and (max(batch_len) > HALF_BATCHSIZE_TIME) and (self.sample_length == 0)):
                    self.X.append(batch_x[:(bucket_size // 2)])
                    self.X.append(batch_x[(bucket_size // 2):])
                else:
                    self.X.append(batch_x)
                (batch_x, batch_len) = ([], [])
        # Keep the remainder only if it has more than one utterance.
        if (len(batch_x) > 1):
            self.X.append(batch_x)

    def _sample(self, x):
        """Crop a random fixed-length segment when sampling is enabled."""
        if (self.sample_length <= 0):
            return x
        if (len(x) < self.sample_length):
            return x
        idx = random.randint(0, (len(x) - self.sample_length))
        return x[idx:(idx + self.sample_length)]

    def __len__(self):
        # One item per bucket, not per utterance.
        return len(self.X)

    def collate_fn(self, items):
        # The DataLoader uses batch_size=1 over buckets; unwrap that layer.
        items = items[0]
        assert (len(items) == 4), '__getitem__ should return (wave_input, wave_orig, wave_len, pad_mask)'
        return items
def freeze_model(model):
    """Disable gradient computation for every parameter of ``model``."""
    for p in model.parameters():
        p.requires_grad_(False)
class UpstreamPretrainExpert(nn.Module):
    """
    The Distiller pretrain expert: owns the DistillerForPretrain model, its
    train dataloader, and the checkpoint/logging hooks used by the Runner.
    """

    def __init__(self, datarc, upstream_config, device='cuda', multi_gpu=False, **kwargs):
        super().__init__()
        self.datarc = datarc
        self.device = device
        self.multi_gpu = multi_gpu
        # upstream_config is either a yaml path (fresh run) or an already-parsed
        # dict (resuming from a previous experiment's checkpoint).
        if (type(upstream_config) == str):
            self.upstream_config = yaml.load(open(upstream_config, 'r'), Loader=yaml.FullLoader)
            print('[UpstreamPretrainExpert] - Using upstream config from:', upstream_config)
        elif (type(upstream_config) == dict):
            self.upstream_config = upstream_config
            print('[UpstreamPretrainExpert] - Using upstream config from the previous experiment.')
        else:
            raise ValueError
        self._get_train_dataloader()
        print('[UpstreamPretrainExpert] - Initializing model...')
        model_config = DistillerConfig(self.upstream_config['distiller'])
        self.model = DistillerForPretrain(model_config, edict(self.upstream_config['teacher']))
        if self.multi_gpu:
            self.model = torch.nn.DataParallel(self.model)
            print(('[UpstreamPretrainExpert] - Multi-GPU training Enabled: ' + str(torch.cuda.device_count())))
        print(('[UpstreamPretrainExpert] - Number of parameters: ' + str(sum((p.numel() for p in self.model.parameters() if p.requires_grad)))))

    def _get_train_dataloader(self):
        # One dataset item is a full bucket, hence batch_size=1 here.
        dataset = OnlineWaveDataset(self.upstream_config['task'], self.datarc['train_batch_size'], target_level=self.upstream_config['audio']['target_level'], **self.datarc)
        self.dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    def load_model(self, all_states):
        """Restore distiller weights from a checkpoint state dict."""
        if self.multi_gpu:
            self.model.module.distiller.load_state_dict(all_states['Distiller'])
        else:
            self.model.distiller.load_state_dict(all_states['Distiller'])

    def add_state_to_save(self, all_states):
        """Insert distiller weights and upstream config into the checkpoint dict."""
        all_states['Distiller'] = (self.model.float().distiller.state_dict() if (not self.multi_gpu) else self.model.float().module.distiller.state_dict())
        all_states['Config'] = self.upstream_config
        return all_states

    def get_train_dataloader(self):
        return self.dataloader

    # NOTE(review): mutable default `records={}` is shared across calls; callers
    # here always pass records explicitly, so it is harmless in practice.
    def forward(self, data, records={}, global_step=0, log_step=1000, **kwargs):
        """
        Args:
            data:
                [wave_input, wave_orig, wave_len, pad_mask]

            records:
                dict; contents stored here are logged to Tensorboard
                later by self.log_records every log_step

        Return:
            (loss, records)
        """
        (wave_input, wave_orig, wave_len, pad_mask) = data
        wave_input = wave_input.to(self.device)
        wave_len = wave_len.to(self.device)
        pad_mask = pad_mask.type(wave_input.dtype).to(self.device)
        # Only compute the (more expensive) logging statistics on log steps.
        (loss, other_res) = self.model(wave_input, wave_orig, wave_len, pad_mask, return_other=((global_step % log_step) == 0))
        if ((global_step % log_step) == 0):
            for (key, value) in other_res.items():
                if isinstance(value, torch.Tensor):
                    value = float(value.mean().cpu().item())
                records[key] = value
        return (loss, records)

    def on_before_zero_grad(self):
        # Runner hook called right before optimizer.zero_grad(); no-op here.
        pass

    def log_records(self, records, logger, prefix, global_step, **kwargs):
        """
        Log recorded contents to Tensorboard.

        Args:
            records: dict of contents already collected by forward()
            logger: Tensorboard SummaryWriter
            prefix: tag prefix, eg. 'distiller/train-'
            global_step: global_step in runner, used as the Tensorboard x-axis
        """
        for (key, values) in records.items():
            # Multi-dimensional tensors are logged as images, floats as scalars.
            if (isinstance(values, torch.Tensor) and (len(values.shape) > 1)):
                logger.add_image(f'{prefix}{key}', values, global_step=global_step)
            elif isinstance(values, float):
                logger.add_scalar(f'{prefix}{key}', values, global_step=global_step)
class DistillerForPretrain(nn.Module):
    """
    Distiller for pretraining: trains a student DistillerModel to predict
    hidden states of a frozen s3prl teacher (e.g. HuBERT / wav2vec 2.0).
    """

    def __init__(self, config: DistillerConfig, teacher_config: edict):
        super().__init__()
        self.config = config
        self.distiller = DistillerModel(config)
        self.teacher_config = teacher_config
        teacher = torch.hub.load('s3prl/s3prl', teacher_config.model)
        # Disable layerdrop so the teacher always exposes every layer's hiddens.
        if ((teacher_config.model.find('hubert') >= 0) or (teacher_config.model.find('wav2vec2') >= 0)):
            teacher.model.encoder.layerdrop = 0
            print("[DistillerForPretrain] - Disabled teacher's encoder layerdrop")
        # The student cannot distill more layers than the teacher provides.
        assert (self.distiller.n_tasks <= teacher_config.n_layers), (self.distiller.n_tasks, teacher_config.n_layers)
        self.teacher = teacher
        freeze_model(self.teacher)
        print('[DistillerForPretrain] - Using {} as teacher with {} layers'.format(teacher_config.model, teacher_config.n_layers))
        # Per-element reconstruction loss; reduced manually in compute_loss.
        if (config.loss_type == 'l1'):
            self.loss_func = nn.L1Loss(reduction='none')
        elif (config.loss_type == 'l2'):
            self.loss_func = nn.MSELoss(reduction='none')
        else:
            raise NotImplementedError(config.loss_type)
        self.cosine_loss = config.cosine_loss
        if (self.cosine_loss > 0):
            print('[DistillerForPretrain] - Enabled cosine similarity loss.')
        # Optionally warm-start student modules from the teacher's weights.
        if config.init_teacher_conv_layers:
            print('[DistillerForPretrain] - Initializing feature extractor from teacher')
            self.distiller.feature_extractor.load_state_dict(self.teacher.model.feature_extractor.state_dict())
            if (self.distiller.post_extract_proj is not None):
                self.distiller.post_extract_proj.load_state_dict(self.teacher.model.post_extract_proj.state_dict())
        if config.init_teacher_encoder_layers:
            print('[DistillerForPretrain] - Initializing encoder from teacher')
            self.distiller.encoder.pos_conv.load_state_dict(self.teacher.model.encoder.pos_conv.state_dict())
            for l in range(config.encoder_layers):
                self.distiller.encoder.layers[l].load_state_dict(self.teacher.model.encoder.layers[l].state_dict())

    def forward(self, wave_input: torch.Tensor, wave_orig: list, wave_len: torch.Tensor, pad_mask: torch.Tensor, return_other: bool=False):
        """
        Forward function.
        Input:
            wave_input: FloatTensor (B x T_wave)
            wave_orig: List of FloatTensor
            wave_len: LongTensor (B)
            pad_mask: FloatTensor (B x T)
            return_other: Bool (returns other information for logging)
        """
        (feat, feat_final, pred, pad_mask) = self.distiller(wave_input, pad_mask)
        with torch.no_grad():
            wave_orig = [wave.to(wave_input.device) for wave in wave_orig]
            # Teacher always runs in full precision, even under amp.
            with torch.cuda.amp.autocast(False):
                teacher_hiddens = self.teacher(wave_orig)
            if (self.config.task_emb_type == 'none'):
                # Single target layer; add a layer axis of size 1 -> B x 1 x T x D.
                teacher_hiddens = teacher_hiddens['hidden_states'][self.config.n_tasks]
                teacher_hiddens = teacher_hiddens.unsqueeze(1)
            else:
                # Multiple target layers, stacked to B x N x T x D.
                if (self.config.task_emb_type in ['expand-last', 'hnet', 'self-hidden']):
                    teacher_hiddens = [teacher_hiddens['hidden_states'][i] for i in self.distiller.pred_layer_id]
                else:
                    teacher_hiddens = teacher_hiddens['hidden_states'][1:]
                teacher_hiddens = torch.stack(teacher_hiddens, dim=1)
        (total_loss, rec_loss, rec_layer_loss, feat_pen, sim_loss, sim_layer_loss) = self.compute_loss(feat, pred, teacher_hiddens, return_other)
        if return_other:
            with torch.no_grad():
                other_res = {'rec_loss': rec_loss, 'feat_pen': feat_pen, 'sim_loss': sim_loss, 'norm_feat_final': feat_final.pow(2).mean()}
                # Mean absolute teacher activation per target layer (for logging).
                teacher_norm = torch.abs(teacher_hiddens).mean((0, 2, 3))
                if (self.config.task_emb_type == 'none'):
                    other_res[f'rec_l{self.config.n_tasks}'] = rec_layer_loss[0]
                    other_res[f'tar_norm_l{self.config.n_tasks}'] = teacher_norm[0]
                    if (sim_layer_loss is not None):
                        other_res[f'sim_l{self.config.n_tasks}'] = sim_layer_loss[0]
                else:
                    for i in range(self.config.n_tasks):
                        layer_id = (i + 1)
                        if (self.config.task_emb_type in ['expand-last', 'hnet', 'self-hidden']):
                            layer_id = self.distiller.pred_layer_id[i]
                        other_res[f'rec_l{layer_id}'] = rec_layer_loss[i]
                        other_res[f'tar_norm_l{layer_id}'] = teacher_norm[i]
                        if (sim_layer_loss is not None):
                            other_res[f'sim_l{layer_id}'] = sim_layer_loss[i]
                    if (self.config.task_emb_type not in ['expand-last', 'hnet', 'self-hidden']):
                        other_res['norm_task_emb'] = self.distiller.task_embedding.weight.pow(2).mean()
        else:
            other_res = None
        return (total_loss, other_res)

    def compute_loss(self, feat, pred, target, return_other=False):
        """
        Computes loss.
        Inputs:
            feat: B x T x D
            pred: B x N x T x D
            target: B x N x T x D
        Returns:
            (total_loss, rec_loss, rec_layer_loss, feat_pen, sim_loss, sim_layer_loss)
            where the *_layer_loss entries are per-layer values (or None when
            return_other is False / cosine loss disabled).
        """
        assert (pred.shape == target.shape), (pred.shape, target.shape)
        rec_loss = self.loss_func(pred, target)
        if return_other:
            with torch.no_grad():
                # Per-target-layer reconstruction loss for logging.
                rec_layer_loss = rec_loss.mean((0, 2, 3))
        else:
            rec_layer_loss = None
        rec_loss = rec_loss.mean()
        if (self.cosine_loss > 0):
            # -log sigmoid of cosine similarity between prediction and target.
            sim_loss = (- F.logsigmoid(F.cosine_similarity(pred, target, dim=(- 1))))
            if return_other:
                with torch.no_grad():
                    sim_layer_loss = sim_loss.mean((0, 2))
            else:
                sim_layer_loss = None
            sim_loss = sim_loss.mean()
        else:
            sim_loss = 0
            sim_layer_loss = None
        # L2 penalty on the convolutional features (as in wav2vec 2.0 training).
        feat_pen = feat.float().pow(2).mean()
        total_loss = ((rec_loss + (feat_pen * self.config.feat_pen_loss)) + (sim_loss * self.cosine_loss))
        return (total_loss, rec_loss, rec_layer_loss, feat_pen, sim_loss, sim_layer_loss)
class KaldiAcousticDataset(FeatDataset):
    """Bucketed dataset producing masked-acoustic-model training data from
    kaldi-style features (pre-extracted .npy, or extracted on the fly)."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        super(KaldiAcousticDataset, self).__init__(extracter, task_config, bucket_size, file_path, sets, max_timestep, libri_root, **kwargs)

    def _load_feat(self, feat_path):
        """Load pre-extracted features (.npy) or extract on the fly from audio."""
        if (self.libri_root is None):
            return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
        else:
            (wav, _) = torchaudio.load(os.path.join(self.libri_root, feat_path))
            feat = self.extracter(wav.squeeze())
            return feat

    def __getitem__(self, index):
        # One index == one bucket; crop, pad, then build MAM training tensors.
        x_batch = [self._sample(self._load_feat(x_file)) for x_file in self.X[index]]
        x_pad_batch = pad_sequence(x_batch, batch_first=True)
        return generate_masked_acoustic_model_data(spec=(x_pad_batch,), config=self.task_config)
class OnlineAcousticDataset(FeatDataset):
    """Bucketed dataset that loads raw waveforms and extracts features on the
    fly for masked acoustic model training.

    ``max_timestep`` / ``sequence_length`` are given in frames and converted
    to waveform samples (x160, i.e. a 10 ms hop at 16 kHz — TODO confirm the
    160 factor matches the extracter's hop size).
    """

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, target_level=(- 25), **kwargs):
        # Convert the frame-based limit to waveform samples before bucketing.
        max_timestep *= 160
        super(OnlineAcousticDataset, self).__init__(extracter, task_config, bucket_size, file_path, sets, max_timestep, libri_root, **kwargs)
        self.target_level = target_level
        # Random-crop length is also in waveform samples now.
        self.sample_length = (self.sample_length * 160)

    def _normalize_wav_decibel(self, wav):
        """Normalize the signal to the target dB level.

        Normalization is skipped when ``target_level`` is disabled; configs may
        express "disabled" either as the string 'None' (quoted in yaml) or as a
        real Python None, so both are accepted (the original only handled the
        string form and would raise on a real None).
        """
        if (self.target_level is None) or (self.target_level == 'None'):
            return wav
        rms = wav.pow(2).mean().pow(0.5)
        scalar = ((10 ** (self.target_level / 20)) / (rms + 1e-10))
        wav = (wav * scalar)
        return wav

    def _load_feat(self, feat_path):
        """Load pre-extracted features (.npy) or a level-normalized waveform."""
        if (self.libri_root is None):
            return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
        else:
            (wav, _) = torchaudio.load(os.path.join(self.libri_root, feat_path))
            wav = self._normalize_wav_decibel(wav.squeeze())
            return wav

    def _process_x_pad_batch(self, x_pad_batch):
        """Run the extracter (waveform input) and build MAM training tensors."""
        if (self.libri_root is not None):
            # (B, T) -> (B, 1, T): add the channel dim the extracter expects.
            x_pad_batch = x_pad_batch.unsqueeze(1)
            feat_list = self.extracter(x_pad_batch)
        else:
            # Pre-extracted features: wrap in a tuple to match the extracter's
            # output format, consistent with KaldiAcousticDataset's
            # spec=(x_pad_batch,). The original left feat_list unbound here,
            # which raised NameError whenever libri_root was None.
            feat_list = (x_pad_batch,)
        return generate_masked_acoustic_model_data(feat_list, config=self.task_config)

    def __getitem__(self, index):
        # One index == one bucket; crop, pad, then extract features + masks.
        x_batch = [self._sample(self._load_feat(x_file)) for x_file in self.X[index]]
        x_pad_batch = pad_sequence(x_batch, batch_first=True)
        return self._process_x_pad_batch(x_pad_batch)
class UpstreamPretrainExpert(nn.Module):
    """
    The Mockingjay pretrain expert: owns the TransformerForMaskedAcousticModel,
    its train dataloader, and the checkpoint/logging hooks used by the Runner.
    """

    def __init__(self, datarc, upstream_config, device='cuda', multi_gpu=False, **kwargs):
        super(UpstreamPretrainExpert, self).__init__()
        self.datarc = datarc
        self.device = device
        self.multi_gpu = multi_gpu
        # upstream_config is either a yaml path (fresh run) or an already-parsed
        # dict (resuming from a previous experiment's checkpoint).
        if (type(upstream_config) == str):
            self.upstream_config = yaml.load(open(upstream_config, 'r'), Loader=yaml.FullLoader)
            print('[UpstreamPretrainExpert] - Using upstream config from:', upstream_config)
        elif (type(upstream_config) == dict):
            self.upstream_config = upstream_config
            print('[UpstreamPretrainExpert] - Using upstream config from the previous experiment.')
        else:
            raise ValueError
        # Three data modes: kaldi on-the-fly extraction, online preprocessor,
        # or features pre-extracted to disk.
        if (('libri_root' in self.datarc) and ('kaldi' in self.upstream_config['audio'])):
            print('[UpstreamPretrainExpert] - Using kaldi feature extracter, on-the-fly feature extraction')
            (extracter, input_dim, _) = get_extracter(self.upstream_config['audio'])
            output_dim = None
        elif ('libri_root' in self.datarc):
            print('[UpstreamPretrainExpert] - Using online preprocessor, on-the-fly feature extraction')
            (extracter, input_dim, output_dim) = get_preprocessor(self.upstream_config['audio'])
        else:
            print('[UpstreamPretrainExpert] - Using features pre-extracted and saved')
            (extracter, input_dim) = (None, self.upstream_config['transformer']['input_dim'])
            output_dim = None
        print('[UpstreamPretrainExpert] - Input dim:', input_dim)
        self._get_train_dataloader(extracter)
        print('[UpstreamPretrainExpert] - Initializing model...')
        model_config = TransformerConfig(self.upstream_config['transformer'])
        setattr(model_config, 'loss', self.upstream_config['task']['loss'])
        self.model = TransformerForMaskedAcousticModel(model_config, input_dim, output_dim=output_dim)
        if self.multi_gpu:
            self.model = torch.nn.DataParallel(self.model)
            print(('[UpstreamPretrainExpert] - Multi-GPU training Enabled: ' + str(torch.cuda.device_count())))
        print(('[UpstreamPretrainExpert] - Number of parameters: ' + str(sum((p.numel() for p in self.model.parameters() if p.requires_grad)))))

    def _get_train_dataloader(self, extracter):
        # Online dataset when raw audio is available and we're not in kaldi
        # mode; otherwise the (kaldi / pre-extracted) feature dataset.
        if (('libri_root' in self.datarc) and ('kaldi' not in self.upstream_config['audio'])):
            dataset = OnlineAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], target_level=self.upstream_config['audio']['target_level'], **self.datarc)
        else:
            dataset = KaldiAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], **self.datarc)
        # One dataset item is a full bucket, hence batch_size=1 here.
        self.dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)

    def load_model(self, init_ckpt):
        """Restore Transformer + SpecHead weights from a checkpoint state dict."""
        assert (('Transformer' in init_ckpt) and ('SpecHead' in init_ckpt))
        if self.multi_gpu:
            self.model.module.Transformer.load_state_dict(init_ckpt['Transformer'])
            self.model.module.SpecHead.load_state_dict(init_ckpt['SpecHead'])
        else:
            self.model.Transformer.load_state_dict(init_ckpt['Transformer'])
            self.model.SpecHead.load_state_dict(init_ckpt['SpecHead'])

    def loss_to_device(self):
        # Move the loss module onto the training device (called by the Runner).
        (self.model.loss.to(self.device) if (not self.multi_gpu) else self.model.module.loss.to(self.device))

    def add_state_to_save(self, all_states):
        """Insert SpecHead/Transformer weights and upstream config into the checkpoint dict."""
        all_states['SpecHead'] = (self.model.SpecHead.state_dict() if (not self.multi_gpu) else self.model.module.SpecHead.state_dict())
        all_states['Transformer'] = (self.model.Transformer.state_dict() if (not self.multi_gpu) else self.model.module.Transformer.state_dict())
        all_states['Upstream_Config'] = self.upstream_config
        return all_states

    def get_train_dataloader(self):
        return self.dataloader

    # NOTE(review): mutable default `records={}` is shared across calls; callers
    # here always pass records explicitly, so it is harmless in practice.
    def forward(self, data, records={}, global_step=0, log_step=1000, **kwargs):
        """
        Args:
            data:
                [spec_masked, pos_enc, mask_label, attn_mask, spec_target]

            records:
                dict; contents stored here are logged to Tensorboard
                later by self.log_records every log_step

        Return:
            (loss, records)
        """
        (spec_masked, pos_enc, mask_label, attn_mask, spec_target) = (data[0], data[1], data[2], data[3], data[4])
        spec_masked = spec_masked.to(self.device)
        # pos_enc may be per-sample (3-D) or shared (2-D, broadcast over batch).
        if (pos_enc.dim() == 3):
            pos_enc = pos_enc.to(self.device)
        elif (pos_enc.dim() == 2):
            pos_enc = pos_enc.to(self.device).expand(spec_masked.size(0), *pos_enc.size())
        mask_label = mask_label.to(self.device)
        attn_mask = attn_mask.to(self.device)
        spec_target = spec_target.to(self.device)
        (loss, pred_spec) = self.model(spec_masked, pos_enc, mask_label, attn_mask, spec_target)
        if ((global_step % log_step) == 0):
            # Record masked / predicted / ground-truth spectrograms as images.
            spec_list = [spec_masked, pred_spec, spec_target]
            name_list = ['mask_spec', 'pred_spec', 'true_spec']
            for i in range(len(spec_list)):
                spec = plot_spectrogram_to_numpy(spec_list[i][0].data.cpu().numpy())
                records[name_list[i]] = spec
        return (loss, records)

    def on_before_zero_grad(self):
        # Runner hook called right before optimizer.zero_grad(); no-op here.
        pass

    def log_records(self, records, logger, prefix, global_step, **kwargs):
        """
        Log recorded spectrogram images to Tensorboard.

        Args:
            records: dict of contents already collected by forward()
            logger: Tensorboard SummaryWriter
            prefix: tag prefix, eg. 'mockingjay/train-'
            global_step: global_step in runner, used as the Tensorboard x-axis
        """
        for (key, values) in records.items():
            logger.add_image(f'{prefix}{key}', values, global_step=global_step)
class TransformerForMaskedAcousticModel(TransformerInitModel):
    """
    Transformer model with the masked acoustic modeling head: the Transformer
    encoder followed by a spectrogram prediction head.

    Params:
        `config`: a TransformerConfig class instance with the configuration to build a new model
        `input_dim`: int, input dimension of model
        `output_dim`: int, output dimension of model (falls back to `input_dim` when None)
        `output_attentions`: If True, also output attentions weights computed by the model at each layer. Default: False
        `keep_multihead_output`: If True, saves output of the multi-head attention module with its gradient.
            This can be used to compute head importance metrics. Default: False

    Inputs:
        `spec_input`: FloatTensor [batch_size, sequence_length, feature_dimension]
            with selected frames masked, as produced by
            `process_train_MAM_data()` in `transformer/mam.py`.
        `pos_enc`: FloatTensor [batch_size, sequence_length, hidden_size],
            generated by `fast_position_encoding()` in `transformer/mam.py`.
        `mask_label`: BoolTensor [batch_size, sequence_length, feature_dimension];
            the loss is computed only on positions where it is set.
        `attention_mask`: optional [batch_size, sequence_length] mask with values in
            [0, 1] for batches of varying utterance lengths.
        `spec_label`: FloatTensor [batch_size, sequence_length, feature_dimension];
            ground-truth spectrogram used as the reconstruction target.
        `head_mask`: optional [num_heads] or [num_layers, num_heads] tensor with values
            between 0 and 1 used to nullify attention heads
            (1.0 => head fully masked, 0.0 => head not masked).

    Outputs:
        if `spec_label` and `mask_label` are not `None`:
            (masked reconstruction loss, predicted spectrogram)
        elif `output_attentions`:
            (all attention weights, predicted spectrogram)
        else:
            (predicted spectrogram [batch_size, sequence_length, output_dim],
             last hidden states)

    Example usage:

    ```python
    spec_input = torch.FloatTensor(spec_frames)
    pos_enc = torch.FloatTensor(position_encoding(seq_len=len(spec_frames)))

    config = TransformerConfig(config)

    model = TransformerForMaskedAcousticModel(config, input_dim, output_dim)
    masked_spec_logits = model(spec_input, pos_enc)
    ```
    """

    def __init__(self, config, input_dim, output_dim, output_attentions=False, keep_multihead_output=False):
        super(TransformerForMaskedAcousticModel, self).__init__(config, output_attentions)
        self.Transformer = TransformerModel(config, input_dim, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        # Prediction head reconstructs spectrograms; defaults to the input dim.
        self.SpecHead = TransformerSpecPredictionHead(config, (output_dim if (output_dim is not None) else input_dim))
        self.apply(self.init_Transformer_weights)
        # Reconstruction criterion is selected by config.loss ('L1' default).
        loss = {'L1': nn.L1Loss(), 'MSE': nn.MSELoss()}
        self.loss = (loss[config.loss] if hasattr(config, 'loss') else loss['L1'])

    def forward(self, spec_input, pos_enc, mask_label=None, attention_mask=None, spec_label=None, head_mask=None):
        outputs = self.Transformer(spec_input, pos_enc, attention_mask, output_all_encoded_layers=False, head_mask=head_mask)
        if self.output_attentions:
            (all_attentions, sequence_output) = outputs
        else:
            sequence_output = outputs
        (pred_spec, pred_state) = self.SpecHead(sequence_output)
        if ((spec_label is not None) and (mask_label is not None)):
            assert (mask_label.sum() > 0), 'Without any masking, loss might go NaN. Check your pretrain data processing (s3prl/pretrain/mockingjay/task.py)'
            # Loss only over the masked positions.
            masked_spec_loss = self.loss(pred_spec.masked_select(mask_label), spec_label.masked_select(mask_label))
            return (masked_spec_loss, pred_spec)
        elif self.output_attentions:
            return (all_attentions, pred_spec)
        return (pred_spec, pred_state)
class ApcAudioDataset(FeatDataset):
    """Bucketed dataset for APC pretraining: yields (padded_batch, lengths)."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        super().__init__(extracter, task_config, bucket_size, file_path, sets, max_timestep, libri_root, **kwargs)

    def _load_feat(self, feat_path):
        """Load a pre-extracted feature (.npy) or extract on the fly from audio."""
        if self.libri_root is None:
            return torch.FloatTensor(np.load(os.path.join(self.root, feat_path)))
        wav, _ = torchaudio.load(os.path.join(self.libri_root, feat_path))
        return self.extracter(wav)

    def __getitem__(self, index):
        # One index == one bucket of utterances.
        feats = [self._sample(self._load_feat(path)) for path in self.X[index]]
        lengths = [len(f) for f in feats]
        padded = pad_sequence(feats, batch_first=True)
        return (padded, lengths)
class Runner():
    """
    Used to handle high-level concepts of a ML experiment
    eg. training loop, evaluation loop, upstream propagation, optimization, tensorboard logging, checkpoint saving
    """

    def __init__(self, args, config):
        self.args = args
        self.config = config
        self.logger = SummaryWriter(args.expdir)
        # Resume state from --init_ckpt if given; empty dict means fresh start.
        self.init_ckpt = (torch.load(self.args.init_ckpt, map_location='cpu') if self.args.init_ckpt else {})
        self.upstream = self._get_upstream()

    def _get_upstream(self):
        """Dynamically import and instantiate the pretrain expert for args.upstream."""
        # When resuming, reuse the upstream config stored in the checkpoint.
        init_upstream = self.init_ckpt.get('Upstream_Config')
        if init_upstream:
            self.args.upstream_config = init_upstream
        module_path = f'pretrain.{self.args.upstream}.pretrain_expert'
        Upstream = getattr(importlib.import_module(module_path), 'UpstreamPretrainExpert')
        upstream = Upstream(self.config['pretrain_expert']['datarc'], self.args.upstream_config, self.args.device, self.args.multi_gpu).to(self.args.device)
        # Verify the expert implements the interface the runner relies on.
        assert hasattr(upstream, 'device')
        assert hasattr(upstream, 'forward')
        assert hasattr(upstream, 'load_model')
        assert hasattr(upstream, 'add_state_to_save')
        assert hasattr(upstream, 'on_before_zero_grad')
        assert hasattr(upstream, 'get_train_dataloader')
        if (self.init_ckpt != {}):
            print('[Runner] - Loading upstream weights from the previous experiment')
            upstream.load_model(self.init_ckpt)
        # Optional hook (only some experts, e.g. mockingjay, define it).
        if hasattr(upstream, 'loss_to_device'):
            print('[Runner] - Loss to device')
            upstream.loss_to_device()
        return upstream

    def _get_optimizer(self, model_params):
        """Build the optimizer, restoring its state when resuming."""
        optimizer = get_optimizer(model_params, self.config['runner']['total_steps'], self.config['optimizer'])
        if (self.init_ckpt != {}):
            init_optimizer = self.init_ckpt.get('Optimizer')
            assert init_optimizer
            print('[Runner] - Loading optimizer weights from the previous experiment')
            optimizer.load_state_dict(init_optimizer)
        return optimizer

    def _get_scheduler(self, optimizer):
        """Build the LR scheduler, restoring its state when resuming."""
        scheduler = get_scheduler(optimizer, self.config['runner']['total_steps'], self.config['scheduler'])
        if (self.init_ckpt != {}):
            init_scheduler = self.init_ckpt.get('Scheduler')
            assert init_scheduler
            print('[Runner] - Loading scheduler weights from the previous experiment')
            scheduler.load_state_dict(init_scheduler)
        return scheduler

    def train(self):
        """Main training loop: forward/backward with optional fp16 and gradient
        accumulation, OOM recovery, periodic Tensorboard logging and checkpointing."""
        self.upstream.train()
        gradient_accumulate_steps = self.config['runner']['gradient_accumulate_steps']
        train_batch_size = self.config['pretrain_expert']['datarc']['train_batch_size']
        print('[Runner] - Accumulated batch size:', (train_batch_size * gradient_accumulate_steps))
        dataloader = self.upstream.get_train_dataloader()
        # Either n_epochs or total_steps drives the loop; derive the other.
        n_epochs = self.config['runner']['n_epochs']
        if (n_epochs > 0):
            total_steps = int(((n_epochs * len(dataloader.dataset)) / gradient_accumulate_steps))
            print(f'[Runner] - Training for {n_epochs} epochs, which is equivalent to {total_steps} steps')
        else:
            total_steps = self.config['runner']['total_steps']
            n_epochs = int(((total_steps * gradient_accumulate_steps) / len(dataloader.dataset)))
            print(f'[Runner] - Training for {total_steps} steps, which is approximately {n_epochs} epochs')
        assert (total_steps > self.config['runner']['log_step'])
        assert (total_steps > self.config['runner']['save_step'])
        amp = self.config['runner'].get('fp16', False)
        if amp:
            print('[Runner] - Enabled fp16 training')
            scaler = torch.cuda.amp.GradScaler()
        model_params = [self.upstream.model]
        optimizer = self._get_optimizer(model_params)
        scheduler = None
        if self.config.get('scheduler'):
            scheduler = self._get_scheduler(optimizer)
        pbar = tqdm(total=total_steps, dynamic_ncols=True, desc='overall')
        # Fast-forward the progress bar when resuming from a checkpoint.
        init_step = self.init_ckpt.get('Step')
        if init_step:
            pbar.n = init_step
        all_loss = 0
        backward_steps = 0
        records = defaultdict(list)
        prefix = f'{self.args.upstream}/train-'
        while (pbar.n < pbar.total):
            for data in tqdm(dataloader, dynamic_ncols=True, desc='train'):
                try:
                    if (pbar.n >= pbar.total):
                        break
                    global_step = (pbar.n + 1)
                    with torch.cuda.amp.autocast(enabled=amp):
                        (loss, records) = self.upstream(data, records=records, global_step=global_step, log_step=self.config['runner']['log_step'])
                    # Scale the loss so accumulated gradients average correctly.
                    if (gradient_accumulate_steps > 1):
                        loss = (loss / gradient_accumulate_steps)
                    # DataParallel returns one loss per GPU; reduce to a scalar.
                    if self.args.multi_gpu:
                        loss = loss.sum()
                    if amp:
                        scaler.scale(loss).backward()
                    else:
                        loss.backward()
                except RuntimeError as e:
                    # Recover from CUDA OOM: free cache, drop this step's grads.
                    if ('CUDA out of memory' in str(e)):
                        print(f'[Runner] - CUDA out of memory at step {global_step}')
                        torch.cuda.empty_cache()
                        optimizer.zero_grad()
                        continue
                    else:
                        raise
                all_loss += loss.item()
                del loss
                backward_steps += 1
                # Only step the optimizer every gradient_accumulate_steps backwards.
                if ((backward_steps % gradient_accumulate_steps) > 0):
                    continue
                # Unscale before clipping so the clip threshold applies to true grads.
                if amp:
                    scaler.unscale_(optimizer)
                grad_norm = torch.nn.utils.clip_grad_norm_(self.upstream.model.parameters(), self.config['runner']['gradient_clipping'])
                if math.isnan(grad_norm):
                    print(f'[Runner] - Error : grad norm is NaN at global step {global_step}')
                if amp:
                    # scaler.step skips the update itself when grads are inf/nan.
                    scaler.step(optimizer)
                    scaler.update()
                elif (not math.isnan(grad_norm)):
                    optimizer.step()
                self.upstream.on_before_zero_grad()
                optimizer.zero_grad()
                if scheduler:
                    scheduler.step()
                # Periodic Tensorboard logging (and once on the final step).
                if (((global_step % self.config['runner']['log_step']) == 0) or (pbar.n == (pbar.total - 1))):
                    self.logger.add_scalar(f'{prefix}loss', all_loss, global_step=global_step)
                    if hasattr(optimizer, 'get_lr'):
                        self.logger.add_scalar(f'{prefix}lr', optimizer.get_lr()[0], global_step=global_step)
                    else:
                        self.logger.add_scalar(f'{prefix}lr', self.config['optimizer']['lr'], global_step=global_step)
                    self.logger.add_scalar(f'{prefix}gradient-norm', grad_norm, global_step=global_step)
                    self.upstream.log_records(records=records, logger=self.logger, prefix=prefix, global_step=global_step)
                    records = defaultdict(list)
                # Periodic checkpointing, keeping at most max_keep checkpoints.
                if (((global_step % self.config['runner']['save_step']) == 0) or (pbar.n == (pbar.total - 1))):

                    def check_ckpt_num(directory):
                        # Delete the oldest checkpoints so that after saving the
                        # new one at most max_keep remain.
                        max_keep = self.config['runner']['max_keep']
                        ckpt_pths = glob.glob(f'{directory}/states-*.ckpt')
                        if (len(ckpt_pths) >= max_keep):
                            # Sort by the step number embedded in the filename.
                            ckpt_pths = sorted(ckpt_pths, key=(lambda pth: int(pth.split('-')[(- 1)].split('.')[0])))
                            for ckpt_pth in ckpt_pths[:((len(ckpt_pths) - max_keep) + 1)]:
                                os.remove(ckpt_pth)
                    check_ckpt_num(self.args.expdir)
                    all_states = {'Optimizer': optimizer.state_dict(), 'Step': pbar.n, 'Args': self.args, 'Config': self.config}
                    all_states = self.upstream.add_state_to_save(all_states)
                    if scheduler:
                        all_states['Scheduler'] = scheduler.state_dict()
                    name = (f'states-epoch-{n_epochs}.ckpt' if ((pbar.n == (pbar.total - 1)) and (n_epochs > 0)) else f'states-{global_step}.ckpt')
                    save_path = os.path.join(self.args.expdir, name)
                    tqdm.write(f'[Runner] - Save the checkpoint to: {save_path}')
                    torch.save(all_states, save_path)
                # Reset the accumulated loss after each optimizer step.
                all_loss = 0
                pbar.update(1)
        pbar.close()
class KaldiAcousticDataset(_KaldiAcousticDataset):
    """Kaldi-feature dataset variant that applies SpecAugment to each bucket."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, **kwargs):
        super().__init__(extracter, task_config, bucket_size, file_path, sets, max_timestep, libri_root, **kwargs)

    def __getitem__(self, index):
        # One index == one bucket: crop, pad, then apply SpecAugment.
        feats = [self._sample(self._load_feat(path)) for path in self.X[index]]
        padded = pad_sequence(feats, batch_first=True)
        return generate_spec_aug_data(spec=(padded,), config=self.task_config)
class OnlineAcousticDataset(_OnlineAcousticDataset):
    """On-the-fly acoustic dataset variant that applies SpecAugment to each bucket."""

    def __init__(self, extracter, task_config, bucket_size, file_path, sets, max_timestep=0, libri_root=None, target_level=(- 25), **kwargs):
        super().__init__(extracter, task_config, bucket_size, file_path, sets, max_timestep, libri_root, target_level, **kwargs)

    def _process_x_pad_batch(self, x_pad_batch):
        # Extract features from waveforms (adding the channel dim the extracter
        # expects), then apply SpecAugment instead of MAM masking.
        if self.libri_root is not None:
            features = self.extracter(x_pad_batch.unsqueeze(1))
        return generate_spec_aug_data(features, config=self.task_config)
class UpstreamPretrainExpert(MockingjayPretrainExpert):
    """
    The spec augment transformer pretrain expert: reuses the Mockingjay expert
    but swaps in the SpecAugment datasets.
    """

    def __init__(self, datarc, upstream_config, device='cuda', multi_gpu=False, **kwargs):
        super().__init__(datarc, upstream_config, device, multi_gpu, **kwargs)

    def _get_train_dataloader(self, extracter):
        """Build the bucketed train DataLoader (online extraction when raw audio
        is available and not in kaldi mode)."""
        use_online = (('libri_root' in self.datarc) and ('kaldi' not in self.upstream_config['audio']))
        if use_online:
            dataset = OnlineAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], target_level=self.upstream_config['audio']['target_level'], **self.datarc)
        else:
            dataset = KaldiAcousticDataset(extracter, self.upstream_config['task'], self.datarc['train_batch_size'], **self.datarc)
        # One dataset item is a full bucket, hence batch_size=1 here.
        self.dataloader = DataLoader(dataset, batch_size=1, shuffle=True, num_workers=self.datarc['num_workers'], drop_last=False, pin_memory=True, collate_fn=dataset.collate_fn)
class ASR(Problem):
    """Multi-stage recipe for training and evaluating a speech recognizer.

    ======== ====================
    stage    description
    ======== ====================
    0        Parse the corpus and save the metadata file for ASR (waveform path, label...)
    1        Prepare the metadata file for training tokenizer
    2        Train the tokenizer
    3        Train the ASR model
    4        Evaluate the model on multiple test sets
    ======== ====================
    """

    def run(
        self,
        target_dir: str,
        cache_dir: str,
        remove_all_cache: bool = False,
        start: int = 0,
        stop: int = None,
        num_workers: int = 6,
        eval_batch: int = (-1),
        device: str = 'cuda',
        world_size: int = 1,
        rank: int = 0,
        test_ckpt_dir: str = None,
        prepare_data: dict = None,
        prepare_tokenizer_data: dict = None,
        build_tokenizer: dict = None,
        build_dataset: dict = None,
        build_batch_sampler: dict = None,
        build_collate_fn: dict = None,
        build_upstream: dict = None,
        build_featurizer: dict = None,
        build_downstream: dict = None,
        build_model: dict = None,
        build_task: dict = None,
        build_optimizer: dict = None,
        build_scheduler: dict = None,
        save_model: dict = None,
        save_task: dict = None,
        train: dict = None,
        evaluate: dict = None
    ):
        """Run stages ``start`` through ``stop`` (inclusive; ``None`` means run to the end).

        Args:
            target_dir (str): directory that stores all script results
            cache_dir (str): directory caching processed data
                (default: ~/.cache/s3prl/data)
            remove_all_cache (bool): wipe ``cache_dir`` before starting
            start / stop (int): first / last stage to execute
            num_workers (int): workers for every torch DataLoader
            eval_batch (int): limit the number of evaluation batches; -1 = no limit
            device (str): "cpu" or "cuda"
            world_size / rank (int): distributed-training process count and id
            test_ckpt_dir (str): explicit checkpoint dir for stage 4; default is
                ``{target_dir}/train/valid_best``
            **others: the remaining dict arguments (``prepare_data`` ...
                ``evaluate``) are forwarded verbatim to the same-named methods.
        """
        # Snapshot the exact arguments of this invocation for reproducibility.
        yaml_path = ((Path(target_dir) / 'configs') / f'{self._get_time_tag()}.yaml')
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open('w') as f:
            yaml.safe_dump(self._get_current_arguments(), f)

        # Normalize every optional stage config to a dict / concrete path.
        cache_dir: str = (cache_dir or (((Path.home() / '.cache') / 's3prl') / 'data'))
        prepare_data: dict = (prepare_data or {})
        prepare_tokenizer_data: dict = (prepare_tokenizer_data or {})
        build_tokenizer: dict = (build_tokenizer or {})
        build_dataset: dict = (build_dataset or {})
        build_batch_sampler: dict = (build_batch_sampler or {})
        build_collate_fn: dict = (build_collate_fn or {})
        build_upstream: dict = (build_upstream or {})
        build_featurizer: dict = (build_featurizer or {})
        build_downstream: dict = (build_downstream or {})
        build_model: dict = (build_model or {})
        build_task: dict = (build_task or {})
        build_optimizer: dict = (build_optimizer or {})
        build_scheduler: dict = (build_scheduler or {})
        save_model: dict = (save_model or {})
        save_task: dict = (save_task or {})
        train: dict = (train or {})
        evaluate = (evaluate or {})

        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir)

        # Stage pattern: the get_path_only=False call (inside the `if`) does the real
        # work only when the stage is scheduled; the get_path_only=True call runs
        # unconditionally so later stages always know the produced file paths.
        stage_id = 0
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare data')
            (train_csv, valid_csv, test_csvs) = self.prepare_data(
                prepare_data, target_dir, cache_dir, get_path_only=False
            )
        (train_csv, valid_csv, test_csvs) = self.prepare_data(
            prepare_data, target_dir, cache_dir, get_path_only=True
        )

        def check_fn():
            # Verify stage 0 outputs exist before allowing later stages to run.
            assert (Path(train_csv).is_file() and Path(valid_csv).is_file())
            for test_csv in test_csvs:
                assert Path(test_csv).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 1
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: prepare tokenizer data')
            tokenizer_data_path = self.prepare_tokenizer_data(
                prepare_tokenizer_data, target_dir, cache_dir,
                train_csv, valid_csv, test_csvs, get_path_only=False
            )
        tokenizer_data_path = self.prepare_tokenizer_data(
            prepare_tokenizer_data, target_dir, cache_dir,
            train_csv, valid_csv, test_csvs, get_path_only=True
        )

        def check_fn():
            assert Path(tokenizer_data_path).exists()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 2
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: build tokenizer')
            tokenizer_path = self.build_tokenizer(
                build_tokenizer, target_dir, cache_dir, tokenizer_data_path, get_path_only=False
            )
        tokenizer_path = self.build_tokenizer(
            build_tokenizer, target_dir, cache_dir, tokenizer_data_path, get_path_only=True
        )

        def check_fn():
            assert Path(tokenizer_path).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 3
        train_dir = (target_dir / 'train')
        if (start <= stage_id):
            logger.info(f'Stage {stage_id}: Train Model')
            (train_ds, train_bs) = self._build_dataset_and_sampler(
                target_dir, cache_dir, 'train', train_csv,
                tokenizer_path, build_dataset, build_batch_sampler
            )
            (valid_ds, valid_bs) = self._build_dataset_and_sampler(
                target_dir, cache_dir, 'valid', valid_csv,
                tokenizer_path, build_dataset, build_batch_sampler
            )

            with Path(tokenizer_path).open('rb') as f:
                tokenizer = pickle.load(f)

            # The model's output dimension is the tokenizer's vocabulary size.
            build_model_all_args = dict(
                build_model=build_model,
                model_output_size=len(tokenizer),
                build_upstream=build_upstream,
                build_featurizer=build_featurizer,
                build_downstream=build_downstream,
            )
            build_task_all_args_except_model = dict(
                build_task=build_task,
                tokenizer=tokenizer,
            )
            self.train(
                train,
                train_dir,
                build_model_all_args,
                build_task_all_args_except_model,
                save_model,
                save_task,
                build_optimizer,
                build_scheduler,
                evaluate,
                train_ds,
                train_bs,
                self.build_collate_fn(build_collate_fn, 'train'),
                valid_ds,
                valid_bs,
                self.build_collate_fn(build_collate_fn, 'valid'),
                device=device,
                eval_batch=eval_batch,
                num_workers=num_workers,
                world_size=world_size,
                rank=rank,
            )

        def check_fn():
            assert (train_dir / 'valid_best').is_dir()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 4
        if (start <= stage_id):
            test_ckpt_dir: Path = Path((test_ckpt_dir or ((target_dir / 'train') / 'valid_best')))
            logger.info(f'Stage {stage_id}: Test model: {test_ckpt_dir}')
            for (test_idx, test_csv) in enumerate(test_csvs):
                test_name = Path(test_csv).stem
                # Evaluation dir encodes both the checkpoint identity and the test set.
                test_dir: Path = (
                    ((target_dir / 'evaluate')
                     / test_ckpt_dir.relative_to(train_dir).as_posix().replace('/', '-'))
                    / test_name
                )
                test_dir.mkdir(exist_ok=True, parents=True)
                logger.info(f'Stage {stage_id}.{test_idx}: Test model on {test_csv}')
                (test_ds, test_bs) = self._build_dataset_and_sampler(
                    target_dir, cache_dir, 'test', test_csv,
                    tokenizer_path, build_dataset, build_batch_sampler
                )
                (_, valid_best_task) = self.load_model_and_task(test_ckpt_dir)
                logs: dict = self.evaluate(
                    evaluate, 'test', valid_best_task, test_ds, test_bs,
                    self.build_collate_fn(build_collate_fn, 'test'),
                    eval_batch, test_dir, device, num_workers
                )
                test_metrics = {name: float(value) for (name, value) in logs.items()}
                logger.info(f'test results: {test_metrics}')
                with (test_dir / f'result.yaml').open('w') as f:
                    yaml.safe_dump(test_metrics, f)

        return stage_id

    def _build_dataset_and_sampler(self, target_dir: str, cache_dir: str, mode: str,
                                   data_csv: str, tokenizer_path: str,
                                   build_dataset: dict, build_batch_sampler: dict):
        """Build the (dataset, batch sampler) pair for one of train/valid/test."""
        logger.info(f'Build {mode} dataset')
        dataset = self.build_dataset(build_dataset, target_dir, cache_dir, mode, data_csv, tokenizer_path)
        logger.info(f'Build {mode} batch sampler')
        batch_sampler = self.build_batch_sampler(build_batch_sampler, target_dir, cache_dir, mode, data_csv, dataset)
        return (dataset, batch_sampler)

    def build_task(self, build_task: dict, model, tokenizer):
        """Wrap the model and tokenizer into a CTC speech-to-text task object."""
        task = Speech2TextCTCTask(model, tokenizer, **build_task)
        return task
def prepare_librispeech(target_dir, cache_dir, dataset_root, train_sets: List[str],
                        valid_sets: List[str], test_sets: List[str],
                        n_jobs: int = 6, get_path_only: bool = False):
    """
    Prepare LibriSpeech for ASR following :obj:`SuperbASR.prepare_data` format.
    See :obj:`LibriSpeech` for the arguments usage
    """
    target_dir = Path(target_dir)
    train_path = (target_dir / f"{'+'.join(train_sets)}.csv")
    valid_path = (target_dir / f"{'+'.join(valid_sets)}.csv")
    test_paths = [(target_dir / f'{test_set}.csv') for test_set in test_sets]
    if get_path_only:
        return (train_path, valid_path, test_paths)

    corpus = LibriSpeech(dataset_root, n_jobs, train_sets, valid_sets, test_sets)
    (train_data, valid_data, test_data) = corpus.data_split

    def split_to_csv(split, csv_path):
        # Columns are the sorted per-utterance fields, plus a trailing 'id' column.
        utt_ids = sorted(list(split.keys()))
        columns = sorted(split[utt_ids[0]].keys())
        table = {col: [split[utt_id][col] for utt_id in utt_ids] for col in columns}
        table['id'] = utt_ids
        pd.DataFrame(table).to_csv(csv_path, index=False)

    split_to_csv(train_data, train_path)
    split_to_csv(valid_data, valid_path)
    # NOTE(review): all test data goes into test_paths[0] even when several
    # test_sets are requested — confirm corpus.data_split merges test sets.
    split_to_csv(test_data, test_paths[0])
    return (train_path, valid_path, test_paths)
def prepare_common_tokenizer(target_dir, cache_dir, tokenizer_data_path,
                             get_path_only=False, tokenizer_name: str = None,
                             vocab_file: str = None, vocab_type: str = 'character',
                             vocab_args: dict = None, slots_file: str = None):
    """
    Build the tokenizer following :obj:`SuperbASR.build_tokenizer` format.

    Args:
        tokenizer_name (str): save the tokenizer under this filename
        vocab_file (str): path or URL of an already-prepared vocabulary;
            when given, the tokenizer is loaded instead of trained
        vocab_type (str): character / phoneme / word / subword
        vocab_args (dict): forwarded to the vocabulary generator for the
            chosen ``vocab_type``
        slots_file (str): optional pre-defined slots (path or URL) used to
            encode special tokens

    Return:
        str: the pickled tokenizer path
    """
    if tokenizer_name is None:
        tokenizer_name = f'{Path(tokenizer_data_path).stem}-{vocab_type}.tokenizer'
    tokenizer_path = Path(target_dir) / f'{tokenizer_name}.pkl'
    if get_path_only:
        return tokenizer_path

    def localize(resource):
        # Resolve URL resources into local cached files; pass local paths through.
        if resource is None:
            return None
        resource = str(resource)
        return urls_to_filepaths(resource) if resource.startswith('http') else resource

    vocab_file = localize(vocab_file)
    slots_file = localize(slots_file)

    if vocab_file is not None:
        # A prepared vocabulary exists: just load the tokenizer.
        tokenizer = load_tokenizer(vocab_type, vocab_file=vocab_file, slots_file=slots_file)
    else:
        # Train a fresh vocabulary on the tokenizer data.
        vocab_args = vocab_args or {}
        assert isinstance(vocab_args, dict)
        if vocab_type == 'subword' and 'output_file' not in vocab_args:
            vocab_args['output_file'] = Path(target_dir) / 'tokenizer.spm'
        produced = generate_vocab(vocab_type, text_file=str(tokenizer_data_path), **vocab_args)
        # The generator yields either a vocab list or a vocab file path.
        if isinstance(produced, list):
            tokenizer = load_tokenizer(vocab_type, vocab_file=None,
                                       vocab_list=produced, slots_file=slots_file)
        else:
            tokenizer = load_tokenizer(vocab_type,
                                       vocab_file=produced if isinstance(produced, str) else None,
                                       vocab_list=None, slots_file=slots_file)

    with tokenizer_path.open('wb') as f:
        pickle.dump(tokenizer, f)
    return tokenizer_path
class SuperbASR(ASR):
    """SUPERB ASR: character-level CTC speech recognition on LibriSpeech."""

    def default_config(self) -> dict:
        """Return the default arguments for :obj:`run`, one dict per stage/method."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                train_sets=['train-clean-100'],
                valid_sets=['dev-clean'],
                test_sets=['test-clean']
            ),
            prepare_tokenizer_data=dict(),
            build_tokenizer=dict(vocab_type='character'),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(batch_size=32, max_length=2000, shuffle=True),
                valid=dict(batch_size=1),
                test=dict(batch_size=1)
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(
                model_conf=dict(
                    module='LSTM',
                    proj_size=1024,
                    hidden_size=[1024, 1024],
                    dropout=[0.2, 0.2],
                    layer_norm=[False, False],
                    proj=[False, False],
                    sample_rate=[1, 1],
                    sample_style='concat',
                    bidirectional=True
                ),
                specaug_conf=dict(
                    freq_mask_width_range=(0, 50),
                    num_freq_mask=4,
                    time_mask_width_range=(0, 40),
                    num_time_mask=2
                )
            ),
            build_model=dict(upstream_trainable=False),
            build_task=dict(log_metrics=['cer', 'wer']),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(extra_conf=dict(build_downstream_conf='${build_downstream}')),
            save_task=dict(),
            train=dict(
                total_steps=200000,
                log_step=100,
                eval_step=2000,
                save_step=500,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='wer',
                valid_higher_better=False,
                auto_resume=True,
                resume_ckpt_dir=None
            )
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """Prepare the task-specific metadata csvs (path, labels...).

        Delegates to :obj:`prepare_librispeech` with ``**prepare_data``.

        Returns:
            tuple: (train_path, valid_path, test_paths) — each csv has the
            columns ``id``, ``wav_path`` (absolute waveform path) and
            ``transcription`` (text string).
        """
        return prepare_librispeech(**self._get_current_arguments(flatten_dict='prepare_data'))

    def prepare_tokenizer_data(self, prepare_tokenizer_data: dict, target_dir: str, cache_dir: str,
                               train_csv: str, valid_csv: str, test_csvs: List[str],
                               get_path_only: bool = False):
        """Write the text file used for tokenizer training.

        Only the ``transcription`` column of ``train_csv`` is used — one
        transcription per output line.

        Returns:
            str: path to the written text file.
        """
        tokenizer_data_name = f'{Path(train_csv).stem}.tokenizer_data'
        tokenizer_data_path = (Path(target_dir) / f'{tokenizer_data_name}.txt')
        if get_path_only:
            return tokenizer_data_path
        all_text = pd.read_csv(train_csv)['transcription']
        with tokenizer_data_path.open('w') as f:
            # One transcription per line.
            f.writelines([f'''{line}
''' for line in all_text])
        return tokenizer_data_path

    def build_tokenizer(self, build_tokenizer: dict, target_dir: str, cache_dir: str,
                        tokenizer_data_path: str, get_path_only: bool = False):
        """Train/load the tokenizer from :obj:`prepare_tokenizer_data`'s output.

        Delegates to :obj:`prepare_common_tokenizer` with ``**build_tokenizer``.

        Returns:
            str: filepath of the pickled
            :obj:`s3prl.dataio.encoder.tokenizer.Tokenizer`.
        """
        return prepare_common_tokenizer(**self._get_current_arguments(flatten_dict='build_tokenizer'))

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str,
                      mode: str, data_csv: str, tokenizer_path: str):
        """Build the train/valid/test dataset.

        Each item is a dict with keys: ``x`` (waveform FloatTensor), ``x_len``
        (int length), ``class_ids`` (encoded transcription), ``labels``
        (raw transcription text) and ``unique_name`` (csv ``id``).
        """
        csv = pd.read_csv(data_csv)
        audio_loader = LoadAudio(csv['wav_path'].tolist())
        with open(tokenizer_path, 'rb') as f:
            tokenizer = pickle.load(f)
        text_encoder = EncodeText(csv['transcription'].tolist(), tokenizer)
        ids = csv['id'].tolist()

        class Speech2TextDataset():
            # Closure-based dataset: indexes the loaders captured above.

            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                text = text_encoder[index]
                return {
                    'x': audio['wav'],
                    'x_len': audio['wav_len'],
                    'class_ids': text['class_ids'],
                    'labels': text['labels'],
                    'unique_name': ids[index]
                }

        dataset = Speech2TextDataset()
        return dataset

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str,
                            mode: str, data_csv: str, dataset: Dataset):
        """Return the batch sampler for the torch DataLoader.

        ``build_batch_sampler`` may hold a ``train`` dict (args for
        :obj:`SortedBucketingSampler`) and ``valid``/``test`` dicts (args for
        :obj:`FixedBatchSizeBatchSampler`).
        """
        # Typed view over the per-mode sampler configs.
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            # Length-sorted bucketing needs waveform lengths; cached under train_stats.
            wav_lens = get_info(dataset, ['x_len'], (Path(target_dir) / 'train_stats'))
            sampler = SortedBucketingSampler(wav_lens, **(conf.train or {}))
        elif (mode == 'valid'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.test or {}))
        return sampler

    def build_downstream(self, build_downstream: dict, downstream_input_size: int,
                         downstream_output_size: int, downstream_input_stride: int):
        """Build the downstream model: :obj:`RNNEncoder` wrapped in :obj:`ModelWithSpecaug`.

        ``build_downstream`` has two keys: ``model_conf`` (RNNEncoder args) and
        ``specaug_conf`` (ModelWithSpecaug args).

        Returns:
            :obj:`s3prl.nn.interface.AbsFrameModel`
        """
        @dataclass
        class Config():
            model_conf: dict = None
            specaug_conf: dict = None

        conf = Config(**build_downstream)
        model = RNNEncoder(downstream_input_size, downstream_output_size, **(conf.model_conf or {}))
        downstream = ModelWithSpecaug(model, **(conf.specaug_conf or {}))
        return downstream
class SuperbPR(SuperbASR):
    """SUPERB PR: phoneme recognition — SuperbASR with phonemized transcriptions."""

    def default_config(self) -> dict:
        """Return the default arguments for :obj:`run`, one dict per stage/method."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                train_sets=['train-clean-100'],
                valid_sets=['dev-clean'],
                test_sets=['test-clean']
            ),
            prepare_tokenizer_data=dict(),
            build_tokenizer=dict(vocab_type='phoneme'),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(batch_size=16, max_length=300000),
                valid=dict(batch_size=1),
                test=dict(batch_size=1)
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(hidden_size=256),
            build_model=dict(upstream_trainable=False),
            build_task=dict(log_metrics=['per']),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.01)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(extra_conf=dict(build_downstream_conf='${build_downstream}')),
            save_task=dict(),
            train=dict(
                total_steps=100000,
                log_step=100,
                eval_step=1000,
                save_step=100,
                gradient_clipping=1.0,
                gradient_accumulate=2,
                valid_metric='per',
                valid_higher_better=False,
                auto_resume=True,
                resume_ckpt_dir=None
            ),
            evaluate=dict()
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """Prepare LibriSpeech csvs, then replace each transcription with its phoneme sequence."""
        (train_csv, valid_csv, test_csvs) = super().prepare_data(prepare_data, target_dir, cache_dir, get_path_only)
        if get_path_only:
            return (train_csv, valid_csv, test_csvs)

        g2p = G2P()

        def phonemize(csv_path):
            # Rewrite the 'transcription' column in place with G2P output.
            df = pd.read_csv(csv_path)
            text = df['transcription'].tolist()
            phonemized_text = [g2p.encode(t.strip()) for t in text]
            df['transcription'] = phonemized_text
            df.to_csv(csv_path, index=False)

        for csv_path in [train_csv, valid_csv, *test_csvs]:
            phonemize(csv_path)
        return (train_csv, valid_csv, test_csvs)

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str,
                            mode: str, data_csv: str, dataset):
        """Return the batch sampler for the torch DataLoader.

        ``build_batch_sampler`` may hold a ``train`` dict (args for
        :obj:`SortedSliceSampler`) and ``valid``/``test`` dicts (args for
        :obj:`FixedBatchSizeBatchSampler`).
        """
        # Typed view over the per-mode sampler configs.
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            # Slice sampling needs waveform lengths; cached under train_stats.
            wav_lens = get_info(dataset, ['x_len'], (Path(target_dir) / 'train_stats'))
            sampler = SortedSliceSampler(wav_lens, **(conf.train or {}))
        elif (mode == 'valid'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            sampler = FixedBatchSizeBatchSampler(dataset, **(conf.test or {}))
        return sampler

    def build_downstream(self, build_downstream: dict, downstream_input_size: int,
                         downstream_output_size: int, downstream_input_stride: int):
        """Build the downstream model: a :obj:`FrameLevelLinear` probe.

        ``build_downstream`` supports the :obj:`FrameLevelLinear` arguments.

        Returns:
            :obj:`s3prl.nn.interface.AbsFrameModel`
        """
        return FrameLevelLinear(downstream_input_size, downstream_output_size, **build_downstream)
def audio_snips_for_slot_filling(target_dir: str, cache_dir: str, dataset_root: str,
                                 train_speakers: List[str], valid_speakers: List[str],
                                 test_speakers: List[str], get_path_only: bool = False):
    """Prepare Audio SNIPS csvs (id, wav_path, transcription, iob) for slot filling."""
    target_dir = Path(target_dir)
    train_path = (target_dir / f'train.csv')
    valid_path = (target_dir / f'valid.csv')
    test_paths = [(target_dir / f'test.csv')]
    if get_path_only:
        return (train_path, valid_path, test_paths)

    corpus = SNIPS(dataset_root, train_speakers, valid_speakers, test_speakers)
    (train_data, valid_data, test_data) = corpus.data_split

    def split_to_csv(split, csv_path):
        data_ids = sorted(list(split.keys()))
        fields = sorted(split[data_ids[0]].keys())
        table = defaultdict(list)
        for data_id in data_ids:
            data_point = split[data_id]
            # Normalize the transcription text before aligning it with IOB tags.
            trans = data_point['transcription']
            for old, new in (('楽園追放', 'EXPELLED'), ('官方杂志', ''), ('–', '-'), ('&', ' AND ')):
                trans = trans.replace(old, new)
            trans = trans.translate(translator)
            trans = re.sub(' +', ' ', trans).strip(' ')
            words = trans.split(' ')
            iobs = data_point['iob'].split(' ')
            assert (len(words) == len(iobs))
            # Drop punctuation-only tokens together with their tags.
            # NOTE: `word in '?!.,;-–…'` is substring membership, so runs like
            # '!.' (and empty strings) are dropped as well.
            kept = [(word, iob) for (word, iob) in zip(words, iobs)
                    if word not in '?!.,;-–…']
            filtered_words = [word for (word, _) in kept]
            filtered_iobs = [iob for (_, iob) in kept]
            assert (len(filtered_words) == len(filtered_iobs))
            data_point['transcription'] = ' '.join(filtered_words)
            data_point['iob'] = ' '.join(filtered_iobs)
            for field in fields:
                table[field].append(data_point[field])
        table['id'] = data_ids
        pd.DataFrame(table).to_csv(csv_path, index=False)

    split_to_csv(train_data, train_path)
    split_to_csv(valid_data, valid_path)
    split_to_csv(test_data, test_paths[0])
    return (train_path, valid_path, test_paths)
class SuperbSF(SuperbASR):
    """SUPERB SF: slot filling on Audio SNIPS — ASR with IOB slot tags encoded as special tokens."""

    def default_config(self) -> dict:
        """Return the default arguments for :obj:`run`, one dict per stage/method."""
        return dict(
            start=0,
            stop=None,
            target_dir=MISSING,
            cache_dir=None,
            remove_all_cache=False,
            prepare_data=dict(
                dataset_root=MISSING,
                train_speakers=['Ivy', 'Joanna', 'Joey', 'Justin', 'Kendra', 'Kimberly', 'Matthew', 'Salli'],
                valid_speakers=['Aditi', 'Amy', 'Geraint', 'Nicole'],
                test_speakers=['Brian', 'Emma', 'Raveena', 'Russell']
            ),
            prepare_tokenizer_data=dict(),
            build_tokenizer=dict(vocab_type='character'),
            build_dataset=dict(),
            build_batch_sampler=dict(
                train=dict(batch_size=32, max_length=300000),
                valid=dict(batch_size=1),
                test=dict(batch_size=1)
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_downstream=dict(
                model_conf=dict(
                    module='LSTM',
                    proj_size=1024,
                    hidden_size=[1024, 1024],
                    dropout=[0.2, 0.2],
                    layer_norm=[False, False],
                    proj=[False, False],
                    sample_rate=[1, 1],
                    sample_style='concat',
                    bidirectional=True
                ),
                specaug_conf=dict(
                    freq_mask_width_range=(0, 50),
                    num_freq_mask=4,
                    time_mask_width_range=(0, 40),
                    num_time_mask=2
                )
            ),
            build_model=dict(upstream_trainable=False),
            build_task=dict(log_metrics=['wer', 'cer', 'slot_type_f1', 'slot_value_cer', 'slot_value_wer', 'slot_edit_f1_full', 'slot_edit_f1_part']),
            build_optimizer=dict(name='Adam', conf=dict(lr=0.0001)),
            build_scheduler=dict(name='ExponentialLR', gamma=0.9),
            save_model=dict(),
            save_task=dict(),
            train=dict(
                total_steps=200000,
                log_step=100,
                eval_step=2000,
                save_step=500,
                gradient_clipping=1.0,
                gradient_accumulate=1,
                valid_metric='slot_type_f1',
                valid_higher_better=True,
                auto_resume=True,
                resume_ckpt_dir=None
            )
        )

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only: bool = False):
        """Prepare the task-specific metadata csvs.

        Delegates to :obj:`audio_snips_for_slot_filling` with ``**prepare_data``.

        Returns:
            tuple: (train_path, valid_path, test_paths) — each csv has the
            columns ``id``, ``wav_path``, ``transcription`` (space-separated
            words) and ``iob`` (one tag per word, "O" when untagged).
        """
        return audio_snips_for_slot_filling(**self._get_current_arguments(flatten_dict='prepare_data'))

    def prepare_tokenizer_data(self, prepare_tokenizer_data: dict, target_dir: str, cache_dir: str,
                               train_csv: str, valid_csv: str, test_csvs: str,
                               get_path_only: bool = False):
        """Write the tokenizer-training directory with two files.

        ``slot.txt`` holds the sorted unique IOB tags from all splits;
        ``text.txt`` holds the train transcriptions, one per line.

        Returns:
            the directory path containing both files.
        """
        data_dir = (target_dir / 'tokenizer_data')
        if get_path_only:
            return data_dir
        train_df = pd.read_csv(train_csv)
        valid_df = pd.read_csv(valid_csv)
        test_dfs = [pd.read_csv(test_csv) for test_csv in test_csvs]
        # Collect every IOB tag seen in any split so the slot inventory is complete.
        iob_lines = pd.concat([train_df, valid_df, *test_dfs], axis=0)['iob'].tolist()
        iobs = []
        for line in iob_lines:
            iobs.extend(line.split(' '))
        iobs = list(sorted(set(iobs)))
        Path(data_dir).mkdir(parents=True, exist_ok=True)
        with open((data_dir / 'slot.txt'), 'w') as f:
            f.writelines([f'''{iob}
''' for iob in iobs])
        train_df = pd.read_csv(train_csv)
        texts = train_df['transcription'].tolist()
        with open((data_dir / 'text.txt'), 'w') as f:
            f.writelines([f'''{t}
''' for t in texts])
        return data_dir

    def build_tokenizer(self, build_tokenizer: dict, target_dir: str, cache_dir: str,
                        tokenizer_data_path: str, get_path_only: bool = False):
        """Build the tokenizer with the slots file so IOB tags become special tokens."""
        return prepare_common_tokenizer(
            target_dir, cache_dir, (Path(tokenizer_data_path) / 'text.txt'),
            get_path_only, None, None,
            slots_file=(Path(tokenizer_data_path) / 'slot.txt'),
            **build_tokenizer
        )

    def build_dataset(self, build_dataset: dict, target_dir: str, cache_dir: str,
                      mode: str, data_csv: str, tokenizer_path: str):
        """Build the train/valid/test dataset.

        Like :obj:`SuperbASR.build_dataset` but the text encoder also consumes
        the ``iob`` column so slot tags are interleaved into the labels.
        """
        csv = pd.read_csv(data_csv)
        audio_loader = LoadAudio(csv['wav_path'].tolist())
        with open(tokenizer_path, 'rb') as f:
            tokenizer = pickle.load(f)
        text_encoder = EncodeText(csv['transcription'].tolist(), tokenizer, iob=csv['iob'].tolist())
        ids = csv['id'].tolist()

        class SlotFillingDataset():
            # Closure-based dataset: indexes the loaders captured above.

            def __len__(self):
                return len(audio_loader)

            def __getitem__(self, index: int):
                audio = audio_loader[index]
                text = text_encoder[index]
                return {
                    'x': audio['wav'],
                    'x_len': audio['wav_len'],
                    'class_ids': text['class_ids'],
                    'labels': text['labels'],
                    'unique_name': ids[index]
                }

        dataset = SlotFillingDataset()
        return dataset

    def build_batch_sampler(self, build_batch_sampler: dict, target_dir: str, cache_dir: str,
                            mode: str, data_csv: str, dataset):
        """Return the batch sampler for the torch DataLoader.

        ``build_batch_sampler`` may hold a ``train`` dict (args for
        :obj:`SortedSliceSampler`) and ``valid``/``test`` dicts (args for
        :obj:`FixedBatchSizeBatchSampler`).
        """
        # Typed view over the per-mode sampler configs.
        @dataclass
        class Config():
            train: dict = None
            valid: dict = None
            test: dict = None

        conf = Config(**build_batch_sampler)
        if (mode == 'train'):
            # Slice sampling needs waveform lengths; cached under train_stats.
            wav_lens = get_info(dataset, ['x_len'], (Path(target_dir) / 'train_stats'))
            sampler = SortedSliceSampler(wav_lens, **(conf.train or {}))
            return sampler
        elif (mode == 'valid'):
            return FixedBatchSizeBatchSampler(dataset, **(conf.valid or {}))
        elif (mode == 'test'):
            return FixedBatchSizeBatchSampler(dataset, **(conf.test or {}))
        else:
            raise ValueError(f'Unsupported mode: {mode}')
class ASV(Problem):
    """Automatic Speaker Verification problem.

    Orchestrates the whole recipe as numbered stages (see :obj:`run`):
    data preparation, label-encoder building, training, trial-based
    evaluation of multiple checkpoints, and best-result reporting.
    """

    def run(
        self,
        target_dir: str,
        cache_dir: str,
        remove_all_cache: bool = False,
        start: int = 0,
        stop: int = None,
        num_workers: int = 6,
        eval_batch: int = -1,
        device: str = "cuda",
        world_size: int = 1,
        rank: int = 0,
        test_ckpt_dir: str = None,
        test_ckpt_steps: List[int] = None,
        prepare_data: dict = None,
        build_encoder: dict = None,
        build_dataset: dict = None,
        build_batch_sampler: dict = None,
        build_collate_fn: dict = None,
        build_upstream: dict = None,
        build_featurizer: dict = None,
        build_downstream: dict = None,
        build_model: dict = None,
        build_task: dict = None,
        build_optimizer: dict = None,
        build_scheduler: dict = None,
        save_model: dict = None,
        save_task: dict = None,
        train: dict = None,
        evaluate: dict = None,
    ):
        """
        ======== ====================
        stage    description
        ======== ====================
        0        Parse the corpus and save the metadata file (waveform path, label...)
        1        Build the encoder for encoding the speaker labels
        2        Train the model
        3        Evaluate the model on multiple test sets; multiple checkpoints will be
                 evaluated for each test set (see :code:`test_ckpt_steps`)
        4        Report the best result found on each test set
        ======== ====================

        Args:
            target_dir (str):
                The directory that stores the script result.
            cache_dir (str):
                The directory that caches the processed data.
                Default: /home/user/.cache/s3prl/data
            remove_all_cache (bool):
                Whether to remove all the cache stored under `cache_dir`.
                Default: False
            start (int):
                The starting stage of the problem script.
                Default: 0
            stop (int):
                The stopping stage of the problem script; set `None` to reach the
                final stage. Default: None
            num_workers (int): num_workers for all the torch DataLoader
            eval_batch (int):
                During evaluation (valid or test), limit the number of batches.
                This is helpful for fast development to check everything won't crash.
                If -1, disable this feature and evaluate the entire epoch.
                Default: -1
            device (str):
                The device type for all torch-related operations: "cpu" or "cuda".
                Default: "cuda"
            world_size (int):
                How many processes are running this script simultaneously (in
                parallel). Usually 1; for distributed training this is > 1.
                Default: 1
            rank (int):
                The id of this process among :code:`world_size` processes when
                running distributed training; ranges from 0 to world_size - 1.
            test_ckpt_dir (str):
                Specify the checkpoint path for testing. If not given, use the
                checkpoints specified by :code:`test_ckpt_steps`.
            test_ckpt_steps (List[int]):
                After training, multiple step checkpoints are saved. This option
                specifies which checkpoints (multiple) will be used for evaluation.
            **kwds:
                The other arguments like :code:`prepare_data` and :code:`build_model`
                are method-specific arguments for methods like :obj:`prepare_data`
                and :obj:`build_model`, and will not be used in the core :obj:`run`
                logic. See the specific method documentation for their supported
                arguments and meaning.
        """
        # Snapshot the fully-resolved arguments for reproducibility.
        yaml_path = Path(target_dir) / "configs" / f"{self._get_time_tag()}.yaml"
        yaml_path.parent.mkdir(exist_ok=True, parents=True)
        with yaml_path.open("w") as f:
            yaml.safe_dump(self._get_current_arguments(), f)

        # Normalize optional arguments: None -> default cache location / empty config.
        cache_dir: str = cache_dir or Path.home() / ".cache" / "s3prl" / "data"
        prepare_data: dict = prepare_data or {}
        build_encoder: dict = build_encoder or {}
        build_dataset: dict = build_dataset or {}
        build_batch_sampler: dict = build_batch_sampler or {}
        build_collate_fn: dict = build_collate_fn or {}
        build_upstream: dict = build_upstream or {}
        build_featurizer: dict = build_featurizer or {}
        build_downstream: dict = build_downstream or {}
        build_model: dict = build_model or {}
        build_task: dict = build_task or {}
        build_optimizer: dict = build_optimizer or {}
        build_scheduler: dict = build_scheduler or {}
        save_model: dict = save_model or {}
        save_task: dict = save_task or {}
        train: dict = train or {}
        evaluate = evaluate or {}

        target_dir: Path = Path(target_dir)
        target_dir.mkdir(exist_ok=True, parents=True)
        cache_dir = Path(cache_dir)
        cache_dir.mkdir(exist_ok=True, parents=True)
        if remove_all_cache:
            shutil.rmtree(cache_dir)

        stage_id = 0
        if start <= stage_id:
            logger.info(f"Stage {stage_id}: prepare data")
            train_csv, test_csvs = self.prepare_data(
                prepare_data, target_dir, cache_dir, get_path_only=False
            )
        # Re-query with get_path_only=True so later stages know the csv paths
        # even when stage 0 itself is skipped (start > 0).
        train_csv, test_csvs = self.prepare_data(
            prepare_data, target_dir, cache_dir, get_path_only=True
        )

        def check_fn():
            # Sanity check before moving past stage 0.
            assert Path(train_csv).is_file()
            for test_csv in test_csvs:
                assert Path(test_csv).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 1
        if start <= stage_id:
            logger.info(f"Stage {stage_id}: build encoder")
            encoder_path = self.build_encoder(
                build_encoder,
                target_dir,
                cache_dir,
                train_csv,
                test_csvs,
                get_path_only=False,
            )
        # Same skip-aware path re-query as for stage 0.
        encoder_path = self.build_encoder(
            build_encoder,
            target_dir,
            cache_dir,
            train_csv,
            test_csvs,
            get_path_only=True,
        )

        def check_fn():
            assert Path(encoder_path).is_file()

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 2
        train_dir = target_dir / "train"
        if start <= stage_id:
            logger.info(f"Stage {stage_id}: Train Model")
            train_ds, train_bs = self._build_dataset_and_sampler(
                target_dir,
                cache_dir,
                "train",
                train_csv,
                encoder_path,
                build_dataset,
                build_batch_sampler,
            )
            with Path(encoder_path).open("rb") as f:
                encoder = pickle.load(f)

            build_model_all_args = dict(
                build_model=build_model,
                model_output_size=len(encoder),
                build_upstream=build_upstream,
                build_featurizer=build_featurizer,
                build_downstream=build_downstream,
            )
            build_task_all_args_except_model = dict(
                build_task=build_task, encoder=encoder
            )
            # ASV has no validation split: the three valid-related slots
            # (dataset, sampler, collate_fn) are passed as None.
            self.train(
                train,
                train_dir,
                build_model_all_args,
                build_task_all_args_except_model,
                save_model,
                save_task,
                build_optimizer,
                build_scheduler,
                evaluate,
                train_ds,
                train_bs,
                self.build_collate_fn(build_collate_fn, "train"),
                None,
                None,
                None,
                device=device,
                eval_batch=eval_batch,
                num_workers=num_workers,
                world_size=world_size,
                rank=rank,
            )

        # Collect the checkpoint directories that stage 3 will evaluate.
        test_ckpt_dirs = []
        if test_ckpt_dir is not None:
            test_ckpt_dirs.append(test_ckpt_dir)
        if test_ckpt_steps is None:
            # No explicit steps given: evaluate every saved "step_*" checkpoint.
            train_ckpts = [
                train_dir / name
                for name in os.listdir(train_dir)
                if name.startswith("step_")
            ]
            test_ckpt_dirs.extend(train_ckpts)
        else:
            test_ckpt_dirs.extend(
                [train_dir / f"step_{step}" for step in test_ckpt_steps]
            )

        def check_fn():
            for ckpt_dir in test_ckpt_dirs:
                assert Path(ckpt_dir).is_dir(), ckpt_dir

        self._stage_check(stage_id, stop, check_fn)

        stage_id = 3
        if start <= stage_id:
            for test_idx, test_csv in enumerate(test_csvs):
                for ckpt_idx, ckpt_dir in enumerate(test_ckpt_dirs):
                    test_name = Path(test_csv).stem
                    # One result directory per (test set, checkpoint) pair;
                    # "/" in the checkpoint's relative path is flattened to "-"
                    # so the evaluate/ directory layout stays flat.
                    test_dir: Path = (
                        target_dir
                        / "evaluate"
                        / test_name
                        / ckpt_dir.relative_to(train_dir).as_posix().replace("/", "-")
                    )
                    test_dir.mkdir(exist_ok=True, parents=True)
                    logger.info(
                        f"Stage {stage_id}.{test_idx}.{ckpt_idx}: Test on {test_csv} with model {ckpt_dir}"
                    )
                    test_ds, test_bs = self._build_dataset_and_sampler(
                        target_dir,
                        cache_dir,
                        "test",
                        test_csv,
                        encoder_path,
                        build_dataset,
                        build_batch_sampler,
                    )

                    # Build the (label, enroll_utt_id, test_utt_id) trial list
                    # from the test csv and inject it into the loaded task.
                    csv = pd.read_csv(test_csv)
                    test_trials = []
                    for rowid, row in csv.iterrows():
                        test_trials.append(
                            (int(row["label"]), str(row["id1"]), str(row["id2"]))
                        )
                    overrides = dict(test_trials=test_trials)
                    _, task = self.load_model_and_task(ckpt_dir, overrides)
                    logs = self.evaluate(
                        evaluate,
                        "test",
                        task,
                        test_ds,
                        test_bs,
                        self.build_collate_fn(build_collate_fn, "test"),
                        eval_batch,
                        test_dir,
                        device,
                        num_workers,
                    )
                    test_metrics = {
                        name: float(value) for name, value in logs.items()
                    }
                    logger.info(f"test metrics: {test_metrics}")
                    # Stage 4 relies on each result.yaml having an EER entry.
                    assert "EER" in test_metrics
                    with (test_dir / f"result.yaml").open("w") as f:
                        yaml.safe_dump(test_metrics, f)

        self._stage_check(stage_id, stop, lambda: True)

        stage_id = 4
        if start <= stage_id:
            for test_idx, test_csv in enumerate(test_csvs):
                test_name = Path(test_csv).stem
                logger.info(f"Report results on {test_name}")
                # Gather every checkpoint's result.yaml written by stage 3.
                eer_ckpts = []
                for ckpt_dir in os.listdir(target_dir / "evaluate" / test_name):
                    result_yaml: Path = (
                        target_dir / "evaluate" / test_name / ckpt_dir / "result.yaml"
                    )
                    if result_yaml.is_file():
                        with open(result_yaml) as f:
                            eer_ckpts.append(
                                (
                                    float(
                                        yaml.load(f, Loader=yaml.FullLoader)["EER"]
                                    ),
                                    str(result_yaml.parent),
                                )
                            )
                logger.info(f"All EERs on {test_name}:")
                for eer, ckpt in eer_ckpts:
                    logger.info(f"ckpt_dir: {ckpt}, eer: {eer}")
                # Lower EER is better, so the best checkpoint sorts first.
                eer_ckpts.sort(key=lambda x: x[0])
                best_eer, best_ckpt_dir = eer_ckpts[0]
                logger.info(
                    f"Best EER on {test_name} is from {best_ckpt_dir}: {best_eer}"
                )
                with (
                    target_dir / "evaluate" / test_name / "best_result.yaml"
                ).open("w") as f:
                    yaml.safe_dump({"EER": best_eer}, f)

    def _build_dataset_and_sampler(
        self,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        encoder_path: str,
        build_dataset: dict,
        build_batch_sampler: dict,
    ):
        # Shared helper for train/test stages: build the dataset first, then
        # the batch sampler over it.
        logger.info(f"Build {mode} dataset")
        dataset = self.build_dataset(
            build_dataset, target_dir, cache_dir, mode, data_csv, encoder_path
        )
        logger.info(f"Build {mode} batch sampler")
        batch_sampler = self.build_batch_sampler(
            build_batch_sampler, target_dir, cache_dir, mode, data_csv, dataset
        )
        return dataset, batch_sampler

    def build_task(self, build_task: dict, model, encoder, test_trials=None):
        """
        Build the task, which defines the logic for every train/valid/test forward
        step for the :code:`model`, and the logic for how to reduce all the batch
        results from multiple train/valid/test steps into metrics.

        By default build :obj:`SpeakerVerification`.

        Args:
            build_task (dict): same in :obj:`default_config`, no argument supported for now
            model (torch.nn.Module): the model built by :obj:`build_model`
            encoder: the encoder built by :obj:`build_encoder`
            test_trials (List[Tuple[int, str, str]]): each tuple in the list consists of
                :code:`(label, enroll_utt_id, test_utt_id)`; label is either 0 or 1

        Returns:
            Task
        """
        task = SpeakerVerification(model, encoder, test_trials, **build_task)
        return task
def prepare_voxceleb1_for_sv(
    target_dir: str,
    cache_dir: str,
    get_path_only: bool,
    dataset_root: str,
    force_download: bool = False,
):
    """
    Prepare VoxCeleb1 for speaker verification
    following :obj:`SuperbASV.prepare_data` format.

    Args:
        target_dir (str): the directory the train/test-trial csv files are written into
        cache_dir (str): the directory for cached/downloaded corpus metadata
        get_path_only (bool): directly return the csv filepaths without building them
        dataset_root (str): The root path of VoxCeleb1
        force_download (bool): always re-download the metadata for VoxCeleb1

    Returns:
        tuple

        1. train_path (Path): csv with columns ``id``, ``wav_path``, ``spk``
        2. test_trial_paths (List[Path]): a single trial csv with columns
           ``id1``, ``id2``, ``wav_path1``, ``wav_path2``, ``label``
    """
    # Fix: the annotation says str but the original used the `/` path operator
    # directly on it, which raises TypeError for plain strings. Accept both.
    target_dir = Path(target_dir)
    train_path = target_dir / "train.csv"
    test_trial_path = target_dir / "test_trial.csv"

    if get_path_only:
        return train_path, [test_trial_path]

    corpus = VoxCeleb1SV(dataset_root, cache_dir, force_download)
    train_data, valid_data, test_data, test_trials = corpus.all_data

    # SUPERB ASV trains on train + valid splits together (no validation stage).
    all_data = {**train_data, **valid_data}
    ids = sorted(all_data.keys())  # deterministic row order
    wav_paths = [all_data[idx]["wav_path"] for idx in ids]
    labels = [all_data[idx]["label"] for idx in ids]
    pd.DataFrame({"id": ids, "wav_path": wav_paths, "spk": labels}).to_csv(
        train_path, index=False
    )

    # Each trial is (label, enroll_utt_id, test_utt_id); resolve both waveforms.
    labels, id1s, id2s = zip(*test_trials)
    wav_path1 = [test_data[idx]["wav_path"] for idx in id1s]
    wav_path2 = [test_data[idx]["wav_path"] for idx in id2s]
    pd.DataFrame(
        {
            "id1": id1s,
            "id2": id2s,
            "wav_path1": wav_path1,
            "wav_path2": wav_path2,
            "label": labels,
        }
    ).to_csv(test_trial_path, index=False)

    return train_path, [test_trial_path]
class SuperbASV(ASV):
    """The SUPERB Automatic Speaker Verification recipe: VoxCeleb1 data with an
    x-vector downstream model and trial-based EER evaluation."""

    def default_config(self) -> dict:
        # Default SUPERB ASV configuration; MISSING fields must be supplied by
        # the caller (target_dir, dataset_root, upstream name).
        return dict(
            target_dir=MISSING,
            cache_dir=None,
            test_ckpt_steps=None,
            prepare_data=dict(dataset_root=MISSING),
            build_dataset=dict(
                train=dict(
                    min_secs=2.0,
                    max_secs=8.0,
                )
            ),
            build_batch_sampler=dict(
                train=dict(batch_size=10, shuffle=True),
                test=dict(batch_size=1),
            ),
            build_upstream=dict(name=MISSING),
            build_featurizer=dict(layer_selections=None, normalize=False),
            build_model=dict(upstream_trainable=False),
            build_task=dict(
                loss_type="amsoftmax", loss_conf=dict(margin=0.4, scale=30)
            ),
            build_optimizer=dict(name="AdamW", conf=dict(lr=0.0001)),
            build_scheduler=dict(name="ExponentialLR", gamma=0.9),
            train=dict(
                total_steps=200000,
                log_step=500,
                # ASV has no validation split, so the eval step is set far
                # beyond total_steps to effectively disable periodic eval.
                eval_step=1e20,
                save_step=10000,
                gradient_clipping=1000.0,
                gradient_accumulate=5,
                valid_metric=None,
                valid_higher_better=None,
                auto_resume=True,
                resume_ckpt_dir=None,
                keep_num_ckpts=None,
            ),
        )

    def prepare_data(
        self,
        prepare_data: dict,
        target_dir: str,
        cache_dir: str,
        get_path_only: bool,
    ):
        """
        Prepare the task-specific data metadata (path, labels...).
        By default call :obj:`prepare_voxceleb1_for_sv` with :code:`**prepare_data`.

        Args:
            prepare_data (dict): same in :obj:`default_config`,
                supports the arguments of :obj:`prepare_voxceleb1_for_sv`
            target_dir (str): Parse your corpus and save the csv files into this directory
            cache_dir (str): If the parsing or preprocessing takes too long, save
                temporary files into this directory. It is expected to be shared
                across different training sessions (different hypers and :code:`target_dir`)
            get_path_only (bool): Directly return the filepaths no matter whether they exist

        Returns:
            tuple

            1. train_path (str): csv with columns ``id`` (str), ``wav_path`` (str,
               absolute path), ``spk`` (str, speaker label)
            2. test_trial_paths (List[str]): each csv with columns ``id1`` (str),
               ``id2`` (str), ``wav_path1`` (str), ``wav_path2`` (str) and
               ``label`` (int; 0 = different speakers, 1 = same speaker)
        """
        # _get_current_arguments(flatten_dict='prepare_data') forwards this
        # method's own arguments plus the flattened prepare_data dict.
        return prepare_voxceleb1_for_sv(
            **self._get_current_arguments(flatten_dict="prepare_data")
        )

    def build_encoder(
        self,
        build_encoder: dict,
        target_dir: str,
        cache_dir: str,
        train_csv: str,
        test_csvs: list,
        get_path_only: bool,
    ):
        """
        Build the encoder (for the labels) given the data metadata, and return the
        saved encoder path. By default generate and save a
        :obj:`s3prl.dataio.encoder.CategoryEncoder` from the ``spk`` column of the
        train csv.

        Args:
            build_encoder (dict): same in :obj:`default_config`, no argument supported for now
            target_dir (str): Save your encoder into this directory
            cache_dir (str): Shared cache directory across training sessions
            train_csv (str): the train csv path from :obj:`prepare_data`
            test_csvs (List[str]): the test csv paths from :obj:`prepare_data`
            get_path_only (bool): Directly return the filepath no matter whether it exists

        Returns:
            str

            encoder_path: The encoder saved in the pickle format
        """
        encoder_path = Path(target_dir) / "spk2int.pkl"
        if get_path_only:
            return encoder_path

        csv = pd.read_csv(train_csv)
        # sorted() makes the speaker -> int mapping deterministic across runs.
        all_spk = sorted(set(csv["spk"]))
        spk2int = CategoryEncoder(all_spk)
        with open(encoder_path, "wb") as f:
            pickle.dump(spk2int, f)

        return encoder_path

    def build_dataset(
        self,
        build_dataset: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        encoder_path: str,
    ):
        """
        Build the dataset for train/test (ASV has no validation split).

        Args:
            build_dataset (dict): same in :obj:`default_config`; the ``train`` sub-dict
                supports:

                ==================== ====================
                key                  description
                ==================== ====================
                min_secs (float)     - Drop a waveform if it is not longer than :code:`min_secs`
                max_secs (float)     - If a waveform is longer than :code:`max_secs` seconds,
                                       randomly crop it to :code:`max_secs` seconds.
                                       Default: None, no cropping
                ==================== ====================

                The ``test`` sub-dict supports no argument yet.

            target_dir (str): Current experiment directory
            cache_dir (str): Shared cache directory across training sessions
            mode (str): train/test
            data_csv (str): The metadata csv file for the specific :code:`mode`
            encoder_path (str): The pickled encoder path for encoding the labels

        Returns:
            torch Dataset

            train mode items: ``x`` (FloatTensor, (seq_len, 1)), ``x_len`` (int),
            ``class_id`` (encoded speaker id), ``unique_name`` (str).
            test mode items: ``x``, ``x_len``, ``unique_name``.
        """
        assert mode in ["train", "test"], "Only support train & test mode (no validation)"

        if mode == "train":

            @dataclass
            class Config:
                # Typed view over build_dataset['train'] (see docstring above).
                min_secs: float = None
                max_secs: float = None

            conf = build_dataset.get("train", {})
            conf = Config(**conf)

            csv = pd.read_csv(data_csv)
            wav_paths = csv["wav_path"].tolist()
            # EFFECTS are the module-level sox effects; max_secs enables random
            # cropping of over-long utterances inside the loader.
            audio_loader = LoadAudio(
                wav_paths, sox_effects=EFFECTS, max_secs=conf.max_secs
            )
            labels = csv["spk"].tolist()
            with open(encoder_path, "rb") as f:
                encoder = pickle.load(f)
            label_encoder = EncodeCategory(labels, encoder)
            ids = csv["id"].tolist()

            class SVTrainDataset:
                # Lightweight map-style dataset closing over the loaders above.
                def __len__(self):
                    return len(audio_loader)

                def __getitem__(self, index: int):
                    audio = audio_loader[index]
                    label = label_encoder[index]
                    return {
                        "x": audio["wav"],
                        "x_len": audio["wav_len"],
                        "class_id": label["class_id"],
                        "unique_name": ids[index],
                    }

            dataset = SVTrainDataset()

            if conf.min_secs is not None:
                # Lengths are measured AFTER sox effects, so too-short utterances
                # are filtered out based on the effective durations.
                # NOTE(review): target_dir is annotated str but used with `/`;
                # at runtime ASV.run passes a Path — confirm callers do the same.
                x_lens, unique_names = get_info(
                    dataset, ["x_len", "unique_name"], target_dir / "train_utt_len"
                )
                indices = []
                removed_indices = []
                for idx, (x_len, unique_name) in enumerate(
                    zip(x_lens, unique_names)
                ):
                    secs = x_len / SAMPLE_RATE
                    if secs <= conf.min_secs:
                        logger.info(
                            f"Remove utt {unique_name} since too short after sox effects: {secs} secs"
                        )
                        removed_indices.append(idx)
                    else:
                        indices.append(idx)
                if len(removed_indices) > 0:
                    logger.info(f"Remove in total {len(removed_indices)} utts")
                dataset = Subset(dataset, indices)

        elif mode == "test":
            csv = pd.read_csv(data_csv)
            # Both sides of every trial are pooled into one utterance list;
            # sorted(set(...)) de-duplicates utterances shared across trials.
            ids = pd.concat([csv["id1"], csv["id2"]], ignore_index=True).tolist()
            wav_paths = pd.concat(
                [csv["wav_path1"], csv["wav_path2"]], ignore_index=True
            ).tolist()
            data_list = sorted(set([(idx, path) for idx, path in zip(ids, wav_paths)]))
            ids, wav_paths = zip(*data_list)
            audio_loader = LoadAudio(wav_paths)

            class SVTestDataset:
                def __len__(self):
                    return len(audio_loader)

                def __getitem__(self, index: int):
                    audio = audio_loader[index]
                    return {
                        "x": audio["wav"],
                        "x_len": audio["wav_len"],
                        "unique_name": ids[index],
                    }

            dataset = SVTestDataset()

        return dataset

    def build_batch_sampler(
        self,
        build_batch_sampler: dict,
        target_dir: str,
        cache_dir: str,
        mode: str,
        data_csv: str,
        dataset,
    ):
        """
        Return the batch sampler for torch DataLoader.

        Args:
            build_batch_sampler (dict): same in :obj:`default_config`

                ==================== ====================
                key                  description
                ==================== ====================
                train (dict)         - arguments for :obj:`FixedBatchSizeBatchSampler`
                test (dict)          - arguments for :obj:`FixedBatchSizeBatchSampler`
                ==================== ====================

                Note that ASV does not support valid.

            target_dir (str): Current experiment directory
            cache_dir (str): Shared cache directory across training sessions
            mode (str): train/test
            data_csv (str): the :code:`mode` specific csv from :obj:`prepare_data`
            dataset: the dataset from :obj:`build_dataset`

        Returns:
            batch sampler for torch DataLoader
        """
        train = build_batch_sampler.get("train", {})
        test = build_batch_sampler.get("test", {})

        if mode == "train":
            return FixedBatchSizeBatchSampler(dataset, **train)
        elif mode == "test":
            return FixedBatchSizeBatchSampler(dataset, **test)
        else:
            raise ValueError("ASV only supports train/test modes")

    def build_downstream(
        self,
        build_downstream: dict,
        downstream_input_size: int,
        downstream_output_size: int,
        downstream_input_stride: int,
    ):
        """
        Return the task-specific downstream model.
        By default build the :obj:`SuperbXvector` model.

        Args:
            build_downstream (dict): same in :obj:`default_config`,
                supports the arguments of :obj:`SuperbXvector`
            downstream_input_size (int): the required input size of the model
            downstream_output_size (int): the required output size of the model
            downstream_input_stride (int): the input feature's stride (from 16 KHz)

        Returns:
            :obj:`s3prl.nn.interface.AbsUtteranceModel`
        """
        model = SuperbXvector(downstream_input_size, **build_downstream)
        return model
class _DistributedDataParallel(torch.nn.parallel.DistributedDataParallel):
    """DDP wrapper that transparently forwards attribute access to the wrapped model.

    Plain DDP only exposes the wrapped model via ``self.module``; this subclass
    lets callers keep using ``wrapped.some_attr`` as if the model were unwrapped.
    """

    def __getattr__(self, name):
        # Try DDP/nn.Module resolution first (parameters, buffers, submodules);
        # only fall back to the wrapped module when that lookup fails.
        try:
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.module, name)
def _force_cacheable(data: dict): output = dict() for (key, value) in data.items(): if isinstance(value, torch.Tensor): value = value.detach().cpu() output[key] = value return output
def _to_device(data, device: str): output = dict() for (key, value) in data.items(): if isinstance(value, torch.Tensor): value = value.to(device) output[key] = value return output