code
stringlengths
17
6.64M
def cpc_local(ckpt, *args, **kwargs):
    """
    Build the CPC upstream from a local checkpoint.
        ckpt (str): PATH to an existing checkpoint file
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
def cpc_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
        ckpt (str): URL
        refresh (bool): whether to download ckpt/config again if existed
    """
    # Bug fix: `refresh` was accepted but never forwarded to
    # _urls_to_filepaths, so a cached checkpoint could never be
    # re-downloaded (every sibling hub function forwards it).
    return cpc_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
def modified_cpc(refresh=False, *args, **kwargs):
    """
    The modified CPC model from the official repository.
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/60k_epoch4-d0f474de.pt'
    return cpc_url(*args, refresh=refresh, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str):
    """Convert a fairseq checkpoint into the self-contained format and save it.

    Args:
        fairseq_source: path to the original fairseq checkpoint
        output_path: where to write the converted checkpoint
    """
    state, cfg = load_fairseq_ckpt(fairseq_source)
    converted = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
    }
    Path(output_path).parent.mkdir(exist_ok=True, parents=True)
    torch.save(converted, output_path)
    # Round-trip sanity check: make sure the converted file loads back.
    load_converted_model(output_path)
def load_converted_model(ckpt: str):
    """Load a converted data2vec checkpoint.

    Returns:
        (model, task_cfg): the pretrained model with pretraining-only
        modules removed, and the task configuration.

    Raises:
        ValueError: if the checkpoint misses a required key.
    """
    ckpt_state = torch.load(ckpt, map_location='cpu')

    # Validate the converted-checkpoint schema before touching anything.
    for required_key in ('task_cfg', 'model_cfg', 'model_weight'):
        if required_key not in ckpt_state:
            raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {required_key} is missing')

    task_cfg = merge_with_parent(AudioPretrainingConfig, ckpt_state['task_cfg'])
    model_cfg = merge_with_parent(Data2VecAudioConfig, ckpt_state['model_cfg'])

    model = Data2VecAudioModel(model_cfg)
    model.remove_pretraining_modules()
    # The EMA teacher weights are pretraining-only; drop them before loading.
    ckpt_state['model_weight'].pop('_ema')
    model.load_state_dict(ckpt_state['model_weight'])
    return model, task_cfg
@dataclass
class EMAModuleConfig:
    """Configuration for :class:`EMAModule`.

    Bug fix: `field(...)` only takes effect on a dataclass; without the
    `@dataclass` decorator the class attributes were raw `Field` objects
    and `EMAModuleConfig(ema_decay=..., ema_fp32=True)` (as called by
    `Data2VecAudioModel.make_ema_teacher`) raised TypeError.
    """

    # Decay rate for the exponential moving average model.
    ema_decay: float = field(default=0.9999, metadata={'help': 'decay for exponential moving average model'})
    # Keep the EMA copy in fp32 even when the live model is fp16.
    ema_fp32: bool = field(default=False, metadata={'help': 'If true, store EMA model in fp32 even if model is in fp16'})
class EMAModule:
    """Exponential Moving Average of Fairseq Models"""

    def __init__(self, model, config: "EMAModuleConfig", device=None, skip_keys=None):
        """
        @param model model to initialize the EMA with
        @param config EMAConfig object with configuration like
                      ema_decay, ema_update_freq, ema_fp32
        @param device If provided, copy EMA to this device (e.g. gpu).
                      Otherwise EMA is in the same device as the model.
        """
        self.decay = config.ema_decay
        self.config = config
        self.skip_keys = skip_keys or set()
        self.fp32_params = {}

        # The EMA copy never receives gradients.
        self.model = copy.deepcopy(model)
        self.model.requires_grad_(False)

        if device is not None:
            logging.info(f'Copying EMA model to device {device}')
            self.model = self.model.to(device=device)

        if self.config.ema_fp32:
            self.build_fp32_params()

        self.update_freq_counter = 0

    def build_fp32_params(self, state_dict=None):
        """
        Store a copy of the EMA params in fp32.
        If state dict is passed, the EMA params is copied from
        the provided state dict. Otherwise, it is copied from the
        current EMA model parameters.
        """
        if not self.config.ema_fp32:
            raise RuntimeError(
                'build_fp32_params should not be called if ema_fp32=False. \n'
                'Use ema_fp32=True if this is really intended.'
            )

        if state_dict is None:
            state_dict = self.model.state_dict()

        def _to_float(t):
            return t.float() if torch.is_floating_point(t) else t

        for name in state_dict:
            if name in self.fp32_params:
                self.fp32_params[name].copy_(state_dict[name])
            else:
                self.fp32_params[name] = _to_float(state_dict[name])

    def restore(self, state_dict, build_fp32_params=False):
        """Load data from a model spec into EMA model"""
        self.model.load_state_dict(state_dict, strict=False)
        if build_fp32_params:
            self.build_fp32_params(state_dict)

    def set_decay(self, decay):
        self.decay = decay

    def get_decay(self):
        return self.decay

    def _step_internal(self, new_model):
        """One update of the EMA model based on new model weights"""
        decay = self.decay
        new_state = {}
        tracked = self.fp32_params if self.config.ema_fp32 else self.model.state_dict()

        for key, param in new_model.named_parameters():
            if isinstance(param, dict):
                continue
            try:
                ema_param = tracked[key]
            except KeyError:
                # First time we see this parameter: seed the EMA slot.
                ema_param = param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
                tracked[key] = ema_param

            if param.shape != ema_param.shape:
                raise ValueError(
                    'incompatible tensor shapes between model param and ema param'
                    + '{} vs. \n{}'.format(param.shape, ema_param.shape)
                )

            if 'version' in key:
                # Bookkeeping entries are never averaged.
                continue

            if key in self.skip_keys or not param.requires_grad:
                # Skipped/frozen params are copied through verbatim.
                tracked[key].copy_(param.to(dtype=ema_param.dtype).data)
                ema_param = tracked[key]
            else:
                ema_param.mul_(decay)
                ema_param.add_(param.data.to(dtype=ema_param.dtype), alpha=1 - decay)
            new_state[key] = ema_param

        for key, param in new_model.named_buffers():
            new_state[key] = param

        self.restore(new_state, build_fp32_params=False)

    @torch.no_grad()
    def step(self, new_model):
        self._step_internal(new_model)

    def reverse(self, model):
        """
        Load the model parameters from EMA model.
        Useful for inference or fine-tuning from the EMA model.
        """
        d = self.model.state_dict()
        if '_ema' in d:
            del d['_ema']
        model.load_state_dict(d, strict=False)
        return model
@dataclass
class Data2VecAudioConfig(Wav2Vec2Config):
    """Configuration for data2vec audio pre-training (extends Wav2Vec2Config)."""

    loss_beta: float = field(default=0, metadata={'help': 'beta for smooth l1 loss. 0 means use l2 loss'})
    loss_scale: Optional[float] = field(default=None, metadata={'help': 'scale the reconstruction loss by this constant. if None then scales by 1/sqrt(dim)'})
    average_top_k_layers: int = field(default=8, metadata={'help': 'how many layers to average'})

    # Normalization options applied to the teacher's per-layer outputs
    # and to the averaged targets (see Data2VecAudioModel.forward).
    layer_norm_target_layer: bool = False
    instance_norm_target_layer: bool = False
    instance_norm_targets: bool = False
    layer_norm_targets: bool = False
    batch_norm_target_layer: bool = False
    group_norm_target_layer: bool = False

    ema_decay: float = field(default=0.999, metadata={'help': 'initial ema decay rate'})
    ema_end_decay: float = field(default=0.9999, metadata={'help': 'final ema decay rate'})
    # Annotation fixed: defaults to None, so Optional[int] rather than int.
    ema_anneal_end_step: Optional[int] = None
    ema_transformer_only: bool = field(default=True, metadata={'help': 'whether to momentum update only the transformer'})
    ema_layers_only: bool = field(default=True, metadata={'help': 'whether to momentum update only the transformer layers'})

    # Annotation fixed: defaults to None, so Optional[int] rather than int.
    max_update: Optional[int] = None

    min_target_var: float = field(default=0.1, metadata={'help': 'stop training if target var falls below this'})
    min_pred_var: float = field(default=0.01, metadata={'help': 'stop training if prediction var falls below this'})
def get_annealed_rate(start, end, curr_step, total_steps):
    """Linearly anneal a rate from ``start`` to ``end`` over ``total_steps``."""
    remaining = 1 - curr_step / total_steps
    return end - (end - start) * remaining
class Data2VecAudioModel(torch.nn.Module):
    """data2vec audio model: a student network predicts the averaged
    top-k layer representations of an EMA teacher on masked timesteps."""

    def __init__(self, cfg: Data2VecAudioConfig):
        super().__init__()
        self.cfg = cfg

        # NOTE(review): eval() of a config string — safe only for trusted configs.
        feature_enc_layers = eval(cfg.conv_feature_layers)
        self.extractor_embed = feature_enc_layers[-1][0]

        self.ema = None
        self.embed = cfg.encoder_embed_dim

        self.average_top_k_layers = cfg.average_top_k_layers
        self.loss_beta = cfg.loss_beta
        self.loss_scale = cfg.loss_scale

        self.feature_extractor = ConvFeatureExtractionModel(conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, conv_bias=cfg.conv_bias)

        self.post_extract_proj = nn.Linear(self.extractor_embed, cfg.encoder_embed_dim)

        # Time/channel masking hyper-parameters (SpecAugment-style).
        self.mask_prob = cfg.mask_prob
        self.mask_selection = cfg.mask_selection
        self.mask_other = cfg.mask_other
        self.mask_length = cfg.mask_length
        self.no_mask_overlap = cfg.no_mask_overlap
        self.mask_min_space = cfg.mask_min_space

        self.mask_channel_prob = cfg.mask_channel_prob
        self.mask_channel_before = cfg.mask_channel_before
        self.mask_channel_selection = cfg.mask_channel_selection
        self.mask_channel_other = cfg.mask_channel_other
        self.mask_channel_length = cfg.mask_channel_length
        self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
        self.mask_channel_min_space = cfg.mask_channel_min_space

        self.dropout_input = nn.Dropout(cfg.dropout_input)
        self.dropout_features = nn.Dropout(cfg.dropout_features)

        self.feature_grad_mult = cfg.feature_grad_mult

        self.mask_emb = nn.Parameter(torch.FloatTensor(cfg.encoder_embed_dim).uniform_())

        self.encoder = TransformerEncoder(cfg)
        self.layer_norm = LayerNorm(self.extractor_embed)

        self.final_proj = nn.Linear(self.embed, self.embed)

        self.num_updates = 0

    def make_ema_teacher(self):
        """Create the EMA teacher (whole model, or transformer only)."""
        ema_config = EMAModuleConfig(ema_decay=self.cfg.ema_decay, ema_fp32=True)
        skip_keys = set()
        if self.cfg.ema_layers_only:
            self.cfg.ema_transformer_only = True
            # The positional convolution is copied verbatim, not averaged.
            for k, _ in self.encoder.pos_conv.named_parameters():
                skip_keys.add(f'pos_conv.{k}')
        self.ema = EMAModule(
            self.encoder if self.cfg.ema_transformer_only else self,
            ema_config,
            skip_keys=skip_keys,
        )

    def set_num_updates(self, num_updates):
        """Track the update count; create/update the EMA teacher as needed."""
        # Bug fix: torch.nn.Module has no set_num_updates(), so the original
        # unconditional super().set_num_updates(num_updates) (inherited from
        # the fairseq base class this was converted from) raised
        # AttributeError. Call a parent hook only if one actually exists.
        parent_hook = getattr(super(), 'set_num_updates', None)
        if parent_hook is not None:
            parent_hook(num_updates)

        if self.ema is None and self.final_proj is not None:
            logger.info(f'making ema teacher')
            self.make_ema_teacher()
        elif self.training and self.ema is not None:
            if self.cfg.ema_decay != self.cfg.ema_end_decay:
                # Anneal the decay rate toward its final value.
                if num_updates >= self.cfg.ema_anneal_end_step:
                    decay = self.cfg.ema_end_decay
                else:
                    decay = get_annealed_rate(self.cfg.ema_decay, self.cfg.ema_end_decay, num_updates, self.cfg.ema_anneal_end_step)
                self.ema.set_decay(decay)
            if self.ema.get_decay() < 1:
                self.ema.step(self.encoder if self.cfg.ema_transformer_only else self)

        self.num_updates = num_updates

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        state = super().state_dict(destination, prefix, keep_vars)
        if self.ema is not None:
            # Persist the fp32 EMA teacher weights alongside the model.
            state[prefix + '_ema'] = self.ema.fp32_params
        return state

    def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
        if self.ema is not None:
            k = prefix + '_ema'
            assert k in state_dict
            self.ema.restore(state_dict[k], True)
            del state_dict[k]
        return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)

    @classmethod
    def build_model(cls, cfg: Data2VecAudioConfig, task=None):
        """Build a new model instance."""
        return cls(cfg)

    def apply_mask(self, x, padding_mask, mask_indices=None, mask_channel_indices=None):
        """Apply time and/or channel masking to x; returns (x, mask_indices)."""
        B, T, C = x.shape

        if self.mask_channel_prob > 0 and self.mask_channel_before:
            mask_channel_indices = compute_mask_indices((B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space)
            mask_channel_indices = torch.from_numpy(mask_channel_indices).to(x.device).unsqueeze(1).expand(-1, T, -1)
            x[mask_channel_indices] = 0

        if self.mask_prob > 0:
            if mask_indices is None:
                mask_indices = compute_mask_indices((B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=1, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space, require_same_masks=self.cfg.require_same_masks, mask_dropout=self.cfg.mask_dropout)
                mask_indices = torch.from_numpy(mask_indices).to(x.device)
            x = index_put(x, mask_indices, self.mask_emb)
        else:
            mask_indices = None

        if self.mask_channel_prob > 0 and not self.mask_channel_before:
            if mask_channel_indices is None:
                mask_channel_indices = compute_mask_indices((B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space)
                mask_channel_indices = torch.from_numpy(mask_channel_indices).to(x.device).unsqueeze(1).expand(-1, T, -1)
            x = index_put(x, mask_channel_indices, 0)

        return x, mask_indices

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            return torch.floor((input_length - kernel_size) / stride + 1)

        conv_cfg_list = eval(self.cfg.conv_feature_layers)
        for i in range(len(conv_cfg_list)):
            input_lengths = _conv_out_length(input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2])

        return input_lengths.to(torch.long)

    def forward(self, source, padding_mask=None, mask=True, features_only=False, layer=None, mask_indices=None, mask_channel_indices=None, padding_count=None):
        """Run the student; unless features_only, also run the EMA teacher
        and return the masked-prediction regression loss."""
        features = source

        if self.feature_grad_mult > 0:
            features = self.feature_extractor(features)
            if self.feature_grad_mult != 1.0:
                features = GradMultiply.apply(features, self.feature_grad_mult)
        else:
            with torch.no_grad():
                features = self.feature_extractor(features)

        features = features.transpose(1, 2)
        features = self.layer_norm(features)

        orig_padding_mask = padding_mask

        if padding_mask is not None and padding_mask.any():
            # Downsample the waveform padding mask to feature resolution.
            input_lengths = (1 - padding_mask.long()).sum(-1)
            output_lengths = self._get_feat_extract_output_lengths(input_lengths)
            padding_mask = torch.zeros(features.shape[:2], dtype=features.dtype, device=features.device)
            # These two operations make sure that all values
            # before the output lengths indices are attended to.
            padding_mask[(torch.arange(padding_mask.shape[0], device=padding_mask.device), output_lengths - 1)] = 1
            padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
        else:
            padding_mask = None

        if self.post_extract_proj is not None:
            features = self.post_extract_proj(features)

        pre_encoder_features = None
        if self.cfg.ema_transformer_only:
            # The teacher re-uses the (unmasked) features before dropout.
            pre_encoder_features = features.clone()

        features = self.dropout_input(features)

        if mask:
            x, mask_indices = self.apply_mask(features, padding_mask, mask_indices=mask_indices, mask_channel_indices=mask_channel_indices)
        else:
            x = features
            mask_indices = None

        x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer)

        if features_only:
            return {'x': x, 'padding_mask': padding_mask, 'layer_results': layer_results}

        result = {'losses': {}}

        # Teacher forward pass: no gradients ever flow into the EMA model.
        with torch.no_grad():
            self.ema.model.eval()

            if self.cfg.ema_transformer_only:
                y, layer_results = self.ema.model.extract_features(pre_encoder_features, padding_mask=padding_mask, min_layer=self.cfg.encoder_layers - self.average_top_k_layers)
                y = {'x': y, 'padding_mask': padding_mask, 'layer_results': layer_results}
            else:
                y = self.ema.model.extract_features(source=source, padding_mask=orig_padding_mask, mask=False)

            target_layer_results = [l[2] for l in y['layer_results']]

            permuted = False
            if self.cfg.instance_norm_target_layer or self.cfg.batch_norm_target_layer:
                # TBC -> BCT for the norm ops below.
                target_layer_results = [tl.permute(1, 2, 0) for tl in target_layer_results]
                permuted = True

            if self.cfg.batch_norm_target_layer:
                target_layer_results = [F.batch_norm(tl.float(), running_mean=None, running_var=None, training=True) for tl in target_layer_results]

            if self.cfg.instance_norm_target_layer:
                target_layer_results = [F.instance_norm(tl.float()) for tl in target_layer_results]

            if permuted:
                # BCT -> BTC
                target_layer_results = [tl.transpose(1, 2) for tl in target_layer_results]

            if self.cfg.group_norm_target_layer:
                target_layer_results = [F.layer_norm(tl.float(), tl.shape[-2:]) for tl in target_layer_results]

            if self.cfg.layer_norm_target_layer:
                target_layer_results = [F.layer_norm(tl.float(), tl.shape[-1:]) for tl in target_layer_results]

            # The regression target is the average of the top-k teacher layers.
            y = sum(target_layer_results) / len(target_layer_results)

            if self.cfg.layer_norm_targets:
                y = F.layer_norm(y.float(), y.shape[-1:])

            if self.cfg.instance_norm_targets:
                y = F.instance_norm(y.float().transpose(1, 2)).transpose(1, 2)

            if not permuted:
                y = y.transpose(0, 1)

            y = y[mask_indices]

        # Student prediction on masked positions only.
        x = x[mask_indices]
        x = self.final_proj(x)

        sz = x.size(-1)

        if self.loss_beta == 0:
            loss = F.mse_loss(x.float(), y.float(), reduction='none').sum(dim=-1)
        else:
            loss = F.smooth_l1_loss(x.float(), y.float(), reduction='none', beta=self.loss_beta).sum(dim=-1)

        if self.loss_scale is not None:
            scale = self.loss_scale
        else:
            scale = 1 / math.sqrt(sz)

        result['losses']['regression'] = loss.sum() * scale

        if 'sample_size' not in result:
            result['sample_size'] = loss.numel()

        with torch.no_grad():
            result['target_var'] = self.compute_var(y)
            result['pred_var'] = self.compute_var(x.float())

        # Collapse detection: abort when representations lose variance.
        if self.num_updates > 5000 and result['target_var'] < self.cfg.min_target_var:
            logger.error(f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting")
            raise Exception(f"target var is {result['target_var'].item()} < {self.cfg.min_target_var}, exiting")
        if self.num_updates > 5000 and result['pred_var'] < self.cfg.min_pred_var:
            logger.error(f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting")
            raise Exception(f"pred var is {result['pred_var'].item()} < {self.cfg.min_pred_var}, exiting")

        if self.ema is not None:
            result['ema_decay'] = self.ema.get_decay() * 1000

        return result

    @staticmethod
    def compute_var(y):
        """Mean per-dimension standard deviation of y (all-reduced when
        torch.distributed is initialized)."""
        y = y.view(-1, y.size(-1))
        if dist.is_initialized():
            zc = torch.tensor(y.size(0)).cuda()
            zs = y.sum(dim=0)
            zss = (y ** 2).sum(dim=0)

            dist.all_reduce(zc)
            dist.all_reduce(zs)
            dist.all_reduce(zss)

            var = zss / (zc - 1) - (zs ** 2) / (zc * (zc - 1))
            return torch.sqrt(var + 1e-06).mean()
        else:
            return torch.sqrt(y.var(dim=0) + 1e-06).mean()

    def extract_features(self, source, padding_mask, mask=False, layer=None):
        """Feature-only forward pass (no teacher, no loss)."""
        res = self.forward(source, padding_mask, mask=mask, features_only=True, layer=layer)
        return res

    def remove_pretraining_modules(self, last_layer=None):
        """Drop pretraining-only modules (projection head, EMA teacher) and
        optionally truncate the encoder at ``last_layer``."""
        self.final_proj = None
        self.ema = None
        if last_layer is not None:
            self.encoder.layers = nn.ModuleList((l for i, l in enumerate(self.encoder.layers) if i <= last_layer))
def data2vec_custom(ckpt: str, refresh: bool=False, **kwargs):
    """Build the data2vec upstream from a local path or a URL.

    Args:
        ckpt: local checkpoint path, or an http(s) URL to download
        refresh: re-download the checkpoint even if it is cached
    """
    is_url = ckpt.startswith('http')
    local_path = _urls_to_filepaths(ckpt, refresh=refresh) if is_url else ckpt
    return _UpstreamExpert(local_path, **kwargs)
def data2vec_local(*args, **kwargs):
    """Alias of data2vec_custom for loading a local checkpoint."""
    return data2vec_custom(*args, **kwargs)
def data2vec_url(*args, **kwargs):
    """Alias of data2vec_custom for loading a checkpoint from a URL."""
    return data2vec_custom(*args, **kwargs)
def data2vec(refresh=False, *args, **kwargs):
    """
    The default model - Base
        refresh (bool): whether to download ckpt/config again if existed
    """
    return data2vec_base_960(*args, refresh=refresh, **kwargs)
def data2vec_base_960(refresh=False, *args, **kwargs):
    """
    The Base model trained on LibriSpeech 960hr.
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/audio_base_ls.pt'
    return data2vec_custom(*args, refresh=refresh, **kwargs)
def data2vec_large_ll60k(refresh=False, *args, **kwargs):
    """
    The Large model trained on Libri-light 60k hours of data
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/vox_pretrained.pt'
    return data2vec_custom(*args, refresh=refresh, **kwargs)
class CMVN(torch.jit.ScriptModule):
    """Global cepstral mean and variance normalization (TorchScript)."""

    __constants__ = ['mode', 'dim', 'eps']

    def __init__(self, mode='global', dim=2, eps=1e-10):
        super(CMVN, self).__init__()
        if mode != 'global':
            raise NotImplementedError('Only support global mean variance normalization.')
        self.mode = mode
        self.dim = dim
        self.eps = eps

    @torch.jit.script_method
    def forward(self, x):
        if self.mode == 'global':
            # Normalize along `dim`; eps guards against zero variance.
            mean = x.mean(self.dim, keepdim=True)
            std = x.std(self.dim, keepdim=True)
            return (x - mean) / (self.eps + std)

    def extra_repr(self):
        return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
class FeatureExtractor(nn.Module):
    """Feature extractor turning a waveform into a (CMVN-normalized) Mel filterbank."""

    def __init__(self, mode='fbank', num_mel_bins=80, decode_wav=False, apply_cmvn=True, **kwargs):
        super(FeatureExtractor, self).__init__()
        # Only Mel filterbank features are supported.
        assert (mode == 'fbank'), 'Only Mel-spectrogram implemented'
        self.mode = mode
        self.extract_fn = kaldi.fbank
        self.num_mel_bins = num_mel_bins
        self.kwargs = kwargs
        self.apply_cmvn = apply_cmvn
        if self.apply_cmvn:
            self.cmvn = CMVN()
        self.decode_wav = decode_wav
        if self.decode_wav:
            torchaudio.set_audio_backend('soundfile')

    def _load_file(self, filepath):
        loader = torchaudio.load_wav if self.decode_wav else torchaudio.load
        waveform, sample_rate = loader(filepath)
        return waveform, sample_rate

    def forward(self, waveform):
        y = self.extract_fn(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=SAMPLE_RATE, window_type=WINDOW_TYPE, **self.kwargs)
        if self.apply_cmvn:
            # CMVN expects (batch, feat, time); restore (time, feat) afterwards.
            y = self.cmvn(y.transpose(0, 1).unsqueeze(0)).squeeze(0).transpose(0, 1)
        return y

    def extra_repr(self):
        return 'mode={}, num_mel_bins={}'.format(self.mode, self.num_mel_bins)

    def create_msg(self):
        """List msg for verbose function"""
        msg = 'Audio spec.| Audio feat. = {}\t\t| feat. dim = {}\t| CMVN = {}'.format(self.mode, self.num_mel_bins, self.apply_cmvn)
        return [msg]
def create_transform():
    """Build the default feature-extraction transform."""
    return FeatureExtractor()
def decoar_custom(ckpt: str, refresh=False, *args, **kwargs):
    """Build the DeCoAR upstream from a local path or a URL."""
    is_url = ckpt.startswith('http')
    local_path = _urls_to_filepaths(ckpt, refresh=refresh) if is_url else ckpt
    return _UpstreamExpert(local_path, *args, **kwargs)
def decoar_local(*args, **kwargs):
    """Alias of decoar_custom for loading a local checkpoint."""
    return decoar_custom(*args, **kwargs)
def decoar_url(*args, **kwargs):
    """Alias of decoar_custom for loading a checkpoint from a URL."""
    return decoar_custom(*args, **kwargs)
def decoar(refresh=False, *args, **kwargs):
    """
    The default DeCoAR model.
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/checkpoint_decoar.pt'
    return decoar_url(*args, refresh=refresh, **kwargs)
def decoar2_custom(ckpt: str, refresh=False, *args, **kwargs):
    """Build the DeCoAR 2.0 upstream from a local path or a URL."""
    is_url = ckpt.startswith('http')
    local_path = _urls_to_filepaths(ckpt, refresh=refresh) if is_url else ckpt
    return _UpstreamExpert(local_path, *args, **kwargs)
def decoar2_local(*args, **kwargs):
    """
    The model from local ckpt
        ckpt (str): PATH
        feature_selection (str): 'c' (default) or 'z'
    """
    return decoar2_custom(*args, **kwargs)
def decoar2_url(*args, **kwargs):
    """
    The model from URL
        ckpt (str): URL
    """
    return decoar2_custom(*args, **kwargs)
def decoar2(*args, refresh=False, **kwargs):
    """
    The default DeCoAR 2.0 model.
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/checkpoint_decoar2.pt'
    return decoar2_url(*args, refresh=refresh, **kwargs)
class CMVN(torch.jit.ScriptModule):
    """Cepstral mean and variance normalization over a chosen dimension.

    Only the 'global' mode is implemented; scripted for TorchScript use.
    """

    __constants__ = ['mode', 'dim', 'eps']

    def __init__(self, mode='global', dim=2, eps=1e-10):
        super(CMVN, self).__init__()
        if mode != 'global':
            raise NotImplementedError('Only support global mean variance normalization.')
        self.mode = mode
        self.dim = dim
        self.eps = eps

    @torch.jit.script_method
    def forward(self, x):
        if self.mode == 'global':
            centered = x - x.mean(self.dim, keepdim=True)
            # eps keeps the division well-defined for constant inputs.
            return centered / (self.eps + x.std(self.dim, keepdim=True))

    def extra_repr(self):
        return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
class FeatureExtractor(nn.Module):
    """Feature extractor producing Mel filterbank features, optionally CMVN-normalized."""

    def __init__(self, mode='fbank', num_mel_bins=80, decode_wav=False, apply_cmvn=True, **kwargs):
        super(FeatureExtractor, self).__init__()
        # Only Mel filterbank extraction is implemented.
        assert (mode == 'fbank'), 'Only Mel-spectrogram implemented'
        self.mode = mode
        self.extract_fn = kaldi.fbank
        self.apply_cmvn = apply_cmvn
        if self.apply_cmvn:
            self.cmvn = CMVN()
        self.num_mel_bins = num_mel_bins
        self.kwargs = kwargs
        self.decode_wav = decode_wav
        if self.decode_wav:
            torchaudio.set_audio_backend('soundfile')

    def _load_file(self, filepath):
        if self.decode_wav:
            waveform, sample_rate = torchaudio.load_wav(filepath)
        else:
            waveform, sample_rate = torchaudio.load(filepath)
        return waveform, sample_rate

    def forward(self, waveform):
        feats = self.extract_fn(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=SAMPLE_RATE, window_type=WINDOW_TYPE, **self.kwargs)
        if not self.apply_cmvn:
            return feats
        # (time, feat) -> (1, feat, time) for CMVN, then back again.
        feats = feats.transpose(0, 1).unsqueeze(0)
        feats = self.cmvn(feats)
        return feats.squeeze(0).transpose(0, 1)

    def extra_repr(self):
        return 'mode={}, num_mel_bins={}'.format(self.mode, self.num_mel_bins)

    def create_msg(self):
        """List msg for verbose function"""
        msg = 'Audio spec.| Audio feat. = {}\t\t| feat. dim = {}\t| CMVN = {}'.format(self.mode, self.num_mel_bins, self.apply_cmvn)
        return [msg]
def create_transform():
    """Return a FeatureExtractor with default settings."""
    return FeatureExtractor()
def decoar_layers_custom(ckpt: str, refresh=False, *args, **kwargs):
    """Build the layer-wise DeCoAR upstream from a local path or a URL."""
    is_url = ckpt.startswith('http')
    local_path = _urls_to_filepaths(ckpt, refresh=refresh) if is_url else ckpt
    return _UpstreamExpert(local_path, *args, **kwargs)
def decoar_layers_local(*args, **kwargs):
    """Alias of decoar_layers_custom for loading a local checkpoint."""
    return decoar_layers_custom(*args, **kwargs)
def decoar_layers_url(*args, **kwargs):
    """Alias of decoar_layers_custom for loading a checkpoint from a URL."""
    return decoar_layers_custom(*args, **kwargs)
def decoar_layers(*args, refresh=False, **kwargs):
    """
    The default layer-wise DeCoAR model.
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/checkpoint_decoar.pt'
    return decoar_layers_url(*args, refresh=refresh, **kwargs)
class UpstreamExpert(UpstreamBase):
    """
    The Distiller wrapper
    """

    def __init__(self, ckpt, model_config=None, **kwargs):
        super().__init__(**kwargs)
        if model_config is not None:
            print('[UpstreamExpert] - Using upstream expert config file from:', model_config)
            with open(model_config, 'r') as file:
                options = yaml.load(file, Loader=yaml.FullLoader)
        else:
            print('[UpstreamExpert] - Using the default upstream expert config')
            options = {'load_pretrain': 'True', 'no_grad': 'False', 'permute_input': 'False'}
        options['ckpt_file'] = ckpt
        self.model = PretrainedDistiller(options)

    def get_downsample_rates(self, key: str) -> int:
        # The distiller downsamples waveforms by a fixed factor of 320.
        return 320

    def forward(self, wavs, no_pred=False):
        _, feat_final, pred, pad_mask, layer_hidden = self.model(wavs, get_hidden=True, no_pred=no_pred)

        if not no_pred:
            # pred: (num_layers, B, T, D) -> list of (B, T, D) per layer.
            hidden_feats = [h.squeeze(0) for h in pred.transpose(0, 1).split(1, 0)]
        else:
            hidden_feats = []

        hidden_feats = [feat_final] + layer_hidden + hidden_feats

        return {
            'last_hidden_state': None if no_pred else hidden_feats[-1],
            'hidden_states': hidden_feats,
            'pad_mask': pad_mask,
            'paper': layer_hidden[-1],
        }
def distiller_local(ckpt, *args, **kwargs):
    """
    Build the distiller upstream from a local checkpoint.
        ckpt (str): PATH to an existing checkpoint file
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
def distiller_url(ckpt, refresh=False, *args, **kwargs):
    """
    Build the distiller upstream from a URL.
        ckpt (str): URL
        refresh (bool): whether to download ckpt/config again if existed
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return distiller_local(local_path, *args, **kwargs)
def distilhubert(refresh=False, *args, **kwargs):
    """
    DistilHuBERT (alias of the Base model).
    """
    return distilhubert_base(*args, refresh=refresh, **kwargs)
def distilhubert_base(refresh=False, *args, **kwargs):
    """
    DistilHuBERT Base
    Default model in https://arxiv.org/abs/2110.01900
    """
    kwargs['ckpt'] = 'https://huggingface.co/leo19941227/distilhubert/resolve/main/distilhubert_ls960_4-8-12.ckpt'
    return distiller_url(*args, refresh=refresh, **kwargs)
def init_bert_params(module):
    """
    Initialize the weights specific to the BERT Model.
    This overrides the default initializations depending on the specified arguments.
        1. If normal_init_linear_weights is set then weights of linear
           layer will be initialized using the normal distribution and
           bais will be set to the specified value.
        2. If normal_init_embed_weights is set then weights of embedding
           layer will be initialized using the normal distribution.
        3. If normal_init_proj_weights is set then weights of
           in_project_weight for MultiHeadAttention initialized using
           the normal distribution (to be validated).
    """

    def normal_(data):
        # Draw on CPU (for determinism across devices), then move back.
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))

    if isinstance(module, nn.Linear):
        normal_(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        normal_(module.weight.data)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        normal_(module.q_proj.weight.data)
        normal_(module.k_proj.weight.data)
        normal_(module.v_proj.weight.data)
class SplitLinear(nn.Module):
    """Linear layer applied independently to ``in_split`` chunks of the last dim.

    Input (B, T, in_split * in_dim) -> output (B, T, in_split * out_dim).
    With in_split == 1 this degenerates to a plain nn.Linear.
    """

    def __init__(self, in_dim, in_split, out_dim):
        super().__init__()
        self.in_dim = in_dim
        self.in_split = in_split
        self.out_dim = out_dim

        if in_split > 1:
            bound = self.in_dim ** (-0.5)
            weight = torch.zeros((self.in_split, self.in_dim, self.out_dim))
            self.weight = nn.Parameter(weight, requires_grad=True)
            nn.init.uniform_(self.weight, -bound, bound)

            bias = torch.zeros((1, 1, self.in_split, self.out_dim))
            self.bias = nn.Parameter(bias, requires_grad=True)
            nn.init.uniform_(self.bias, -bound, bound)
        else:
            self.layer = nn.Linear(self.in_dim, self.out_dim)

    def forward(self, x: torch.Tensor):
        if self.in_split == 1:
            return self.layer(x)

        batch, seq = x.shape[0], x.shape[1]
        # Split the last dim into (in_split, in_dim) chunks.
        chunks = x.reshape(batch, seq, self.in_split, 1, self.in_dim)
        # Per-split matmul: (..., k, 1, m) x (k, m, n) -> (..., k, 1, n).
        out = torch.einsum('...klm,kmn->...kln', chunks, self.weight).squeeze(3)
        out = out + self.bias
        return out.reshape(batch, seq, -1)
class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models. Supports three self-attention variants ('original', 'sparse',
    'dynamic') and pre- or post-LayerNorm ordering.
    """

    def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', layer_norm_first: bool=False, attention_type: str='original') -> None:
        super().__init__()
        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout

        self.activation_fn = get_activation_fn(activation_fn)
        self.attention_type = attention_type

        # Select the self-attention implementation; 'sparse' and 'dynamic'
        # lazily import their fairseq modules only when requested.
        if (attention_type == 'original'):
            self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True)
        elif (attention_type == 'sparse'):
            from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
            self.self_attn = SparseMultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, stride=32, expressivity=16)
        elif (attention_type == 'dynamic'):
            from fairseq.modules import DynamicConv
            self.self_attn = DynamicConv(self.embedding_dim, kernel_size=31, padding_l=15, num_heads=num_attention_heads, weight_dropout=0.0, weight_softmax=True, bias=True)
        else:
            raise NotImplementedError(f'Unknown attention type {attention_type}')

        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(self.activation_dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.layer_norm_first = layer_norm_first

        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim)
        # position-wise feed-forward network
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = nn.LayerNorm(self.embedding_dim)

    def forward_self_attn(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None, need_weights: bool=False):
        # Dispatch to the configured attention module;
        # DynamicConv takes no mask arguments and returns no weights.
        if (self.attention_type in ['original', 'sparse']):
            (x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=need_weights, attn_mask=self_attn_mask)
        elif (self.attention_type == 'dynamic'):
            x = self.self_attn(x)
            attn = None
        return (x, attn)

    def forward(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None, need_weights: bool=False, att_args=None):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
        """
        residual = x

        if self.layer_norm_first:
            # Pre-norm: LayerNorm -> attention -> residual, then
            # LayerNorm -> FFN -> residual.
            x = self.self_attn_layer_norm(x)
            (x, attn) = self.forward_self_attn(x, self_attn_mask=self_attn_mask, need_weights=False, self_attn_padding_mask=self_attn_padding_mask)
            x = self.dropout1(x)
            x = (residual + x)

            residual = x
            x = self.final_layer_norm(x)
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = (residual + x)
        else:
            # Post-norm: attention -> residual -> LayerNorm, then
            # FFN -> residual -> LayerNorm.
            (x, attn) = self.forward_self_attn(x, self_attn_mask=self_attn_mask, need_weights=need_weights, self_attn_padding_mask=self_attn_padding_mask)
            x = self.dropout1(x)
            x = (residual + x)
            x = self.self_attn_layer_norm(x)

            residual = x
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = (residual + x)
            x = self.final_layer_norm(x)

        return (x, attn)
class TransformerEncoder(nn.Module):
    """wav2vec 2.0-style Transformer encoder: a convolutional positional
    embedding followed by a stack of TransformerSentenceEncoderLayers, with
    LayerDrop applied during training."""

    def __init__(self, args):
        super().__init__()
        self.dropout = args.dropout
        self.embedding_dim = args.encoder_embed_dim
        # Grouped 1-D convolution acting as a relative positional embedding.
        self.pos_conv = nn.Conv1d(self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=(args.conv_pos // 2), groups=args.conv_pos_groups)
        dropout = 0
        # Initialization scheme from the wav2vec 2.0 reference implementation.
        std = math.sqrt(((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)))
        nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
        nn.init.constant_(self.pos_conv.bias, 0)
        self.pos_conv = nn.utils.weight_norm(self.pos_conv, name='weight', dim=2)
        # SamePad trims the extra frame produced by an even kernel size.
        self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
        print(f'[TransformerEncoder] - Attention type = {args.attention_type}')
        self.layers = nn.ModuleList([TransformerSentenceEncoderLayer(embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first, attention_type=args.attention_type) for _ in range(args.encoder_layers)])
        self.layer_norm_first = args.layer_norm_first
        self.layer_norm = nn.LayerNorm(self.embedding_dim)
        # Probability of skipping an entire layer while training (LayerDrop).
        self.layerdrop = args.encoder_layerdrop
        self.apply(init_bert_params)

    def forward(self, x, padding_mask=None, attn_mask=None, get_hidden=False):
        (x, layer_results) = self.extract_features(x, padding_mask, attn_mask, get_hidden=get_hidden)
        if self.layer_norm_first:
            # Pre-LN stacks need one final normalization on the output.
            x = self.layer_norm(x)
        return (x, layer_results)

    def extract_features(self, x, padding_mask=None, attn_mask=None, get_hidden=False):
        if (padding_mask is not None):
            # Zero padded frames so they do not leak into the conv pos-embedding.
            x[padding_mask] = 0
        x_conv = self.pos_conv(x.transpose(1, 2))
        x_conv = x_conv.transpose(1, 2)
        x = (x + x_conv)
        if (not self.layer_norm_first):
            x = self.layer_norm(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C (fairseq attention expects time-first layout).
        x = x.transpose(0, 1)
        layer_results = []
        for (i, layer) in enumerate(self.layers):
            # LayerDrop: randomly skip layers, but only during training.
            dropout_probability = np.random.random()
            if ((not self.training) or (dropout_probability > self.layerdrop)):
                (x, z) = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, self_attn_mask=attn_mask)
                if get_hidden:
                    # Collect hidden states back in B x T x C layout.
                    layer_results.append(x.transpose(0, 1))
        x = x.transpose(0, 1)
        return (x, layer_results)
class UpstreamExpert(torch.nn.Module): def __init__(self, ckpt, config=None, **kwargs): super().__init__() device = ('cuda' if torch.cuda.is_available() else 'cpu') assert (HubertTask is not None), 'ESPnet is not installed, run `external_tools/install_espnet.sh` to install' (hubert_model, hubert_train_args) = HubertTask.build_model_from_file(config, ckpt, device) self.device = next(hubert_model.parameters()).device self.model = hubert_model.encoder.hubert_pretrain_model def get_downsample_rates(self, key: str=None) -> int: return 320 def forward(self, wavs): wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(self.device) wavs = pad_sequence(wavs, batch_first=True).to(self.device) feats = self.model.wav2vec2.extract_features(wavs, wav_lengths)[0] return {'hidden_states': feats}
def espnet_hubert_custom(ckpt, *args, config=None, **kwargs): return _UpstreamExpert(ckpt, *args, **kwargs)
def espnet_hubert_local(*args, **kwargs): return espnet_hubert_custom(*args, **kwargs)
def cvhubert(*args, refresh=False, **kwargs): url = 'https://huggingface.co/espnet/espnet_cvhubert/resolve/main/exp/hubert_iter2_train_ssl_torchaudiohubert_base_960h_pretrain_it2_raw/latest.pth' config_url = 'https://huggingface.co/espnet/espnet_cvhubert/raw/main/exp/hubert_iter2_train_ssl_torchaudiohubert_base_960h_pretrain_it2_raw/config.yaml' (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh) return espnet_hubert_custom(ckpt, config)
def wavlablm_ek_40k(*args, refresh=False, **kwargs): url = 'https://huggingface.co/espnet/WavLabLM-EK-40k/resolve/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_cont_raw_layer_9/5epoch.pth' config_url = 'https://huggingface.co/espnet/WavLabLM-EK-40k/raw/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_cont_raw_layer_9/config.yaml' (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh) return espnet_hubert_custom(ckpt, config)
def wavlablm_ms_40k(*args, refresh=False, **kwargs): url = 'https://huggingface.co/espnet/WavLabLM-MS-40k/resolve/main/exp_babel/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_babel_light_raw_layer_9/5epoch.pth' config_url = 'https://huggingface.co/espnet/WavLabLM-MS-40k/raw/main/exp_babel/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_babel_light_raw_layer_9/config.yaml' (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh) return espnet_hubert_custom(ckpt, config)
def wavlablm_mk_40k(*args, refresh=False, **kwargs): url = 'https://huggingface.co/espnet/WavLabLM-MK-40k/resolve/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_raw_layer_9/valid.acc_m.ave_10best.pth' config_url = 'https://huggingface.co/espnet/WavLabLM-MK-40k/raw/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_raw_layer_9/config.yaml' (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh) return espnet_hubert_custom(ckpt, config)
def espnet_hubert_base_iter1(*args, refresh=False, **kwargs): url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/resolve/main/exp/hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/valid.loss.ave.pth' config_url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/raw/main/exp/hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/config.yaml' (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh) return espnet_hubert_custom(ckpt, config)
def espnet_hubert_base_iter0(*args, refresh=False, **kwargs): url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/resolve/main/exp/hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/valid.loss.ave.pth' config_url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/raw/main/exp/hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/config.yaml' (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh) return espnet_hubert_custom(ckpt, config)
def espnet_hubert_large_gs_ll60k(*args, refresh=False, **kwargs): url = 'https://huggingface.co/espnet/hubert_large_gs_16_librilight60k/resolve/main/mnt/datastore/exp/hubert_iter1_train_ssl_torchaudiohubert_large_960h_pretrain_it2_bins_raw/valid.loss.ave_10best.pth' config_url = 'https://huggingface.co/espnet/hubert_large_gs_16_librilight60k/blob/main/mnt/datastore/exp/hubert_iter1_train_ssl_torchaudiohubert_large_960h_pretrain_it2_bins_raw/config.yaml' (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh) return espnet_hubert_custom(ckpt, config)
class UpstreamExpert(nn.Module): def __init__(self, ckpt: str=None, model_config: str=None, **kwargs): '\n Args:\n ckpt:\n The checkpoint path for loading your pretrained weights.\n Can be assigned by the -k option in run_downstream.py\n\n model_config:\n The config path for constructing your model.\n Might not needed if you also save that in your checkpoint file.\n Can be assigned by the -g option in run_downstream.py\n ' super().__init__() self.name = '[Example UpstreamExpert]' print(f'{self.name} - You can use model_config to construct your customized model: {model_config}') print(f'{self.name} - You can use ckpt to load your pretrained weights: {ckpt}') print(f"{self.name} - If you store the pretrained weights and model config in a single file, you can just choose one argument (ckpt or model_config) to pass. It's up to you!") self.model1 = nn.Linear(1, HIDDEN_DIM) self.model2 = nn.Linear(HIDDEN_DIM, HIDDEN_DIM) def get_downsample_rates(self, key: str) -> int: "\n Since we do not do any downsampling in this example upstream\n All keys' corresponding representations have downsample rate of 1\n " return 1 def forward(self, wavs: List[Tensor]) -> Dict[(str, Union[(Tensor, List[Tensor])])]: '\n When the returning Dict contains the List with more than one Tensor,\n those Tensors should be in the same shape to train a weighted-sum on them.\n ' wavs = pad_sequence(wavs, batch_first=True).unsqueeze((- 1)) hidden = self.model1(wavs) feature = self.model2(hidden) return {'hidden_states': [hidden, feature], 'PR': [hidden, feature], 'ASR': [hidden, feature], 'QbE': [hidden, feature], 'SID': [hidden, feature], 'ASV': [hidden, feature], 'SD': [hidden, feature], 'ER': [hidden, feature], 'SF': [hidden, feature], 'SE': [hidden, feature], 'SS': [hidden, feature], 'secret': [hidden, feature]}
def customized_upstream(*args, **kwargs): "\n To enable your customized pretrained model, you only need to implement\n upstream/example/expert.py and leave this file as is. This file is\n used to register the UpstreamExpert in upstream/example/expert.py\n The following is a brief introduction of the registration mechanism.\n\n The s3prl/hub.py will collect all the entries registered in this file\n (callable variables without the underscore prefix) as a centralized\n upstream factory. One can pick up this upstream from the factory via\n\n 1.\n from s3prl.hub import customized_upstream\n model = customized_upstream(ckpt, model_config)\n\n 2.\n model = torch.hub.load(\n 'your_s3prl_path',\n 'customized_upstream',\n ckpt,\n model_config,\n source='local',\n )\n\n Our run_downstream.py and downstream/runner.py follows the first usage\n " return _UpstreamExpert(*args, **kwargs)
class UpstreamExpert(torch.nn.Module): def __init__(self, ckpt, **kwds): super().__init__() try: self.extracter = Wav2Vec2FeatureExtractor.from_pretrained(ckpt) except: if ('base' in ckpt): alter_extractor = 'facebook/hubert-base-ls960' else: alter_extractor = 'facebook/hubert-large-ll60k' logger.info(f"The model {ckpt} on huggingface does not have a correspoinding feature extractor. Using {alter_extractor}'s feature extractor as the alternative.") self.extracter = Wav2Vec2FeatureExtractor.from_pretrained(alter_extractor) self.model = HubertModel.from_pretrained(ckpt) def get_downsample_rates(self, key: str=None) -> int: return 320 def forward(self, wavs): device = wavs[0].device wavs = [wav.detach().cpu().numpy() for wav in wavs] input_values = self.extracter(wavs, return_tensors='pt', padding=True, return_attention_mask=True, sampling_rate=SAMPLE_RATE).to(device) output_values = self.model(**input_values, output_hidden_states=True) return {'hidden_states': output_values.hidden_states}
def hf_hubert_custom(ckpt, *args, **kwargs): return _UpstreamExpert(ckpt, *args, **kwargs)
class UpstreamExpert(torch.nn.Module): def __init__(self, ckpt, **kwds): super().__init__() self.extracter = Wav2Vec2FeatureExtractor.from_pretrained(ckpt) self.model = Wav2Vec2Model.from_pretrained(ckpt) def get_downsample_rates(self, key: str=None) -> int: return 320 def forward(self, wavs): device = wavs[0].device wavs = [wav.detach().cpu().numpy() for wav in wavs] input_values = self.extracter(wavs, return_tensors='pt', padding=True, return_attention_mask=True, sampling_rate=SAMPLE_RATE).to(device) output_values = self.model(**input_values, output_hidden_states=True) return {'hidden_states': output_values.hidden_states}
def hf_wav2vec2_custom(ckpt, *args, **kwargs): return _UpstreamExpert(ckpt, *args, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str=None): from fairseq.data.dictionary import Dictionary (state, cfg) = load_fairseq_ckpt(fairseq_source) dicts: List[Dictionary] = state['task_state']['dictionaries'] symbols = [dictionary.symbols for dictionary in dicts] output_state = {'task_cfg': cfg['task'], 'model_cfg': cfg['model'], 'model_weight': state['model'], 'dictionaries_symbols': symbols} if (output_path is not None): Path(output_path).parent.mkdir(exist_ok=True, parents=True) torch.save(output_state, output_path)
def load_converted_model(ckpt: str): ckpt_state = torch.load(ckpt, map_location='cpu') for required_key in ['task_cfg', 'model_cfg', 'model_weight', 'dictionaries_symbols']: if (required_key not in ckpt_state): raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {required_key} is missing') task_cfg = merge_with_parent(HubertPretrainingConfig, ckpt_state['task_cfg']) model_cfg = merge_with_parent(HubertConfig, ckpt_state['model_cfg']) model = HubertModel(model_cfg, task_cfg, ckpt_state['dictionaries_symbols']) model.load_state_dict(ckpt_state['model_weight']) return (model, task_cfg)
def hubert_custom(ckpt: str, legacy: bool=False, fairseq: bool=False, refresh: bool=False, **kwargs): assert (not (legacy and fairseq)), "The option 'legacy' will directly load a fairseq checkpoint, while the option 'fairseq' will first convert the fairseq checkpoint to be fairseq indenpendent and then load the checkpoint. These two options cannot be used jointly." if ckpt.startswith('http'): ckpt = _urls_to_filepaths(ckpt, refresh=refresh) if fairseq: ckpt: Path = Path(ckpt) converted_ckpt = (ckpt.parent / f'{ckpt.stem}.converted.pt') lock_file = Path((str(converted_ckpt) + '.lock')) logger.info(f'Converting a fairseq checkpoint: {ckpt}') logger.info(f'To: {converted_ckpt}') with FileLock(str(lock_file)): if ((not converted_ckpt.is_file()) or (refresh and ((time.time() - os.path.getmtime(ckpt)) > NEW_ENOUGH_SECS))): load_and_convert_fairseq_ckpt(ckpt, converted_ckpt) ckpt = converted_ckpt assert os.path.isfile(ckpt) if legacy: return _LegacyUpstreamExpert(ckpt, **kwargs) else: return _UpstreamExpert(ckpt, **kwargs)
def hubert_local(*args, **kwargs): return hubert_custom(*args, **kwargs)
def hubert_url(*args, **kwargs): return hubert_custom(*args, **kwargs)
def hubert(refresh=False, *args, **kwargs): '\n The default model - Base\n refresh (bool): whether to download ckpt/config again if existed\n ' return hubert_base(*args, refresh=refresh, **kwargs)
def hubert_base(refresh=False, legacy=False, **kwargs): '\n The Base model\n refresh (bool): whether to download ckpt/config again if existed\n ' kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt' if (not legacy): kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/hubert_base_ls960.pt' return hubert_custom(refresh=refresh, legacy=legacy, **kwargs)
def hubert_large_ll60k(refresh=False, legacy=False, **kwargs): '\n The Large model\n refresh (bool): whether to download ckpt/config again if existed\n ' kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/hubert/hubert_large_ll60k.pt' if (not legacy): kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/hubert_large_ll60k.pt' return hubert_custom(refresh=refresh, legacy=legacy, **kwargs)
def hubert_base_robust_mgr(refresh=False, legacy=False, **kwargs): '\n The Base model, continually trained with Libri 960 hr with Musan noise, Gaussian noise and Reverberation.\n refresh (bool): whether to download ckpt/config again if existed\n ' kwargs['ckpt'] = 'https://huggingface.co/kphuang68/HuBERT_base_robust_mgr/resolve/main/HuBERT_base_robust_mgr_best_loss_2.7821.pt' if (not legacy): kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/HuBERT_base_robust_mgr_best_loss_2.7821.pt' return hubert_custom(refresh=refresh, legacy=legacy, **kwargs)
def mhubert_base_vp_en_es_fr_it3(refresh=False, **kwds): kwds['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/mhubert_base_vp_en_es_fr_it3.pt' return hubert_custom(refresh=refresh, **kwds)
def contentvec(refresh=False, **kwds): kwds['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/contentvec_km100.pt' return hubert_custom(refresh=refresh, **kwds)
def contentvec_km100(refresh=False, **kwds): kwds['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/contentvec_km100.pt' return hubert_custom(refresh=refresh, **kwds)
def contentvec_km500(refresh=False, **kwds): kwds['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/contentvec_km500.pt' return hubert_custom(refresh=refresh, **kwds)
class Hook(): def __init__(self, module_path, transform, unique_identifier=None): self.module_path = module_path self.transform = transform self.unique_identifier = (unique_identifier or module_path) self.handler = None assert isinstance(self.module_path, str) assert callable(self.transform) assert isinstance(self.unique_identifier, str)
class initHook(type): def __call__(cls, *args, **kwargs): instance = super().__call__(*args, **kwargs) for hook in instance.hooks: if (hook.handler is None): instance._register_hook_handler(hook) return instance
class UpstreamBase(nn.Module, metaclass=initHook):
    """Base class for upstream models.

    Child classes declare forward hooks on their submodules; the initHook
    metaclass registers any still-unregistered hooks right after __init__,
    and __call__ exposes the captured tensors under standardized keys
    ('hidden_states', 'last_hidden_state', 'hidden_state_{i}').
    """

    def __init__(self, hooks: List[Tuple] = None, hook_postprocess: Callable[[List[Tuple[str, Tensor]]], List[Tuple[str, Tensor]]] = None, **kwargs):
        """
        Args:
            hooks: each Tuple is an argument list for the Hook initializer
            hook_postprocess: optional callable that may reorder/transform the
                collected (identifier, tensor) pairs before they are exposed.
        """
        super().__init__()
        self.hooks: List[Hook] = ([Hook(*hook) for hook in hooks] if hooks else [])
        self.hook_postprocess = hook_postprocess
        # Scratch list that the registered hook handlers append into during a
        # forward pass. (Annotation fixed: was List[Tuple(str, Tensor)],
        # which *calls* typing.Tuple instead of subscripting it.)
        self._hook_hiddens: List[Tuple[str, Tensor]] = []

    def remove_all_hooks(self):
        # Detach every registered forward hook and forget the specs.
        for hook in self.hooks:
            hook.handler.remove()
        self.hooks.clear()

    def remove_hook(self, unique_identifier: str):
        # Remove only hooks whose identifier matches; keep the rest.
        updated_hooks = []
        for hook in self.hooks:
            if (hook.unique_identifier == unique_identifier):
                hook.handler.remove()
            else:
                updated_hooks.append(hook)
        self.hooks = updated_hooks

    def add_hook(self, *args, **kwargs):
        # Create, register, and record a new Hook in one step.
        hook = Hook(*args, **kwargs)
        self._register_hook_handler(hook)
        self.hooks.append(hook)

    def _register_hook_handler(self, hook: Hook):
        # hook.module_path is a python expression evaluated against `self`,
        # e.g. "self.model.encoder.layers[3]".
        module = eval(hook.module_path)
        if (not isinstance(module, nn.Module)):
            show(f'[UpstreamBase] - {hook.module_path} is not a valid nn.Module. Skip.', file=sys.stderr)
            return
        if callable(hook.handler):
            show(f'[UpstreamBase] - Existing hook handler for {hook.unique_identifier} is found. Remove the existing one.', file=sys.stderr)
            hook.handler.remove()

        def generate_hook_handler(hiddens: List, hook: Hook):
            # Close over the shared `hiddens` list: the handler appends the
            # transformed (input, output) of every forward pass of `module`.
            def hook_handler(self, input, output):
                hiddens.append((hook.unique_identifier, hook.transform(input, output)))
            return hook_handler
        hook.handler = module.register_forward_hook(generate_hook_handler(self._hook_hiddens, hook))

    def __call__(self, wavs: List[Tensor], *args, **kwargs):
        # Clear leftovers, run the child's forward, then expose the tensors
        # captured by the hooks under reserved standardized keys.
        self._hook_hiddens.clear()
        result = (super().__call__(wavs, *args, **kwargs) or {})
        assert isinstance(result, dict)
        if (len(self._hook_hiddens) > 0):
            # The reserved keys must not be produced by the child directly.
            if ((result.get('_hidden_states_info') is not None) or (result.get('hidden_states') is not None) or (result.get('last_hidden_state') is not None)):
                show("[UpstreamBase] - If there are registered hooks, '_hidden_states_info', 'hidden_states', and 'last_hidden_state' are reserved and should not be included in child class's return dict.", file=sys.stderr)
                raise ValueError
            hook_hiddens = self._hook_hiddens.copy()
            self._hook_hiddens.clear()
            if callable(self.hook_postprocess):
                hook_hiddens = self.hook_postprocess(hook_hiddens)
            (result['_hidden_states_info'], result['hidden_states']) = zip(*hook_hiddens)
            result['last_hidden_state'] = result['hidden_states'][(- 1)]
            for (layer_id, hidden_state) in enumerate(result['hidden_states']):
                result[f'hidden_state_{layer_id}'] = hidden_state
        return result
class Featurizer(nn.Module):
    """Select one feature from an upstream's output dict and, when that
    feature is a list of layer hidden-states, learn a softmax-weighted sum
    over the layers.

    The constructor probes the upstream once with a dummy 1-second waveform
    to discover the available keys, the output dimension, and the downsample
    rate.
    """

    def __init__(self, upstream: UpstreamBase, feature_selection: str = 'hidden_states', upstream_device: str = 'cuda', layer_selection: int = None, normalize: bool = False, **kwargs):
        super().__init__()
        self.name = 'Featurizer'

        # Probe the upstream with a dummy waveform (no grad needed).
        upstream.eval()
        paired_wavs = [torch.randn(SAMPLE_RATE).to(upstream_device)]
        with torch.no_grad():
            paired_features = upstream(paired_wavs)

        if (feature_selection not in paired_features):
            if ('hidden_states' in paired_features):
                show(f'[{self.name}] - Warning: {feature_selection} is not a supported args.upstream_feature_selection. Using "hidden_states" as the default key.', file=sys.stderr)
                feature_selection = 'hidden_states'
            else:
                # Bug fix: this message used to call .keys() on the *list*
                # `paired_wavs`, raising AttributeError instead of showing the
                # valid options from the upstream's output dict.
                show(f'[{self.name}] - Error: {feature_selection} is not a supported args.upstream_feature_selection. The default key "hidden_states" is also not supported. Please specify -s with the following options: {list(paired_features.keys())}', file=sys.stderr)
                raise ValueError
        self.feature_selection = feature_selection
        self.layer_selection = layer_selection
        self.normalize = normalize

        feature = self._select_feature(paired_features)
        if isinstance(feature, (list, tuple)):
            # Multiple layers selected: set up the learnable weighted sum.
            self.layer_num = len(feature)
            show(f'[{self.name}] - Take a list of {self.layer_num} features and weighted sum them.', file=sys.stderr)
            self.weights = nn.Parameter(torch.zeros(self.layer_num))
            feature = self._weighted_sum([f.cpu() for f in feature])
        else:
            feature = feature.cpu()
        self.output_dim = feature.size((- 1))

        if hasattr(upstream, 'get_downsample_rates'):
            self.downsample_rate = upstream.get_downsample_rates(feature_selection)
            show(f"[{self.name}] - The selected feature {feature_selection}'s downsample rate is {self.downsample_rate}", file=sys.stderr)
        else:
            # Fall back to estimating the rate from input/output lengths.
            self.downsample_rate = round((max((len(wav) for wav in paired_wavs)) / feature.size(1)))
            show(f'[{self.name}] - Warning: The provided upstream does not give statis downsample rate by the "get_downsample_rates" interface (see upstream/example/expert.py). The downsample rate is calculated dynamically basing on the shape of the input waveforms v.s. the output features: {self.downsample_rate}', file=sys.stderr)

    def _select_feature(self, features):
        # Resolve the selected key; unwrap dicts to value lists, singleton
        # lists to the tensor itself, and honor an explicit layer index.
        feature = features.get(self.feature_selection)
        if isinstance(feature, dict):
            feature = list(feature.values())
        if (isinstance(feature, (list, tuple)) and (len(feature) == 1)):
            feature = feature[0]
        if (isinstance(feature, (list, tuple)) and isinstance(self.layer_selection, int)):
            feature = feature[self.layer_selection]
        return feature

    def _weighted_sum(self, feature):
        # Softmax over the learned per-layer weights, then sum across layers.
        assert (self.layer_num == len(feature)), "If you run into this error, there is a great chance you are finetuning the upstream with wav2vec2's transformer blocks in weighted-sum mode (default), including wav2vec2, hubert, and decoar2. These models use the layerdrop technique which causes the different number of layer forwards between different model forwards, resulting in different number of hidden states for different model forwards. Hence, finetuning these upstreams is essentially incompatible with weight-sum mode unless you turn off the layerdrop option in fairseq. See: https://github.com/pytorch/fairseq/blob/f6abcc2a67328bee8b15c596bb626ce2d720aae6/fairseq/models/wav2vec/wav2vec2.py#L857 However, since finetuning upstreams will backward the gradient through all layers which serves the same functionality as weighted-sum: all layers can be used for different downstream tasks. Hence instead of finetuning upstream with weighted-sum, we suggest to follow the more common setting: finetuning upstream with the last layer. Please use the following options: --upstream_trainable --upstream_feature_selection last_hidden_state. Or: -f -s last_hidden_state"
        stacked_feature = torch.stack(feature, dim=0)
        if self.normalize:
            stacked_feature = F.layer_norm(stacked_feature, (stacked_feature.shape[(- 1)],))
        (_, *origin_shape) = stacked_feature.shape
        stacked_feature = stacked_feature.view(self.layer_num, (- 1))
        norm_weights = F.softmax(self.weights, dim=(- 1))
        weighted_feature = (norm_weights.unsqueeze((- 1)) * stacked_feature).sum(dim=0)
        weighted_feature = weighted_feature.view(*origin_shape)
        return weighted_feature

    def tolist(self, paired_wavs: List[Tensor], paired_feature: Tensor):
        # Trim the padded batch back into a list of per-utterance features.
        assert (paired_feature.dim() == 3), '(batch_size, max_seq_len, feat_dim)'
        feature_len = [round((len(wav) / self.downsample_rate)) for wav in paired_wavs]
        length_diff = abs((paired_feature.size(1) - round((max([len(wav) for wav in paired_wavs]) / self.downsample_rate))))
        assert (length_diff < TOLERABLE_SEQLEN_DIFF), f'{length_diff} >= {TOLERABLE_SEQLEN_DIFF}'
        feature = [f[:l] for (f, l) in zip(paired_feature, feature_len)]
        return feature

    def forward(self, paired_wavs: List[Tensor], paired_features: Dict[str, Union[Tensor, List[Tensor], Dict[str, Tensor]]]):
        feature = self._select_feature(paired_features)
        if isinstance(feature, (list, tuple)):
            feature = self._weighted_sum(feature)
        return self.tolist(paired_wavs, feature)
class UpstreamExpert(torch.nn.Module): def __init__(self, ckpt, **kwds): super().__init__() checkpoint = torch.load(ckpt) assert (checkpoint['cfg']['model']['_name'] in ['hubert_pruner', 'student_hubert']) self.cfg = LightHuBERTConfig(checkpoint['cfg']['model']) if (checkpoint['cfg']['model']['_name'] == 'hubert_pruner'): if checkpoint['cfg']['model']['pruner_supernet'].lower().endswith('small.yaml'): self.cfg.supernet_type = 'small' elif checkpoint['cfg']['model']['pruner_supernet'].lower().endswith('base.yaml'): self.cfg.supernet_type = 'base' self.model = LightHuBERT(self.cfg) self.model.load_state_dict(checkpoint['model'], strict=False) if (checkpoint['cfg']['model']['_name'] == 'student_hubert'): subnet = self.model.supernet.max_subnet else: subnet = self.model.supernet.subnet self.model.set_sample_config(subnet) self.model.encoder.layerdrop = 0.0 params = self.model.calc_sampled_param_num() logger.info(f'LightHubert subnet (Params {(params / 1000000.0):.0f}M) | {subnet}') def get_downsample_rates(self, key: str) -> int: return 320 def forward(self, wavs): wavs = [F.layer_norm(wav, wav.shape) for wav in wavs] device = wavs[0].device wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device) wav_padding_mask = (~ torch.lt(torch.arange(max(wav_lengths)).unsqueeze(0).to(device), wav_lengths.unsqueeze(1))) hs = self.model.extract_features(pad_sequence(wavs, batch_first=True), padding_mask=wav_padding_mask, ret_hs=True)[0] return {'hidden_states': hs}
def lighthubert_local(ckpt, *args, **kwargs): '\n The model from local ckpt\n ckpt (str): PATH\n ' assert os.path.isfile(ckpt) return _UpstreamExpert(ckpt, *args, **kwargs)
def lighthubert_url(ckpt, refresh=False, *args, **kwargs): '\n The model from google drive id\n ckpt (str): URL\n refresh (bool): whether to download ckpt/config again if existed\n ' return lighthubert_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
def lighthubert(refresh=False, *args, **kargs): '\n The default model - Small\n refresh (bool): whether to download ckpt/config again if existed\n ' return lighthubert_small(*args, refresh=refresh, **kargs)
def lighthubert_small(refresh=False, *args, **kwargs): '\n The small model\n refresh (bool): whether to download ckpt/config again if existed\n ' kwargs['ckpt'] = 'https://huggingface.co/mechanicalsea/lighthubert/resolve/main/lighthubert_small.pt' return lighthubert_url(*args, refresh=refresh, **kwargs)
def lighthubert_base(refresh=False, *args, **kwargs): '\n The Base model\n refresh (bool): whether to download ckpt/config again if existed\n ' kwargs['ckpt'] = 'https://huggingface.co/mechanicalsea/lighthubert/resolve/main/lighthubert_base.pt' return lighthubert_url(*args, refresh=refresh, **kwargs)
def lighthubert_stage1(refresh=False, *args, **kwargs): '\n The Stage1 model\n refresh (bool): whether to download ckpt/config again if existed\n ' kwargs['ckpt'] = 'https://huggingface.co/mechanicalsea/lighthubert/resolve/main/lighthubert_stage1.pt' return lighthubert_url(*args, refresh=refresh, **kwargs)
def is_xla_tensor(tensor): return (torch.is_tensor(tensor) and (tensor.device.type == 'xla'))
def index_put(tensor, indices, value): if is_xla_tensor(tensor): for _ in range(indices.dim(), tensor.dim()): indices = indices.unsqueeze((- 1)) if (indices.size((- 1)) < tensor.size((- 1))): indices = indices.expand_as(tensor) tensor = (torch.mul(tensor, (~ indices)) + torch.mul(value, indices)) else: tensor[indices] = value return tensor
def pad_to_multiple(x, multiple, dim=(- 1), value=0): if (x is None): return (None, 0) tsz = x.size(dim) m = (tsz / multiple) remainder = ((math.ceil(m) * multiple) - tsz) if m.is_integer(): return (x, 0) pad_offset = (((0,) * ((- 1) - dim)) * 2) return (F.pad(x, (*pad_offset, 0, remainder), value=value), remainder)
def gelu_accurate(x): if (not hasattr(gelu_accurate, '_a')): gelu_accurate._a = math.sqrt((2 / math.pi)) return ((0.5 * x) * (1 + torch.tanh((gelu_accurate._a * (x + (0.044715 * torch.pow(x, 3)))))))
def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x)
def relu_squared(x: torch.Tensor): return F.relu(x).pow(2)
def deprecation_warning(message, stacklevel=3): warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable: 'Returns the activation function corresponding to `activation`' def gelu_accurate(x): if (not hasattr(gelu_accurate, '_a')): gelu_accurate._a = math.sqrt((2 / math.pi)) return ((0.5 * x) * (1 + torch.tanh((gelu_accurate._a * (x + (0.044715 * torch.pow(x, 3))))))) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x) if (activation == 'relu'): return F.relu elif (activation == 'relu_squared'): return relu_squared elif (activation == 'gelu'): return gelu elif (activation == 'gelu_fast'): deprecation_warning('--activation-fn=gelu_fast has been renamed to gelu_accurate') return gelu_accurate elif (activation == 'gelu_accurate'): return gelu_accurate elif (activation == 'tanh'): return torch.tanh elif (activation == 'linear'): return (lambda x: x) elif (activation == 'swish'): return torch.nn.SiLU else: raise RuntimeError('--activation-fn {} not supported'.format(activation))
class SLayerNorm(nn.LayerNorm):
    """LayerNorm: variable 1-D size
    __base__: torch.nn.LayerNorm

    A "slimmable" LayerNorm: the full-width weight/bias are allocated once,
    and a leading slice of them (``sample_normalized_shape``) is used at
    forward time, so one module can serve subnets of different widths.
    """

    def __init__(self, normalized_shape: int, eps: float = 1e-05, elementwise_affine: bool = True) -> None:
        super(SLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)
        self.staticize()

    def staticize(self):
        # Default to the full (max) normalized size and full parameters.
        self.sample_normalized_shape = self.normalized_shape[0]
        self.samples = {'weight': self.weight, 'bias': self.bias}

    def set_sample_config(self, sample_normalized_shape: int):
        # Choose the active width and refresh the sliced parameters.
        self.sample_normalized_shape = sample_normalized_shape
        self._sample_parameters()

    def _sample_parameters(self):
        # Slice the leading entries of weight/bias for the sampled width.
        if self.elementwise_affine:
            self.samples['weight'] = self.weight[:self.sample_normalized_shape]
            self.samples['bias'] = self.bias[:self.sample_normalized_shape]
        else:
            self.samples['weight'] = None
            self.samples['bias'] = None
        return self.samples

    def calc_sampled_param_num(self):
        # Parameter count of the currently sampled slice.
        return (self.samples['weight'].numel() + self.samples['bias'].numel())

    def get_complexity(self, sequence_length):
        # Rough FLOP proxy: one pass over the sampled features per timestep.
        return (sequence_length * self.sample_normalized_shape)

    @property
    def weights(self):
        return (self.samples['weight'] if self.elementwise_affine else None)

    @property
    def biases(self):
        return (self.samples['bias'] if self.elementwise_affine else None)

    @property
    def normalized_shapes(self):
        # F.layer_norm wants a tuple; wrap a scalar width when needed.
        if isinstance(self.sample_normalized_shape, numbers.Integral):
            sample_normalized_shape = (self.sample_normalized_shape,)
        else:
            sample_normalized_shape = self.sample_normalized_shape
        return tuple(sample_normalized_shape)

    def forward(self, input: Tensor) -> Tensor:
        # Re-slice every call in case the sampled width changed.
        self._sample_parameters()
        return F.layer_norm(input, self.normalized_shapes, self.weights, self.biases, self.eps)

    def extra_repr(self) -> str:
        return f'{self.normalized_shape}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'

    def clone_model(self, normalized_shape: int):
        # Materialize a plain nn.LayerNorm of the given width with the
        # sampled weights copied in; returned in eval mode.
        self.set_sample_config(normalized_shape)
        m = nn.LayerNorm(normalized_shape, self.eps, self.elementwise_affine)
        if m.elementwise_affine:
            m = m.to(self.weight.device)
            m = m.to(self.weight.dtype)
            m.weight.data.copy_(self.weights)
            m.bias.data.copy_(self.biases)
        return m.eval()

    @classmethod
    def build_from(cls, m: nn.LayerNorm):
        # Alternate constructor: copy config + weights from an existing
        # nn.LayerNorm into a new SLayerNorm.
        normalized_shape = m.normalized_shape
        eps = m.eps
        elementwise_affine = m.elementwise_affine
        _m = cls(normalized_shape, eps, elementwise_affine)
        if _m.elementwise_affine:
            _m = _m.to(m.weight.device)
            _m = _m.to(m.weight.dtype)
            _m.weight.data.copy_(m.weight)
            _m.bias.data.copy_(m.bias)
        return _m
class ConvFeatureExtractionModel(nn.Module):
    """Stack of 1-D convolutional feature-extractor blocks (wav2vec 2.0 style).

    Args:
        conv_layers: list of ``(dim, kernel_size, stride)`` tuples, one per block.
        dropout: dropout probability applied after each convolution.
        mode: ``'default'`` applies a group norm to the first block only;
            ``'layer_norm'`` applies an fp32 layer norm to every block.
        conv_bias: whether the convolutions use a bias term.
    """

    def __init__(self, conv_layers: List[Tuple[(int, int, int)]], dropout: float=0.0, mode: str='default', conv_bias: bool=False):
        super().__init__()
        assert (mode in {'default', 'layer_norm'})

        def block(n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False):
            # Build one conv -> dropout -> (norm) -> GELU unit.
            def make_conv():
                conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
                nn.init.kaiming_normal_(conv.weight)
                return conv

            assert ((is_layer_norm and is_group_norm) == False), 'layer norm and group norm are exclusive'

            if is_layer_norm:
                # Fix: normalize over n_out (this block's output channels) instead of
                # closing over the enclosing loop variable `dim` — same value at every
                # call site, but no longer dependent on the caller's loop state.
                # LayerNorm wants channels last, hence the transposes around it.
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    nn.Sequential(TransposeLast(), Fp32LayerNorm(n_out, elementwise_affine=True), TransposeLast()),
                    nn.GELU(),
                )
            elif is_group_norm:
                # One group per channel (InstanceNorm-like), computed in fp32.
                return nn.Sequential(
                    make_conv(),
                    nn.Dropout(p=dropout),
                    Fp32GroupNorm(n_out, n_out, affine=True),
                    nn.GELU(),
                )
            else:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())

        in_d = 1  # raw waveform enters as a single channel
        self.conv_layers = nn.ModuleList()
        for (i, cl) in enumerate(conv_layers):
            assert (len(cl) == 3), ('invalid conv definition: ' + str(cl))
            (dim, k, stride) = cl
            self.conv_layers.append(
                block(
                    in_d, dim, k, stride,
                    is_layer_norm=(mode == 'layer_norm'),
                    # 'default' mode group-norms only the first block.
                    is_group_norm=((mode == 'default') and (i == 0)),
                    conv_bias=conv_bias,
                )
            )
            in_d = dim

    def forward(self, x):
        """Map a batch of waveforms (B, T) to features (B, C, T')."""
        # (B, T) -> (B, 1, T) so Conv1d sees one input channel.
        x = x.unsqueeze(1)
        for conv in self.conv_layers:
            x = conv(x)
        return x
class Spectrogram(nn.Module):
    """Magnitude (optionally log-scaled) spectrogram extractor.

    All STFT settings are read from ``cfg['spectrogram']``; only the Hann
    window is supported.
    """

    def __init__(self, cfg, **kwargs):
        super(Spectrogram, self).__init__()
        self.eps = 1e-08
        self.cfg = cfg
        spec_cfg = cfg['spectrogram']
        self.n_fft = spec_cfg['n_fft']
        self.hop_length = spec_cfg['hop_length']
        self.win_length = spec_cfg['win_length']
        if spec_cfg['window'] != 'hann':
            raise ValueError('Window type not defined.')
        # NOTE(review): `device` is a module-level global defined elsewhere in
        # this file — confirm it is set before this class is instantiated.
        self.window = torch.hann_window(spec_cfg['win_length']).to(device)
        self.center = spec_cfg['center']
        self.log = spec_cfg['log']

    def get_output_dim(self):
        # A real-input STFT yields n_fft // 2 + 1 frequency bins.
        return (self.n_fft // 2) + 1

    def get_downsample_rate(self):
        # One output frame per hop_length input samples.
        return self.hop_length

    def forward(self, waveform):
        """Return (time, freq) magnitude features for a single 1-D waveform."""
        complex_spec = torch.stft(
            waveform,
            n_fft=self.n_fft,
            hop_length=self.hop_length,
            win_length=self.win_length,
            window=self.window,
            center=self.center,
            pad_mode='reflect',
            normalized=False,
            return_complex=True,
        )
        # (freq, time) -> (time, freq)
        feats = torch.transpose(torch.abs(complex_spec), 0, 1)
        if self.log:
            # Clamp before log to avoid -inf on silent frames.
            feats = torch.log(torch.clamp(feats, min=self.eps))
        return feats
class UpstreamExpert(nn.Module):
    """Extract spectrogram features from waveforms with torchaudio-style STFT.

    Reads a YAML model config and wraps a :class:`Spectrogram` extractor
    behind the s3prl upstream interface.
    """

    def __init__(self, model_config=None, **kwargs):
        super(UpstreamExpert, self).__init__()
        with open(model_config, 'r') as f:
            self.config = yaml.load(f, Loader=yaml.FullLoader)
        self.extracter = Spectrogram(self.config)
        self.output_dim = self.extracter.get_output_dim()
        self.downsample_rate = self.extracter.get_downsample_rate()

    def get_downsample_rates(self, key: str) -> int:
        # Same rate for every feature key.
        return self.downsample_rate

    def _extractor_forward(self, wavs):
        # One (time, freq) feature tensor per input waveform.
        return [self.extracter(wav) for wav in wavs]

    def forward(self, wavs):
        padded = pad_sequence(self._extractor_forward(wavs), batch_first=True)
        # NOTE(review): 'last_hidden_state' is wrapped in a list here; typical
        # s3prl upstreams return a bare tensor for it — confirm consumers expect
        # the list. Behavior preserved as-is.
        return {'last_hidden_state': [padded], 'hidden_states': [padded]}
def stft_mag(model_config, *args, **kwargs):
    """Build the spectrogram upstream from a local config file.

    model_config (str): path to the YAML model configuration
    """
    assert os.path.isfile(model_config)
    expert = _UpstreamExpert(model_config, *args, **kwargs)
    return expert
def mae_ast_local(ckpt, *args, **kwargs):
    """Load a MAE-AST model from a checkpoint on the local filesystem.

    ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    expert = _UpstreamExpert(ckpt, *args, **kwargs)
    return expert
def mae_ast_url(ckpt, refresh=False, *args, **kwargs):
    """Load a MAE-AST model from a URL, downloading (or reusing a cache) first.

    ckpt (str): URL
    refresh (bool): whether to re-download the checkpoint if already cached
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return mae_ast_local(local_ckpt, *args, **kwargs)
def mae_ast_frame(refresh=False, *args, **kwargs):
    """The MAE-AST Frame model, 12-layered, random masking.

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://www.cs.utexas.edu/~harwath/model_checkpoints/mae_ast/random_frame_75_12LayerEncoder.pt'
    return mae_ast_url(*args, refresh=refresh, **kwargs)
def mae_ast_patch(refresh=False, *args, **kwargs):
    """The MAE-AST Patch model, 12-layered, chunked masking.

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://www.cs.utexas.edu/~harwath/model_checkpoints/mae_ast/chunk_patch_75_12LayerEncoder.pt'
    return mae_ast_url(*args, refresh=refresh, **kwargs)