code
stringlengths
17
6.64M
def get_timestamp_embeddings(audio, model):
    """Delegate HEAR-style timestamp-embedding extraction to the basic implementation.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in a
        batch will be padded/trimmed to the same length.
    model: loaded model.
    Returns:
        embedding: float32 Tensor, (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 Tensor, (n_sounds, n_timestamps); centered timestamps in
            milliseconds corresponding to each embedding.
    """
    return get_basic_timestamp_embeddings(audio, model)
def get_basic_timestamp_embeddings(audio, model):
    """Ask the model itself for timestamp embeddings.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in a
        batch will be padded/trimmed to the same length.
    model: loaded model exposing ``get_timestamp_embeddings(audio)``.
    Returns:
        embedding: float32 Tensor, (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 Tensor, (n_sounds, n_timestamps); centered timestamps in ms.
    """
    result = model.get_timestamp_embeddings(audio)
    return result
def get_basic_model(**kwargs):
    """Build the default PaSST wrapper: 32 kHz mel frontend + openmic2008 head.

    Extra keyword arguments are forwarded to ``PasstBasicWrapper``.
    """
    frontend = AugmentMelSTFT(
        n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
        freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
        fmin_aug_range=10, fmax_aug_range=2000,
    )
    backbone = get_model_passt(arch='openmic2008', n_classes=20)
    # The wrapper concatenates 768 transformer features with the 20 class logits.
    return PasstBasicWrapper(
        mel=frontend,
        net=backbone,
        scene_embedding_size=768 + 20,
        timestamp_embedding_size=768 + 20,
        **kwargs,
    )
class PasstBasicWrapper(nn.Module):
    """HEAR-style wrapper joining a mel-spectrogram frontend with a PaSST network.

    Exposes scene-level and timestamp-level embedding extraction on raw mono audio.
    """

    def __init__(self, mel: nn.Module, net: nn.Module, max_model_window=10000,
                 timestamp_window=160, timestamp_hop=50, scene_hop=2500,
                 scene_embedding_size=1295, timestamp_embedding_size=1295, mode='all'):
        """
        @param mel: spectrogram extractor
        @param net: network module
        @param max_model_window: maximum clip length allowed by the model (milliseconds).
        @param timestamp_window: window length for timestamp embeddings (milliseconds).
        @param timestamp_hop: the hop length for timestamp embeddings (milliseconds).
        @param scene_hop: the hop length for scene embeddings (milliseconds).
        @param scene_embedding_size: size of the scene embedding vector.
        @param timestamp_embedding_size: size of each timestamp embedding vector.
        @param mode: "all", "embed_only", "logits"
        """
        torch.nn.Module.__init__(self)
        self.mel = mel
        self.net = net
        # Dummy parameter used only so .device() can report where the module lives.
        self.device_proxy = nn.Parameter(torch.zeros(1))
        self.sample_rate = mel.sr
        # All window/hop settings above are in milliseconds; convert to sample counts.
        self.timestamp_window = int(((timestamp_window * self.sample_rate) / 1000))
        self.max_model_window = int(((max_model_window * self.sample_rate) / 1000))
        self.timestamp_hop = int(((timestamp_hop * self.sample_rate) / 1000))
        self.scene_hop = int(((scene_hop * self.sample_rate) / 1000))
        self.scene_embedding_size = scene_embedding_size
        self.timestamp_embedding_size = timestamp_embedding_size
        self.mode = mode

    def device(self):
        # Device of the proxy parameter == device of the whole module.
        return self.device_proxy.device

    def forward(self, x):
        """Run mel frontend + network; what is returned depends on ``self.mode``."""
        specs = self.mel(x)
        specs = specs.unsqueeze(1)  # add the channel dimension the network expects
        (x, features) = self.net(specs)
        if (self.mode == 'all'):
            # Concatenate logits and embedding features along the feature axis.
            embed = torch.cat([x, features], dim=1)
        elif (self.mode == 'embed_only'):
            embed = features
        elif (self.mode == 'logits'):
            embed = x
        else:
            raise RuntimeError(f"mode='{self.mode}' is not recognized not in: all, embed_only, logits")
        return embed

    def get_scene_embeddings(self, audio):
        """
        audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in
            a batch will be padded/trimmed to the same length.
        Returns:
            embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
        """
        (n_sounds, n_samples) = audio.shape
        if (n_samples <= self.max_model_window):
            # Clip fits inside one model window: embed directly.
            embed = self(audio.contiguous())
            return embed
        # Otherwise embed overlapping windows and average them over time.
        (embeddings, timestamps) = self.get_timestamp_embeddings(audio, window_size=self.max_model_window, hop=self.scene_hop)
        return embeddings.mean(axis=1)

    def get_timestamp_embeddings(self, audio: torch.Tensor, window_size=None, hop=None, pad=None):
        """
        audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in
            a batch will be padded/trimmed to the same length.
        Returns:
            embedding: float32 Tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
            timestamps: float32 Tensor (n_sounds, n_timestamps); centered timestamps in
                milliseconds corresponding to each embedding.
        """
        if (hop is None):
            hop = self.timestamp_hop
        if (window_size is None):
            window_size = self.timestamp_window
        if (pad is None):
            # Half-window reflect padding so each embedding is centered on its timestamp.
            pad = (window_size // 2)
        (n_sounds, n_samples) = audio.shape
        audio = audio.unsqueeze(1)
        padded = F.pad(audio, (pad, pad), mode='reflect')
        padded = padded.unsqueeze(1)
        # Slice into overlapping windows: result iterates as (n_windows, n_sounds, window_size).
        segments = F.unfold(padded, kernel_size=(1, window_size), stride=(1, hop)).transpose((-1), (-2)).transpose(0, 1)
        timestamps = []
        embeddings = []
        for (i, segment) in enumerate(segments):
            timestamps.append(i)
            emb = self(segment)
            embeddings.append(emb)
        # Window indices -> milliseconds.
        timestamps = (((torch.as_tensor(timestamps) * hop) * 1000.0) / self.sample_rate)
        embeddings = torch.stack(embeddings).transpose(0, 1)
        timestamps = timestamps.unsqueeze(0).expand(n_sounds, (-1))
        return (embeddings, timestamps)

    def get_timestamp_mels(self, audio: torch.Tensor, window_size=None, hop=None, pad=None):
        """Like get_timestamp_embeddings, but returns raw flattened mel frames per window.

        audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in
            a batch will be padded/trimmed to the same length.
        Returns:
            embedding: float32 Tensor (n_sounds, n_timestamps, 128 * 6).
            timestamps: float32 Tensor (n_sounds, n_timestamps); centered times in ms.
        """
        if (hop is None):
            hop = self.timestamp_hop
        if (window_size is None):
            window_size = self.timestamp_window
        if (pad is None):
            pad = (window_size // 2)
        (n_sounds, n_samples) = audio.shape
        audio = audio.unsqueeze(1)
        padded = F.pad(audio, (pad, pad), mode='reflect')
        padded = padded.unsqueeze(1)
        segments = F.unfold(padded, kernel_size=(1, window_size), stride=(1, hop)).transpose((-1), (-2)).transpose(0, 1)
        timestamps = []
        embeddings = []
        for (i, segment) in enumerate(segments):
            timestamps.append(i)
            # NOTE(review): assumes the mel frontend yields exactly 128 x 6 values per
            # window at these settings — confirm against AugmentMelSTFT's hop/window.
            embeddings.append(self.mel(segment).reshape(n_sounds, (128 * 6)))
        timestamps = (((torch.as_tensor(timestamps) * hop) * 1000.0) / self.sample_rate)
        embeddings = torch.stack(embeddings).transpose(0, 1)
        timestamps = timestamps.unsqueeze(0).expand(n_sounds, (-1))
        return (embeddings, timestamps)
def passt_base(**kwds):
    """PaSST `base` variant."""
    return _UpstreamExpert('base', **kwds)


def passt_base2level(**kwds):
    """PaSST `base2level` variant."""
    return _UpstreamExpert('base2level', **kwds)


def passt_base2levelmel(**kwds):
    """PaSST `base2levelmel` variant."""
    return _UpstreamExpert('base2levelmel', **kwds)


def passt_base20sec(**kwds):
    """PaSST `base20sec` variant."""
    return _UpstreamExpert('base20sec', **kwds)


def passt_base30sec(**kwds):
    """PaSST `base30sec` variant."""
    return _UpstreamExpert('base30sec', **kwds)


def passt_hop100base(**kwds):
    """PaSST `hop100base` variant."""
    return _UpstreamExpert('hop100base', **kwds)


def passt_hop100base2lvl(**kwds):
    """PaSST `hop100base2lvl` variant."""
    return _UpstreamExpert('hop100base2lvl', **kwds)


def passt_hop100base2lvlmel(**kwds):
    """PaSST `hop100base2lvlmel` variant."""
    return _UpstreamExpert('hop100base2lvlmel', **kwds)


def passt_hop160base(**kwds):
    """PaSST `hop160base` variant."""
    return _UpstreamExpert('hop160base', **kwds)


def passt_hop160base2lvl(**kwds):
    """PaSST `hop160base2lvl` variant."""
    return _UpstreamExpert('hop160base2lvl', **kwds)


def passt_hop160base2lvlmel(**kwds):
    """PaSST `hop160base2lvlmel` variant."""
    return _UpstreamExpert('hop160base2lvlmel', **kwds)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str):
    """Convert a fairseq RoBERTa checkpoint (tar URL or untarred dir) to s3prl format.

    Args:
        fairseq_source (str): either URL for the tar file or the untared directory path
        output_path (str): converted checkpoint path

    Raises:
        AssertionError: if the directory does not contain exactly one ``*.pt`` file,
            or the checkpoint's model config is not an ``argparse.Namespace``.
        subprocess.CalledProcessError: if the tar extraction fails.
    """
    if fairseq_source.startswith('http'):
        tar_file = _urls_to_filepaths(fairseq_source)
        tar_dir = Path(tar_file).parent / 'vq_wav2vec_kmeans_roberta/'
        tar_dir.mkdir(exist_ok=True, parents=True)
        # BUGFIX: the command was previously passed as check_call's `cwd` keyword
        # (with shell=True on a split argv list), so nothing was actually extracted.
        # Pass the argv list positionally and let check_call raise on failure.
        check_call(['tar', '-xf', str(tar_file), '-C', str(tar_dir)])
    else:
        fairseq_source = Path(fairseq_source)
        assert fairseq_source.is_dir()
        tar_dir = fairseq_source
    pt_files = glob.glob(os.path.join(tar_dir, '*.pt'))
    assert len(pt_files) == 1
    pt_file = pt_files[0]
    (state, cfg) = load_fairseq_ckpt(tar_dir / Path(pt_file).name, bpe='gpt2', load_checkpoint_heads=True)
    assert isinstance(cfg['model'], argparse.Namespace), (
        'RoBERTa pre-training does not have dataclass config and only accepts Namespace')
    with (tar_dir / 'dict.txt').open() as f:
        text_dictionary: str = f.read()
    output_state = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
        'text_dictionary': text_dictionary,
    }
    Path(output_path).parent.mkdir(exist_ok=True, parents=True)
    torch.save(output_state, output_path)
    # Round-trip the converted file as a sanity check.
    load_converted_model(output_path)
def load_converted_model(ckpt: str):
    """Restore a RoBERTa model and its task config from an s3prl-converted checkpoint.

    The checkpoint must contain 'task_cfg', 'model_cfg', 'model_weight' and
    'text_dictionary'; the embedded dictionary text is written to a temporary
    file so fairseq's Dictionary loader can consume it.
    """
    import tempfile

    ckpt_state = torch.load(ckpt, map_location='cpu')
    for required_key in ('task_cfg', 'model_cfg', 'model_weight', 'text_dictionary'):
        if required_key not in ckpt_state:
            raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {required_key} is missing')
    with tempfile.NamedTemporaryFile() as f:
        # Materialize the dictionary text on disk for Dictionary.load.
        with open(f.name, 'w') as f_handle:
            f_handle.write(ckpt_state['text_dictionary'])
        dictionary = Dictionary.load(f.name)
        dictionary.add_symbol('<mask>')
    model_cfg = ckpt_state['model_cfg']
    assert isinstance(model_cfg, argparse.Namespace), 'RoBERTa pre-training does not have dataclass config and only accepts Namespace'
    encoder = RobertaEncoder(model_cfg, dictionary)
    model = RobertaModel(model_cfg, encoder)
    model.load_state_dict(ckpt_state['model_weight'])
    task_cfg = merge_with_parent(MaskedLMConfig, ckpt_state['task_cfg'])
    return (model, task_cfg)
class _vq_wav2vec_codeids_wrapper(torch.nn.Module): def __init__(self, vq_wav2vec): super().__init__() self.vq_wav2vec = vq_wav2vec self.featurizer = _Featurizer(vq_wav2vec, 'codeids', upstream_device='cpu') def _indices_to_string(self, sentence_idxs): return (('<s> ' + ' '.join(('-'.join(map(str, idx.tolist())) for idx in sentence_idxs))) + ' </s>') def forward(self, wavs): batch_idxs = self.featurizer(wavs, self.vq_wav2vec(wavs)) strings = [self._indices_to_string(sentence_idxs) for sentence_idxs in batch_idxs] return strings
def _roberta_local(frontend_model, model_name_or_path, checkpoint_file, **kwargs): assert isinstance(frontend_model, torch.nn.Module) assert os.path.exists(model_name_or_path) return _LegacyUpstreamExpert(frontend_model, model_name_or_path, checkpoint_file, **kwargs)
def _vq_wav2vec_roberta(vq_wav2vec, **kwargs):
    """Wrap a vq-wav2vec model as a code-id frontend and build the RoBERTa expert."""
    codeid_frontend = _vq_wav2vec_codeids_wrapper(vq_wav2vec)
    return _roberta_local(codeid_frontend, **kwargs)
def vq_wav2vec_kmeans_roberta(refresh=False, legacy=False, **kwargs):
    """vq-wav2vec (k-means) + RoBERTa upstream.

    Args:
        refresh (bool): re-download checkpoints even if a cached copy exists.
        legacy (bool): use the fairseq-dependent code path (downloads and untars
            the original release) instead of the converted checkpoint.
    """
    if legacy:
        # FIX: removed pointless f-string prefixes on the constant attribute name.
        vq_wav2vec = getattr(s3prl.hub, 'vq_wav2vec_kmeans')(refresh=refresh)
        tar_file = _urls_to_filepaths('https://dl.fbaipublicfiles.com/fairseq/wav2vec/bert_kmeans.tar', refresh=refresh)
        tar_dir = os.path.join(os.path.dirname(tar_file), 'vq_wav2vec_kmeans_roberta/')
        os.makedirs(tar_dir, exist_ok=True)
        os.system(f'tar -xf {tar_file} -C {tar_dir}')
        pt_files = glob.glob(os.path.join(tar_dir, '*.pt'))
        assert len(pt_files) == 1
        kwargs['model_name_or_path'] = tar_dir
        kwargs['checkpoint_file'] = pt_files[0]
        return _vq_wav2vec_roberta(vq_wav2vec, **kwargs)
    vq_wav2vec = getattr(s3prl.hub, 'vq_wav2vec_kmeans')()
    return _UpstreamExpert(
        _urls_to_filepaths(
            'https://huggingface.co/s3prl/converted_ckpts/resolve/main/vq_wav2vec_kmeans_roberta.pt',
            refresh=refresh,
        ),
        _vq_wav2vec_codeids_wrapper(vq_wav2vec),
    )
def discretebert(*args, legacy=False, **kwargs):
    """Alias for ``vq_wav2vec_kmeans_roberta``."""
    return vq_wav2vec_kmeans_roberta(*args, legacy=legacy, **kwargs)
def spec_augment_local(ckpt, options_config=None, *args, **kwargs):
    """
    The model from local ckpt
    ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, options_config=options_config, **kwargs)


def spec_augment_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
    ckpt (str): URL
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return spec_augment_local(local_ckpt, *args, **kwargs)


def spec_augment(refresh=False, *args, **kwargs):
    """
    The default model
    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/spz3yulaye8ppgr/states-100000.ckpt?dl=1'
    return spec_augment_url(*args, refresh=refresh, **kwargs)
def ssast_frame_base(refresh: bool = False, window_secs: float = 1.0, **kwargs):
    """SSAST base model with frame-level ('base_f') tokenization."""
    # NOTE(review): extra **kwargs are accepted but not forwarded — confirm intended.
    ckpt = _urls_to_filepaths(
        'https://www.dropbox.com/s/nx6nl4d4bl71sm8/SSAST-Base-Frame-400.pth?dl=1',
        refresh=refresh,
    )
    return _UpstreamExpert(ckpt, 'base_f', window_secs)


def ssast_patch_base(refresh: bool = False, window_secs: float = 1.0, **kwargs):
    """SSAST base model with patch-level ('base_p') tokenization."""
    ckpt = _urls_to_filepaths(
        'https://www.dropbox.com/s/ewrzpco95n9jdz6/SSAST-Base-Patch-400.pth?dl=1',
        refresh=refresh,
    )
    return _UpstreamExpert(ckpt, 'base_p', window_secs)
def tera_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt
    ckpt (str): PATH
    feature_selection (int): -1 (default, the last layer) or an int in range(0, max_layer_num)
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)


def tera_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from URL
    ckpt (str): URL
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return tera_local(local_ckpt, *args, **kwargs)


def tera(refresh=False, *args, **kwargs):
    """
    The default model
    refresh (bool): whether to download ckpt/config again if existed
    """
    return tera_960hr(refresh, *args, **kwargs)


def tera_100hr(refresh=False, *args, **kwargs):
    """
    The tera base model on 100hr
    refresh (bool): whether to download ckpt/config again if existed
    """
    return tera_logMelBase_T_F_M_AdamW_b32_200k_100hr(refresh, *args, **kwargs)


def tera_960hr(refresh=False, *args, **kwargs):
    """
    The tera base model on 960hr
    refresh (bool): whether to download ckpt/config again if existed
    """
    return tera_logMelBase_T_F_M_AdamW_b32_1m_960hr_drop1(refresh, *args, **kwargs)
def tera_logMelBase_T_F_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time + freq
    Optimizer: AdamW
    Batch size: 32
    Total steps: 200k
    Unlabled Speech: 100hr
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/o36qt1zgtn3tsep/states-200000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)


def tera_logMelBase_T_F_M_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time + freq + mag
    Optimizer: AdamW
    Batch size: 32
    Total steps: 200k
    Unlabled Speech: 100hr
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/l9ryl82k64m1lsk/states-200000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)


def tera_logMelBase_T_F_AdamW_b32_1m_960hr(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time + freq
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabled Speech: 960hr
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/98olxex0m7oy9ta/states-1000000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)


def tera_logMelBase_T_F_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time + freq
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabled Speech: 960hr
    Differences: Dropout of 0.1 (instead of 0.3)
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/2ekbt2gxlkbvfz0/states-1000000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)


def tera_logMelBase_T_F_AdamW_b32_1m_960hr_seq3k(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time + freq
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabled Speech: 960hr
    Differences: sequence length of 3k (instead of 1.5k)
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/tfysinbalpm3gsj/states-1000000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)


def tera_logMelBase_T_F_M_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """
    Feature: 80-dim log Mel
    Alteration: time + freq + mag
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabled Speech: 960hr
    Differences: Dropout of 0.1 (instead of 0.3)
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/tera/resolve/main/tera_logMelBase_T_F_M_AdamW_b32_1m_960hr_drop1/states-1000000.ckpt'
    return tera_url(*args, refresh=refresh, **kwargs)


def tera_fbankBase_T_F_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """
    Feature: 240-dim fbank
    Alteration: time + freq
    Optimizer: AdamW
    Batch size: 32
    Total steps: 200k
    Unlabled Speech: 100hr
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/i32ob29m6afufot/states-200000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)
class UpstreamExpert(UpstreamBase):
    """WavLM upstream expert: loads a WavLM checkpoint and exposes per-layer
    hidden states through UpstreamBase hooks."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        checkpoint = torch.load(ckpt)
        self.cfg = WavLMConfig(checkpoint['cfg'])
        self.model = WavLM(self.cfg)
        self.model.load_state_dict(checkpoint['model'])
        # Freeze the conv frontend's gradient scaling and disable layerdrop so
        # feature extraction is deterministic.
        self.model.feature_grad_mult = 0.0
        self.model.encoder.layerdrop = 0.0
        if (len(self.hooks) == 0):
            # One hook per transformer layer (input, time-major -> batch-major),
            # plus one on the encoder output itself.
            module_name = 'self.model.encoder.layers'
            for module_id in range(len(eval(module_name))):
                self.add_hook(f'{module_name}[{module_id}]', (lambda input, output: input[0].transpose(0, 1)))
            self.add_hook('self.model.encoder', (lambda input, output: output[0]))
        # Remember the layerdrop set above so set_layer_drop(None) can restore it.
        self._init_layerdrop = self.model.encoder.layerdrop

    @property
    def layer_drop(self):
        # Current encoder layerdrop probability.
        return self.model.encoder.layerdrop

    def set_layer_drop(self, layerdrop: float = None):
        """Set the encoder layerdrop; ``None`` restores the value captured at init."""
        if isinstance(layerdrop, float):
            self.model.encoder.layerdrop = layerdrop
        elif (layerdrop is None):
            self.model.encoder.layerdrop = self._init_layerdrop
        else:
            raise ValueError('layerdrop can only be float or None')

    def get_downsample_rates(self, key: str) -> int:
        # 320 input samples per output frame, for every feature key.
        return 320

    def forward(self, wavs):
        """Run WavLM on a list of variable-length wav tensors.

        Hidden states are collected by the hooks registered in __init__
        (UpstreamBase convention); note there is no explicit return here.
        """
        if self.cfg.normalize:
            wavs = [F.layer_norm(wav, wav.shape) for wav in wavs]
        device = wavs[0].device
        wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device)
        # True where a position is padding (index >= that wav's length).
        wav_padding_mask = (~ torch.lt(torch.arange(max(wav_lengths)).unsqueeze(0).to(device), wav_lengths.unsqueeze(1)))
        padded_wav = pad_sequence(wavs, batch_first=True)
        (features, feat_padding_mask) = self.model.extract_features(padded_wav, padding_mask=wav_padding_mask, mask=False)
def unispeech_sat_local(ckpt, *args, **kwargs):
    """
    The model from local ckpt
    ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)


def unispeech_sat_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from google drive id
    ckpt (str): URL
    refresh (bool): whether to download ckpt/config again if existed
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return unispeech_sat_local(local_ckpt, *args, **kwargs)


def unispeech_sat(refresh=False, *args, **kwargs):
    """
    The default model - Base-Plus
    refresh (bool): whether to download ckpt/config again if existed
    """
    return unispeech_sat_base_plus(*args, refresh=refresh, **kwargs)


def unispeech_sat_base(refresh=False, *args, **kwargs):
    """
    The Base model
    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/unispeech_sat_base.pt'
    return unispeech_sat_url(*args, refresh=refresh, **kwargs)


def unispeech_sat_base_plus(refresh=False, *args, **kwargs):
    """
    The Base-Plus model
    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/unispeech_sat_base_plus.pt'
    return unispeech_sat_url(*args, refresh=refresh, **kwargs)


def unispeech_sat_large(refresh=False, *args, **kwargs):
    """
    The Large model
    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/unispeech_sat_large.pt'
    return unispeech_sat_url(*args, refresh=refresh, **kwargs)
def load_fairseq_ckpt(source: str, **override):
    """Load a fairseq checkpoint to CPU and return its state plus a plain-dict config.

    Args:
        source: URL or local path of the fairseq checkpoint.
        **override: passed through to fairseq as ``arg_overrides``.

    Returns:
        (state, cfg): the raw checkpoint state dict and its config converted
        from OmegaConf to a plain dict.
    """
    from fairseq.checkpoint_utils import load_checkpoint_to_cpu
    from omegaconf import OmegaConf

    source = str(source)
    # Remote checkpoints are downloaded to the local cache first.
    if source.startswith('http'):
        fairseq_path = _urls_to_filepaths(source)
    else:
        fairseq_path = source
    state = load_checkpoint_to_cpu(fairseq_path, arg_overrides=override)
    cfg = OmegaConf.to_container(state['cfg'])
    # FIX: idiomatic isinstance check instead of an exact type() comparison.
    assert isinstance(cfg, dict)
    return (state, cfg)
def merge_with_parent(dc, cfg: dict):
    """Instantiate dataclass type ``dc`` from ``cfg``, dropping undeclared keys.

    Args:
        dc: a dataclass *type* whose fields define the accepted keys.
            (FIX: the previous annotation ``dc: dataclass`` incorrectly used the
            decorator function as a type.)
        cfg: a plain dict of config values; keys not matching a field are ignored.

    Returns:
        An instance of ``dc`` built from the surviving key/value pairs.

    Raises:
        AssertionError: if ``dc`` is not a dataclass, ``cfg`` is not a dict,
            or no key of ``cfg`` matches any field of ``dc``.
    """
    assert is_dataclass(dc)
    assert type(cfg) == dict
    cfg = deepcopy(cfg)
    # Keep only keys that correspond to declared dataclass fields
    # (the previous nested helper mutated the dict in place for the same effect).
    target_keys = set(dc.__dataclass_fields__.keys())
    cfg = {k: v for (k, v) in cfg.items() if k in target_keys}
    assert len(cfg) > 0
    return dc(**cfg)
def extract_hidden_states(model):
    """Run the model on pseudo wavs (eval mode, no grad) and return its hidden states."""
    model.eval()
    with torch.no_grad():
        outputs = model(get_pseudo_wavs())
    return outputs['hidden_states']


def are_same_models(model1, model2):
    """Assert both models produce numerically close hidden states on pseudo wavs."""
    for (h1, h2) in zip(extract_hidden_states(model1), extract_hidden_states(model2)):
        assert torch.allclose(h1, h2)


def models_all_close(*models):
    """Assert every model matches the first one; requires at least two models."""
    assert len(models) > 1
    reference = models[0]
    for other in models[1:]:
        are_same_models(reference, other)
class UpstreamExpert(UpstreamBase):
    """VGGish upstream expert: one 128-d embedding per VGGish example window."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        self.model = VGGish(ckpt, **kwargs)

    def get_downsample_rates(self, key: str) -> int:
        # One output frame per 16000 input samples, for every key.
        return 16000

    def forward(self, wavs):
        device = wavs[0].device
        outputs = []
        for wav in wavs:
            # Preprocessing runs on CPU numpy; the network runs on `device`.
            examples = waveform_to_examples(wav.detach().cpu().numpy())
            embedded = self.model(examples.to(device))
            if embedded.dim() == 1:
                # Keep a time axis even when the clip yields a single example.
                embedded = embedded.unsqueeze(0)
            outputs.append(embedded)
        padded = pad_sequence(outputs, batch_first=True)
        return {'last_hidden_state': padded, 'hidden_states': [padded]}
def _load_state_dict_from_url(url):
    """Fetch (or reuse a cached copy of) ``url`` and load it as a CPU state dict."""
    local_path = _urls_to_filepaths(url)
    return torch.load(local_path, map_location='cpu')


def _vggish_from_torch_hub(urls, *args, **kwargs):
    """
    The model from `torch.hub.load`
    urls (dict): LINKS
    """
    kwargs['ckpt'] = {
        'vggish': _load_state_dict_from_url(urls['vggish']),
        'pca': _load_state_dict_from_url(urls['pca']),
    }
    return _UpstreamExpert(*args, **kwargs)


def vggish(*args, **kwargs):
    """
    The default model
    """
    urls = {
        'vggish': 'https://github.com/harritaylor/torchvggish/releases/download/v0.1/vggish-10086976.pth',
        'pca': 'https://github.com/harritaylor/torchvggish/releases/download/v0.1/vggish_pca_params-970ea276.pth',
    }
    return _vggish_from_torch_hub(urls, *args, **kwargs)
class VGG(nn.Module):
    """VGGish convolutional trunk followed by the 128-d embedding MLP head."""

    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features
        # FC head: flattened 512 x 4 x 6 conv output -> 4096 -> 4096 -> 128.
        self.embeddings = nn.Sequential(
            nn.Linear(512 * 4 * 6, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 128),
            nn.ReLU(True),
        )

    def forward(self, x):
        x = self.features(x)
        # Move channels last (NCHW -> NHWC) before flattening; this ordering
        # matches the layout the pretrained VGGish head weights expect.
        x = torch.transpose(x, 1, 3)
        x = torch.transpose(x, 1, 2)
        flat = x.contiguous().view(x.size(0), -1)
        return self.embeddings(flat)
class Postprocessor(nn.Module):
    """Post-processes VGGish embeddings, returning a torch.Tensor (not numpy)
    so the gradient is preserved.

    Implements the PCA (with whitening) plus 8-bit quantization that was applied
    to the embeddings released with AudioSet, for compatibility with the
    YouTube-8M feature format.
    """

    def __init__(self):
        """Construct empty PCA buffers; actual values come from a state dict."""
        super(Postprocessor, self).__init__()
        eigen = torch.empty((vggish_params.EMBEDDING_SIZE, vggish_params.EMBEDDING_SIZE), dtype=torch.float)
        means = torch.empty((vggish_params.EMBEDDING_SIZE, 1), dtype=torch.float)
        # Registered as frozen parameters so load_state_dict can populate them.
        self.pca_eigen_vectors = nn.Parameter(eigen, requires_grad=False)
        self.pca_means = nn.Parameter(means, requires_grad=False)

    def postprocess(self, embeddings_batch):
        """PCA-transform, clip and quantize a [batch_size, embedding_size] tensor.

        Returns a tensor of the same shape (squeezed) with the PCA-transformed,
        quantized, and clipped version of the input.
        """
        assert len(embeddings_batch.shape) == 2, ('Expected 2-d batch, got %r' % (embeddings_batch.shape,))
        assert embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE, ('Bad batch shape: %r' % (embeddings_batch.shape,))
        # Center with the PCA means, rotate with the eigenvectors, transpose back.
        pca_applied = torch.mm(self.pca_eigen_vectors, (embeddings_batch.t() - self.pca_means)).t()
        clipped = torch.clamp(pca_applied, vggish_params.QUANTIZE_MIN_VAL, vggish_params.QUANTIZE_MAX_VAL)
        # Map [QUANTIZE_MIN_VAL, QUANTIZE_MAX_VAL] onto [0, 255] and round.
        scale = 255.0 / (vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL)
        quantized = torch.round((clipped - vggish_params.QUANTIZE_MIN_VAL) * scale)
        return torch.squeeze(quantized)

    def forward(self, x):
        return self.postprocess(x)
def make_layers(cfg=(64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M'), in_channels=1):
    """Build the VGGish convolutional trunk.

    Generalized with backward-compatible defaults: calling ``make_layers()``
    with no arguments produces exactly the original VGGish stack.

    Args:
        cfg: layer spec — each int adds a 3x3 Conv2d (with ReLU) of that many
            output channels; 'M' adds a 2x2 stride-2 MaxPool2d.
        in_channels: channel count of the network input (1 originally).

    Returns:
        nn.Sequential of the assembled Conv2d/ReLU/MaxPool2d layers.
    """
    layers = []
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        else:
            layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
            layers.append(nn.ReLU(inplace=True))
            in_channels = v
    return nn.Sequential(*layers)
class VGGish(VGG):
    """VGG trunk with optional pretrained weights and optional PCA postprocessing."""

    def __init__(self, ckpt, pretrained=True, postprocess=True, progress=True, **kwargs):
        super().__init__(make_layers())
        if pretrained:
            super().load_state_dict(ckpt['vggish'])
        self.postprocess = postprocess
        if self.postprocess:
            self.pproc = Postprocessor()
            if pretrained:
                # The released PCA params are arrays; convert to float tensors
                # (the means become a column vector) before loading.
                pca_state = ckpt['pca']
                pca_state[vggish_params.PCA_EIGEN_VECTORS_NAME] = torch.as_tensor(
                    pca_state[vggish_params.PCA_EIGEN_VECTORS_NAME], dtype=torch.float)
                pca_state[vggish_params.PCA_MEANS_NAME] = torch.as_tensor(
                    pca_state[vggish_params.PCA_MEANS_NAME].reshape(-1, 1), dtype=torch.float)
                self.pproc.load_state_dict(pca_state)

    def forward(self, x):
        embeddings = super().forward(x)
        if self.postprocess:
            embeddings = self._postprocess(embeddings)
        return embeddings

    def _postprocess(self, x):
        return self.pproc(x)
def vq_apc(refresh=False, *args, **kwargs):
    """
    The default model
    refresh (bool): whether to download ckpt/config again if existed
    """
    return vq_apc_360hr(*args, refresh=refresh, **kwargs)


def vq_apc_360hr(refresh=False, *args, **kwargs):
    """
    The vq-apc standard model on 360hr
    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/leo19941227/apc_series/resolve/main/vq_apc_360hr.ckpt'
    return vq_apc_url(*args, refresh=refresh, **kwargs)


def vq_apc_960hr(refresh=False, *args, **kwargs):
    """
    The vq-apc standard model on 960hr
    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/leo19941227/apc_series/resolve/main/vq_apc_960hr.ckpt'
    return vq_apc_url(*args, refresh=refresh, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str = None):
    """Convert a fairseq wav2vec checkpoint to s3prl's format; save when a path is given."""
    (state, cfg) = load_fairseq_ckpt(fairseq_source)
    converted = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
    }
    if output_path is not None:
        Path(output_path).parent.mkdir(exist_ok=True, parents=True)
        torch.save(converted, output_path)


def load_converted_model(ckpt: str):
    """Restore a Wav2VecModel and its task config from a converted checkpoint file."""
    ckpt_state = torch.load(ckpt, map_location='cpu')
    for required_key in ('task_cfg', 'model_cfg', 'model_weight'):
        if required_key not in ckpt_state:
            raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {required_key} is missing')
    task_cfg = merge_with_parent(AudioPretrainingConfig, ckpt_state['task_cfg'])
    model_cfg = merge_with_parent(Wav2VecConfig, ckpt_state['model_cfg'])
    model = Wav2VecModel(model_cfg)
    model.load_state_dict(ckpt_state['model_weight'])
    return (model, task_cfg)
def vq_wav2vec_custom(ckpt: str, *args, legacy: bool = False, refresh: bool = False, **kwargs):
    """Build a vq-wav2vec expert from a URL or local checkpoint path.

    legacy selects the fairseq-dependent expert; refresh re-downloads URL ckpts.
    """
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    assert os.path.isfile(ckpt)
    if legacy:
        return _LegacyUpstreamExpert(ckpt, *args, **kwargs)
    return _UpstreamExpert(ckpt, *args, **kwargs)
def wav2vec2_local(*args, **kwargs):
    """Alias of ``vq_wav2vec_custom`` for local checkpoints."""
    return vq_wav2vec_custom(*args, **kwargs)


def wav2vec2_url(*args, **kwargs):
    """Alias of ``vq_wav2vec_custom`` for URL checkpoints."""
    return vq_wav2vec_custom(*args, **kwargs)


def vq_wav2vec(refresh=False, *args, **kwargs):
    """
    The default model - Large model with context vector
    refresh (bool): whether to download ckpt/config again if existed
    """
    return vq_wav2vec_gumbel(*args, refresh=refresh, **kwargs)


def vq_wav2vec_gumbel(refresh=False, legacy=False, **kwargs):
    """
    The Gumbel model
    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/vq-wav2vec.pt'
    return vq_wav2vec_custom(refresh=refresh, legacy=legacy, **kwargs)


def vq_wav2vec_kmeans(refresh=False, legacy=False, **kwargs):
    """
    The K-means model
    refresh (bool): whether to download ckpt/config again if existed
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/vq-wav2vec_kmeans.pt'
    return vq_wav2vec_custom(refresh=refresh, legacy=legacy, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str = None):
    """Convert a fairseq wav2vec checkpoint into s3prl format, optionally saving it."""
    (state, cfg) = load_fairseq_ckpt(fairseq_source)
    output_state = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
    }
    if output_path is not None:
        Path(output_path).parent.mkdir(exist_ok=True, parents=True)
        torch.save(output_state, output_path)


def load_converted_model(ckpt: str):
    """Load a converted wav2vec checkpoint, validating its required keys first."""
    ckpt_state = torch.load(ckpt, map_location='cpu')
    missing = [k for k in ('task_cfg', 'model_cfg', 'model_weight') if k not in ckpt_state]
    if missing:
        required_key = missing[0]
        raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {required_key} is missing')
    task_cfg = merge_with_parent(AudioPretrainingConfig, ckpt_state['task_cfg'])
    model_cfg = merge_with_parent(Wav2VecConfig, ckpt_state['model_cfg'])
    model = Wav2VecModel(model_cfg)
    model.load_state_dict(ckpt_state['model_weight'])
    return (model, task_cfg)
class UpstreamExpert(UpstreamBase):
    """wav2vec upstream expert built from a converted (fairseq-free) checkpoint."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        (self.model, task_cfg) = load_converted_model(ckpt)
        if (len(self.hooks) == 0):
            # Expose the conv feature extractor output, the aggregator output, and
            # every intermediate aggregator conv layer, all as (batch, time, dim).
            self.add_hook('self.model.feature_extractor', (lambda input, output: output.transpose(1, 2)))
            self.add_hook('self.model.feature_aggregator', (lambda input, output: output.transpose(1, 2)))
            module_name = 'self.model.feature_aggregator.conv_layers'
            for conv_id in range((len(eval(module_name)) - 1)):
                self.add_hook(f'{module_name}[{(conv_id + 1)}]', (lambda input, output: input[0].transpose(1, 2)))

    def get_downsample_rates(self, key: str) -> int:
        # 160 input samples per output frame for every key
        # (presumably the total stride of the conv stack — confirm).
        return 160

    def forward(self, wavs):
        """
        Code snippet modified from fairseq
        """
        result = {}
        padded_wav = pad_sequence(wavs, batch_first=True)
        features = self.model.feature_extractor(padded_wav)
        # 'z': conv features as (batch, time, dim).
        result['z'] = features.transpose(1, 2).contiguous()
        if self.model.vector_quantizer:
            # Quantize the conv features, keeping both codewords and their ids.
            q_res = self.model.vector_quantizer(features, produce_targets=True)
            result['codewords'] = q_res['x'].transpose(1, 2).contiguous()
            result['codeids'] = q_res['targets']
            features = q_res['x']
        x = self.model.dropout_feats(features)
        x = self.model.feature_aggregator(x)
        # 'c': aggregated context features; also served as the default output.
        result['c'] = x.transpose(1, 2).contiguous()
        result['default'] = result['c']
        return result
class LegacyUpstreamExpert(UpstreamBase):
    """Legacy wav2vec upstream expert that builds the model via fairseq itself."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        # NOTE(review): the message mentions HuBERT but this is the wav2vec
        # expert — looks like a copy-paste; confirm before changing the string.
        logger.warning('Use the legacy expert for HuBERT which depends on fairseq')
        import fairseq
        from fairseq.models.wav2vec import Wav2VecModel
        from packaging import version
        if (version.parse(fairseq.__version__) > version.parse('0.10.2')):
            # Newer fairseq: checkpoint args must be completed with the
            # base-architecture defaults before building the model.
            cp = torch.load(ckpt)
            args = cp['args']
            base_wav2vec_architecture(args)
            self.model = Wav2VecModel.build_model(args, task=None)
            self.model.load_state_dict(cp['model'])
        elif (version.parse(fairseq.__version__) == version.parse('0.10.2')):
            cp = torch.load(ckpt)
            self.model = Wav2VecModel.build_model(cp['args'], task=None)
            self.model.load_state_dict(cp['model'])
        else:
            raise NotImplementedError
        if (len(self.hooks) == 0):
            # Expose the conv feature extractor output, the aggregator output, and
            # every intermediate aggregator conv layer, all as (batch, time, dim).
            self.add_hook('self.model.feature_extractor', (lambda input, output: output.transpose(1, 2)))
            self.add_hook('self.model.feature_aggregator', (lambda input, output: output.transpose(1, 2)))
            module_name = 'self.model.feature_aggregator.conv_layers'
            for conv_id in range((len(eval(module_name)) - 1)):
                self.add_hook(f'{module_name}[{(conv_id + 1)}]', (lambda input, output: input[0].transpose(1, 2)))

    def get_downsample_rates(self, key: str) -> int:
        # 160 input samples per output frame for every key
        # (presumably the total stride of the conv stack — confirm).
        return 160

    def forward(self, wavs):
        """
        Code snippet modified from fairseq
        """
        result = {}
        padded_wav = pad_sequence(wavs, batch_first=True)
        features = self.model.feature_extractor(padded_wav)
        # 'z': conv features as (batch, time, dim).
        result['z'] = features.transpose(1, 2).contiguous()
        if self.model.vector_quantizer:
            # Quantize the conv features, keeping both codewords and their ids.
            q_res = self.model.vector_quantizer(features, produce_targets=True)
            result['codewords'] = q_res['x'].transpose(1, 2).contiguous()
            result['codeids'] = q_res['targets']
            features = q_res['x']
        x = self.model.dropout_feats(features)
        x = self.model.feature_aggregator(x)
        # 'c': aggregated context features; also served as the default output.
        result['c'] = x.transpose(1, 2).contiguous()
        result['default'] = result['c']
        return result
def base_wav2vec_architecture(args):
    """Fill in base wav2vec (v1) architecture defaults on *args* in place.

    Attributes already present on *args* are left untouched; every missing
    attribute receives the default of the fairseq base wav2vec configuration.
    """
    feature_layers = '[(512, 10, 5)]'
    feature_layers += ' + [(512, 8, 4)]'
    feature_layers += ' + [(512, 4, 2)] * 3'
    defaults = {
        'conv_feature_layers': feature_layers,
        'conv_aggregator_layers': '[(512, 3, 1)] * 9',
        'prediction_steps': 12,
        'num_negatives': 1,
        'sample_distance': None,
        'cross_sample_negatives': 0,
        'dropout': 0.0,
        'dropout_features': 0.0,
        'dropout_agg': 0.0,
        'encoder': 'cnn',
        'aggregator': 'cnn',
        'skip_connections_feat': False,
        'skip_connections_agg': False,
        'residual_scale': 0.5,
        'gru_dim': 512,
        'no_conv_bias': False,
        'agg_zero_pad': False,
        'log_compression': False,
        'balanced_classes': False,
        'infonce': False,
        'project_features': 'none',
        'non_affine_group_norm': False,
        'offset': 'auto',
        'activation': 'relu',
        'vq_type': 'none',
        'vq_vars': 320,
        'vq_groups': 2,
        'vq_dim': 0,
        'vq_depth': 1,
        'combine_groups': False,
        'vq_temp': '(2.0, 0.5, 0.999995)',
        'vq_gamma': 0.25,
    }
    # Equivalent to args.name = getattr(args, 'name', default) for each entry.
    for name, default in defaults.items():
        setattr(args, name, getattr(args, name, default))
def wav2vec_custom(ckpt: str, *args, legacy: bool=False, refresh: bool=False, **kwargs):
    """Build a wav2vec (v1) upstream from *ckpt* (URL or local path).

    ckpt: checkpoint URL (downloaded and cached) or a local file path.
    legacy: load the original fairseq checkpoint via LegacyUpstreamExpert
        (requires fairseq); otherwise use the fairseq-independent
        UpstreamExpert on a converted checkpoint.
    refresh: re-download the checkpoint even if a cached copy exists.

    Fix: the original body resolved the URL to a local path but returned
    None, so every wav2vec entry point produced no model.  It now validates
    the path and instantiates the appropriate expert, mirroring
    wav2vec2_custom.
    """
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    assert os.path.isfile(ckpt)
    if legacy:
        return LegacyUpstreamExpert(ckpt, **kwargs)
    return UpstreamExpert(ckpt, **kwargs)
def wav2vec_local(*args, **kwargs):
    """Load a wav2vec (v1) upstream from a local checkpoint path."""
    return wav2vec_custom(*args, **kwargs)
def wav2vec_url(*args, **kwargs):
    """Load a wav2vec (v1) upstream from a checkpoint URL."""
    return wav2vec_custom(*args, **kwargs)
def wav2vec(refresh=False, *args, **kwargs):
    """Default wav2vec entry point — the Large model.

    refresh: whether to re-download the ckpt/config even if cached.
    """
    return wav2vec_large(*args, refresh=refresh, **kwargs)
def wav2vec_large(refresh=False, legacy=False, **kwargs):
    """The wav2vec Large model.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_large.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec_large.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec_custom(refresh=refresh, legacy=legacy, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str=None):
    """Convert a fairseq checkpoint into a fairseq-independent state dict.

    fairseq_source: path to the original fairseq checkpoint.
    output_path: where to save the converted checkpoint; when None the
        conversion result is discarded (the source is still loaded).
    """
    (state, cfg) = load_fairseq_ckpt(fairseq_source)
    converted = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
    }
    if (output_path is None):
        return
    target = Path(output_path)
    target.parent.mkdir(exist_ok=True, parents=True)
    torch.save(converted, target)
def load_converted_model(ckpt: str):
    """Load a fairseq-independent ("converted") wav2vec 2.0 checkpoint.

    Returns (model, task_cfg); raises ValueError when a required key is
    missing from the checkpoint.
    """
    state = torch.load(ckpt, map_location='cpu')
    missing = [key for key in ('task_cfg', 'model_cfg', 'model_weight') if (key not in state)]
    if missing:
        raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {missing[0]} is missing')
    task_cfg = merge_with_parent(AudioPretrainingConfig, state['task_cfg'])
    model_cfg = merge_with_parent(Wav2Vec2Config, state['model_cfg'])
    model = Wav2Vec2Model(model_cfg)
    model.load_state_dict(state['model_weight'])
    return (model, task_cfg)
def wav2vec2_custom(ckpt: str, legacy: bool=False, fairseq: bool=False, refresh: bool=False, **kwargs):
    """Build a wav2vec 2.0 upstream from *ckpt* (URL or local path).

    legacy: load the fairseq checkpoint directly (requires fairseq).
    fairseq: first convert the fairseq checkpoint into a fairseq-independent
        one next to it, then load the converted copy.
    refresh: re-download / re-convert even when a cached artifact exists.
    """
    assert (not (legacy and fairseq)), "The option 'legacy' will directly load a fairseq checkpoint, while the option 'fairseq' will first convert the fairseq checkpoint to be fairseq indenpendent and then load the checkpoint. These two options cannot be used jointly."
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    if fairseq:
        ckpt: Path = Path(ckpt)
        converted_ckpt = (ckpt.parent / f'{ckpt.stem}.converted.pt')
        lock_file = Path((str(converted_ckpt) + '.lock'))
        logger.info(f'Converting a fairseq checkpoint: {ckpt}')
        logger.info(f'To: {converted_ckpt}')
        # Serialize the conversion across processes sharing the same cache dir.
        with FileLock(str(lock_file)):
            # Convert when no converted copy exists, or when refresh is set and
            # the artifact is older than NEW_ENOUGH_SECS.
            # NOTE(review): the staleness check reads the mtime of the *source*
            # ckpt rather than the converted file — confirm this is intended.
            if ((not converted_ckpt.is_file()) or (refresh and ((time.time() - os.path.getmtime(ckpt)) > NEW_ENOUGH_SECS))):
                load_and_convert_fairseq_ckpt(ckpt, converted_ckpt)
        ckpt = converted_ckpt
    assert os.path.isfile(ckpt)
    # _LegacyUpstreamExpert / _UpstreamExpert are presumably imported above
    # this chunk (aliased wav2vec2 experts) — not visible here.
    if legacy:
        return _LegacyUpstreamExpert(ckpt, **kwargs)
    else:
        return _UpstreamExpert(ckpt, **kwargs)
def wav2vec2_local(*args, **kwargs):
    """Load a wav2vec 2.0 upstream from a local checkpoint path."""
    return wav2vec2_custom(*args, **kwargs)
def wav2vec2_url(*args, **kwargs):
    """Load a wav2vec 2.0 upstream from a checkpoint URL."""
    return wav2vec2_custom(*args, **kwargs)
def wav2vec2(refresh=False, *args, **kwargs):
    """Default wav2vec 2.0 entry point — the Base model trained on 960h.

    refresh: whether to re-download the ckpt/config even if cached.
    """
    return wav2vec2_base_960(*args, refresh=refresh, **kwargs)
def wav2vec2_base_960(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Base model.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_small.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec_small.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_960(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Large model trained on LibriSpeech 960 hours.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/libri960_big.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/libri960_big.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_ll60k(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Large model trained on Libri-light 60k hours.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_new.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec_vox_new.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_lv60_cv_swbd_fsh(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Large trained on Libri-Light 60k + CommonVoice + Switchboard + Fisher.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/w2v_large_lv_fsh_swbd_cv.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/w2v_large_lv_fsh_swbd_cv.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xlsr_53(refresh=False, legacy=False, **kwargs):
    """Multilingual wav2vec 2.0 (XLSR-53, https://arxiv.org/abs/2006.13979).

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr_53_56k.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr_53_56k.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xls_r_300m(refresh=False, legacy=False, **kwargs):
    """XLS-R 0.3B — same parameter count as the wav2vec 2.0 / HuBERT Large models.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_300m.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr2_300m.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xls_r_1b(refresh=False, legacy=False, **kwargs):
    """XLS-R 1B model.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_960m_1000k.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr2_960m_1000k.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def xls_r_2b(refresh=False, legacy=False, **kwargs):
    """XLS-R 2B model.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/wav2vec/xlsr2_2B_1000k.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/xlsr2_2B_1000k.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_conformer_relpos(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Conformer with relative position embeddings (Libri-Light pre-training, no fine-tuning).

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_relpos_PT_no_FT'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/LL_relpos_PT_no_FT.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_conformer_rope(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Conformer with rotary position embeddings (Libri-Light pre-training, no fine-tuning).

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/conformer/wav2vec2/librilight/LL_rope_PT_no_FT'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/LL_rope_PT_no_FT.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_large_voxpopuli_100k(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Large trained on VoxPopuli 100k.

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/voxpopuli/models/wav2vec2_large_100k.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_large_100k.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_base_s2st_es_voxpopuli(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Base for Spanish speech-to-speech translation fine-tuning (VoxPopuli).

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/es/transformer_B.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_base_s2st_es_voxpopuli.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_conformer_large_s2st_es_voxpopuli(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Conformer Large for Spanish speech-to-speech translation fine-tuning (VoxPopuli).

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/es/conformer_L.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_conformer_large_s2st_es_voxpopuli.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)
def wav2vec2_base_s2st_en_librilight(refresh=False, legacy=False, **kwargs):
    """wav2vec 2.0 Base for English speech-to-speech translation fine-tuning (Libri-Light).

    refresh: whether to re-download the ckpt/config even if cached.
    legacy: use the original fairseq checkpoint instead of the converted one.
    """
    fairseq_url = 'https://dl.fbaipublicfiles.com/fairseq/speech_to_speech/s2st_finetuning/w2v2/en/transformer_B.pt'
    converted_url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/wav2vec2_base_s2st_en_librilight.pt'
    kwargs['ckpt'] = fairseq_url if legacy else converted_url
    return wav2vec2_custom(refresh=refresh, legacy=legacy, **kwargs)