code
stringlengths
17
6.64M
def load_model(model_path='', mode='all', **kwds):
    """Build the basic PaSST model; ``model_path`` is unused (weights are fetched by arch name)."""
    return get_basic_model(mode=mode, **kwds)
def get_scene_embeddings(audio, model):
    """Return one embedding per sound clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds
        in a batch will be padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
def get_timestamp_embeddings(audio, model):
    """Return per-timestamp embeddings for each clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns:
        embedding: float32 tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 tensor (n_sounds, n_timestamps), centered times in ms.
    """
    return get_basic_timestamp_embeddings(audio, model)
def get_basic_timestamp_embeddings(audio, model):
    """Delegate timestamp-embedding extraction to the wrapped model.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: (embeddings, timestamps) as produced by model.get_timestamp_embeddings.
    """
    result = model.get_timestamp_embeddings(audio)
    return result
def get_basic_model(**kwargs):
    """Assemble the hop-100 PaSST model: mel frontend + transformer + HEAR wrapper."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=100,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    transformer = get_model_passt(arch='stfthop100', input_tdim=3200)
    return PasstBasicWrapper(mel=frontend, net=transformer, **kwargs)
def load_model(model_path='', mode='all', **kwds):
    """Build the two-level PaSST model; ``model_path`` is unused."""
    return get_2lvl_model(mode=mode, **kwds)
def get_scene_embeddings(audio, model):
    """Return one embedding per sound clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
def get_timestamp_embeddings(audio, model):
    """Two-level timestamp embeddings: concatenate the default-window features
    with features from a 5x wider window.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns:
        embedding: float32 tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 tensor (n_sounds, n_timestamps), times in ms of the fine level.
    """
    fine_embed, fine_times = model.get_timestamp_embeddings(audio)
    coarse_embed, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine_embed, coarse_embed), dim=-1)
    return combined, fine_times
def get_2lvl_model(**kwargs):
    """Hop-100 PaSST with a doubled timestamp embedding (two concatenated levels)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=100,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    transformer = get_model_passt(arch='stfthop100', input_tdim=3200)
    # two levels are concatenated, hence twice the base embedding size
    return PasstBasicWrapper(mel=frontend, net=transformer,
                             timestamp_embedding_size=(1295 * 2), **kwargs)
def load_model(model_path='', mode='all', **kwds):
    """Build the two-level+mel PaSST model; ``model_path`` is unused."""
    return get_2lvl_model(mode=mode, **kwds)
def get_scene_embeddings(audio, model):
    """Return one embedding per sound clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
def get_timestamp_embeddings(audio, model):
    """Two-level timestamp embeddings plus flattened raw mels per window.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns:
        embedding: float32 tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 tensor (n_sounds, n_timestamps), times in ms of the fine level.
    """
    # 6 mel frames per window at hopsize 100
    mel_embed, _ = model.get_timestamp_mels(audio, window_size=6 * 100)
    fine_embed, fine_times = model.get_timestamp_embeddings(audio)
    coarse_embed, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine_embed, coarse_embed, mel_embed), dim=-1)
    return combined, fine_times
def get_2lvl_model(**kwargs):
    """Hop-100 PaSST whose timestamp embedding adds flattened mels (768) to two levels."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=100,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    transformer = get_model_passt(arch='stfthop100', input_tdim=3200)
    # 768 = 128 mels * 6 frames, appended to the two concatenated levels
    return PasstBasicWrapper(mel=frontend, net=transformer,
                             timestamp_embedding_size=(768 + (1295 * 2)), **kwargs)
def load_model(model_path='', mode='all', **kwds):
    """Build the basic hop-160 PaSST model; ``model_path`` is unused."""
    return get_basic_model(mode=mode, **kwds)
def get_scene_embeddings(audio, model):
    """Return one embedding per sound clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
def get_timestamp_embeddings(audio, model):
    """Return per-timestamp embeddings for each clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns:
        embedding: float32 tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 tensor (n_sounds, n_timestamps), centered times in ms.
    """
    return get_basic_timestamp_embeddings(audio, model)
def get_basic_timestamp_embeddings(audio, model):
    """Delegate timestamp-embedding extraction to the wrapped model.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: (embeddings, timestamps) as produced by model.get_timestamp_embeddings.
    """
    result = model.get_timestamp_embeddings(audio)
    return result
def get_basic_model(**kwargs):
    """Assemble the hop-160 PaSST model: mel frontend + transformer + HEAR wrapper."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=160,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    transformer = get_model_passt(arch='stfthop160', input_tdim=2000)
    return PasstBasicWrapper(mel=frontend, net=transformer, **kwargs)
def load_model(model_path='', mode='all', **kwds):
    """Build the two-level hop-160 PaSST model; ``model_path`` is unused."""
    return get_2lvl_model(mode=mode, **kwds)
def get_scene_embeddings(audio, model):
    """Return one embedding per sound clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
def get_timestamp_embeddings(audio, model):
    """Two-level timestamp embeddings: concatenate the default-window features
    with features from a 5x wider window.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns:
        embedding: float32 tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 tensor (n_sounds, n_timestamps), times in ms of the fine level.
    """
    fine_embed, fine_times = model.get_timestamp_embeddings(audio)
    coarse_embed, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine_embed, coarse_embed), dim=-1)
    return combined, fine_times
def get_2lvl_model(**kwargs):
    """Hop-160 PaSST with a doubled timestamp embedding (two concatenated levels)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=160,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    transformer = get_model_passt(arch='stfthop160', input_tdim=2000)
    return PasstBasicWrapper(mel=frontend, net=transformer,
                             timestamp_embedding_size=(1295 * 2), **kwargs)
def load_model(model_path='', mode='all', **kwds):
    """Build the two-level+mel hop-160 PaSST model; ``model_path`` is unused."""
    return get_2lvl_model(mode=mode, **kwds)
def get_scene_embeddings(audio, model):
    """Return one embedding per sound clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
def get_timestamp_embeddings(audio, model):
    """Two-level timestamp embeddings plus flattened raw mels per window.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns:
        embedding: float32 tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 tensor (n_sounds, n_timestamps), times in ms of the fine level.
    """
    # 6 mel frames per window at hopsize 160
    mel_embed, _ = model.get_timestamp_mels(audio, window_size=6 * 160)
    fine_embed, fine_times = model.get_timestamp_embeddings(audio)
    coarse_embed, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine_embed, coarse_embed, mel_embed), dim=-1)
    return combined, fine_times
def get_2lvl_model(**kwargs):
    """Hop-160 PaSST whose timestamp embedding adds flattened mels (768) to two levels."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=160,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    transformer = get_model_passt(arch='stfthop160', input_tdim=2000)
    # 768 = 128 mels * 6 frames, appended to the two concatenated levels
    return PasstBasicWrapper(mel=frontend, net=transformer,
                             timestamp_embedding_size=(768 + (1295 * 2)), **kwargs)
class AugmentMelSTFT(nn.Module):
    """Log-mel spectrogram frontend with training-time augmentation.

    During training, the mel filterbank edges (fmin/fmax) are randomly jittered
    and SpecAugment-style frequency/time masking is applied; in eval mode the
    output is deterministic.
    """

    def __init__(self, n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
                 freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
                 fmin_aug_range=1, fmax_aug_range=1000):
        torch.nn.Module.__init__(self)
        self.win_length = win_length
        self.n_mels = n_mels
        self.n_fft = n_fft
        self.sr = sr
        self.htk = htk
        self.fmin = fmin
        if fmax is None:
            # leave headroom below Nyquist for the fmax augmentation jitter
            fmax = (sr // 2) - (fmax_aug_range // 2)
        self.fmax = fmax
        self.norm = norm
        self.hopsize = hopsize
        self.register_buffer('window', torch.hann_window(win_length, periodic=False), persistent=False)
        assert fmin_aug_range >= 1, f'fmin_aug_range={fmin_aug_range} should be >=1; 1 means no augmentation'
        # BUG FIX: this assert previously re-checked fmin_aug_range while its
        # message referred to fmax_aug_range; fmax_aug_range was never validated.
        assert fmax_aug_range >= 1, f'fmax_aug_range={fmax_aug_range} should be >=1; 1 means no augmentation'
        self.fmin_aug_range = fmin_aug_range
        self.fmax_aug_range = fmax_aug_range
        # pre-emphasis filter coefficients, applied as a 1-d convolution
        self.register_buffer('preemphasis_coefficient', torch.as_tensor([[[(- 0.97), 1]]]), persistent=False)
        if freqm == 0:
            self.freqm = torch.nn.Identity()
        else:
            self.freqm = torchaudio.transforms.FrequencyMasking(freqm, iid_masks=True)
        if timem == 0:
            self.timem = torch.nn.Identity()
        else:
            self.timem = torchaudio.transforms.TimeMasking(timem, iid_masks=True)

    def forward(self, x):
        """x: (batch, samples) waveform -> (batch, n_mels, frames) normalized log-mels."""
        # pre-emphasis
        x = nn.functional.conv1d(x.unsqueeze(1), self.preemphasis_coefficient).squeeze(1)
        x = torch.stft(x, self.n_fft, hop_length=self.hopsize, win_length=self.win_length,
                       center=True, normalized=False, window=self.window, return_complex=True)
        x = torch.view_as_real(x)
        # power spectrogram
        x = (x ** 2).sum(dim=(- 1))
        # jitter mel edges during training only
        fmin = self.fmin + torch.randint(self.fmin_aug_range, (1,)).item()
        fmax = self.fmax + (self.fmax_aug_range // 2) - torch.randint(self.fmax_aug_range, (1,)).item()
        if not self.training:
            fmin = self.fmin
            fmax = self.fmax
        (mel_basis, _) = torchaudio.compliance.kaldi.get_mel_banks(
            self.n_mels, self.n_fft, self.sr, fmin, fmax,
            vtln_low=100.0, vtln_high=(- 500.0), vtln_warp_factor=1.0)
        # pad filterbank to n_fft//2 + 1 bins to match the STFT output
        mel_basis = torch.as_tensor(
            torch.nn.functional.pad(mel_basis, (0, 1), mode='constant', value=0),
            device=x.device)
        with torch.cuda.amp.autocast(enabled=False):
            melspec = torch.matmul(mel_basis, x)
        melspec = (melspec + 1e-05).log()
        if self.training:
            melspec = self.freqm(melspec)
            melspec = self.timem(melspec)
        # roughly standardize the log-mel range
        melspec = (melspec + 4.5) / 5.0
        return melspec

    def extra_repr(self):
        return 'winsize={}, hopsize={}'.format(self.win_length, self.hopsize)
def load_model(model_path='', mode='all', **kwds):
    """Build the OpenMIC PaSST model; ``model_path`` is unused."""
    return get_basic_model(mode=mode, **kwds)
def get_scene_embeddings(audio, model):
    """Return one embedding per sound clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
def get_timestamp_embeddings(audio, model):
    """Return per-timestamp embeddings for each clip.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns:
        embedding: float32 tensor (n_sounds, n_timestamps, model.timestamp_embedding_size).
        timestamps: float32 tensor (n_sounds, n_timestamps), centered times in ms.
    """
    return get_basic_timestamp_embeddings(audio, model)
def get_basic_timestamp_embeddings(audio, model):
    """Delegate timestamp-embedding extraction to the wrapped model.

    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: loaded model.
    Returns: (embeddings, timestamps) as produced by model.get_timestamp_embeddings.
    """
    result = model.get_timestamp_embeddings(audio)
    return result
def get_basic_model(**kwargs):
    """Assemble the OpenMIC-2008 PaSST model (20 classes, hop 320)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    transformer = get_model_passt(arch='openmic2008', n_classes=20)
    # embeddings are the 768-d features concatenated with the 20 class logits
    return PasstBasicWrapper(mel=frontend, net=transformer,
                             scene_embedding_size=(768 + 20),
                             timestamp_embedding_size=(768 + 20), **kwargs)
class PasstBasicWrapper(nn.Module):
    # HEAR-style wrapper around a mel frontend (self.mel) and a PaSST network
    # (self.net), exposing scene- and timestamp-level embedding extraction.

    def __init__(self, mel: nn.Module, net: nn.Module, max_model_window=10000, timestamp_window=160, timestamp_hop=50, scene_hop=2500, scene_embedding_size=1295, timestamp_embedding_size=1295, mode='all'):
        """
        @param mel: spectrogram extractor
        @param net: network module
        @param max_model_window: maximum clip length allowed by the model (milliseconds).
        @param timestamp_window: window length for timestamp embeddings (milliseconds).
        @param timestamp_hop: the hop length for timestamp embeddings (milliseconds).
        @param scene_hop: the hop length for scene embeddings (milliseconds).
        @param scene_embedding_size: reported dimensionality of scene embeddings.
        @param timestamp_embedding_size: reported dimensionality of timestamp embeddings.
        @param mode: "all", "embed_only", "logits"
        """
        torch.nn.Module.__init__(self)
        self.mel = mel
        self.net = net
        # dummy parameter whose .device tracks where the module currently lives
        self.device_proxy = nn.Parameter(torch.zeros(1))
        self.sample_rate = mel.sr
        # convert all millisecond quantities into sample counts
        self.timestamp_window = int(((timestamp_window * self.sample_rate) / 1000))
        self.max_model_window = int(((max_model_window * self.sample_rate) / 1000))
        self.timestamp_hop = int(((timestamp_hop * self.sample_rate) / 1000))
        self.scene_hop = int(((scene_hop * self.sample_rate) / 1000))
        self.scene_embedding_size = scene_embedding_size
        self.timestamp_embedding_size = timestamp_embedding_size
        self.mode = mode

    def device(self):
        # NOTE(review): plain method (not a @property); callers must invoke .device()
        return self.device_proxy.device

    def forward(self, x):
        """Embed a batch of raw waveforms; the output depends on self.mode."""
        specs = self.mel(x)
        specs = specs.unsqueeze(1)  # add a channel dimension for the network
        (x, features) = self.net(specs)
        if (self.mode == 'all'):
            # class logits concatenated with the pre-classifier features
            embed = torch.cat([x, features], dim=1)
        elif (self.mode == 'embed_only'):
            embed = features
        elif (self.mode == 'logits'):
            embed = x
        else:
            raise RuntimeError(f"mode='{self.mode}' is not recognized not in: all, embed_only, logits")
        return embed

    def get_scene_embeddings(self, audio):
        """
        audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in a batch will be padded/trimmed to the same length.
        Returns:
            embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
        """
        (n_sounds, n_samples) = audio.shape
        if (n_samples <= self.max_model_window):
            # the whole clip fits into the model in a single pass
            embed = self(audio.contiguous())
            return embed
        # otherwise embed overlapping max-size windows and average them
        (embeddings, timestamps) = self.get_timestamp_embeddings(audio, window_size=self.max_model_window, hop=self.scene_hop)
        return embeddings.mean(axis=1)

    def get_timestamp_embeddings(self, audio: torch.Tensor, window_size=None, hop=None, pad=None):
        """
        audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in a batch will be padded/trimmed to the same length.
        Returns:
            embedding: A float32 Tensor with shape (n_sounds, n_timestamps, model.timestamp_embedding_size).
            timestamps: A float32 Tensor with shape (n_sounds, n_timestamps). Centered timestamps in milliseconds corresponding to each embedding in the output.
        """
        if (hop is None):
            hop = self.timestamp_hop
        if (window_size is None):
            window_size = self.timestamp_window
        if (pad is None):
            pad = (window_size // 2)
        (n_sounds, n_samples) = audio.shape
        audio = audio.unsqueeze(1)
        # reflect-pad so each window is centered on its timestamp
        padded = F.pad(audio, (pad, pad), mode='reflect')
        padded = padded.unsqueeze(1)
        # after unfold + transposes: one (n_sounds, window_size) segment per timestamp
        segments = F.unfold(padded, kernel_size=(1, window_size), stride=(1, hop)).transpose((- 1), (- 2)).transpose(0, 1)
        timestamps = []
        embeddings = []
        for (i, segment) in enumerate(segments):
            timestamps.append(i)
            emb = self(segment)
            embeddings.append(emb)
        # convert window indices to milliseconds
        timestamps = (((torch.as_tensor(timestamps) * hop) * 1000.0) / self.sample_rate)
        embeddings = torch.stack(embeddings).transpose(0, 1)
        timestamps = timestamps.unsqueeze(0).expand(n_sounds, (- 1))
        return (embeddings, timestamps)

    def get_timestamp_mels(self, audio: torch.Tensor, window_size=None, hop=None, pad=None):
        """
        Like get_timestamp_embeddings, but each window is represented by its raw
        mel spectrogram flattened to 128*6 values instead of a network embedding.
        audio: n_sounds x n_samples of mono audio in the range [-1, 1].
        Returns:
            embedding: A float32 Tensor with shape (n_sounds, n_timestamps, 128*6).
            timestamps: A float32 Tensor with shape (n_sounds, n_timestamps), in milliseconds.
        """
        if (hop is None):
            hop = self.timestamp_hop
        if (window_size is None):
            window_size = self.timestamp_window
        if (pad is None):
            pad = (window_size // 2)
        (n_sounds, n_samples) = audio.shape
        audio = audio.unsqueeze(1)
        padded = F.pad(audio, (pad, pad), mode='reflect')
        padded = padded.unsqueeze(1)
        segments = F.unfold(padded, kernel_size=(1, window_size), stride=(1, hop)).transpose((- 1), (- 2)).transpose(0, 1)
        timestamps = []
        embeddings = []
        for (i, segment) in enumerate(segments):
            timestamps.append(i)
            # assumes the mel frontend yields exactly 6 frames per window -- TODO confirm
            embeddings.append(self.mel(segment).reshape(n_sounds, (128 * 6)))
        timestamps = (((torch.as_tensor(timestamps) * hop) * 1000.0) / self.sample_rate)
        embeddings = torch.stack(embeddings).transpose(0, 1)
        timestamps = timestamps.unsqueeze(0).expand(n_sounds, (- 1))
        return (embeddings, timestamps)
def passt_base(**kwds):
    """Hub entry: base PaSST upstream expert."""
    expert = _UpstreamExpert('base', **kwds)
    return expert
def passt_base2level(**kwds):
    """Hub entry: two-level PaSST upstream expert."""
    expert = _UpstreamExpert('base2level', **kwds)
    return expert
def passt_base2levelmel(**kwds):
    """Hub entry: two-level PaSST upstream expert with mel features."""
    expert = _UpstreamExpert('base2levelmel', **kwds)
    return expert
def passt_base20sec(**kwds):
    """Hub entry: PaSST upstream expert for 20-second clips."""
    expert = _UpstreamExpert('base20sec', **kwds)
    return expert
def passt_base30sec(**kwds):
    """Hub entry: PaSST upstream expert for 30-second clips."""
    expert = _UpstreamExpert('base30sec', **kwds)
    return expert
def passt_hop100base(**kwds):
    """Hub entry: hop-100 PaSST upstream expert."""
    expert = _UpstreamExpert('hop100base', **kwds)
    return expert
def passt_hop100base2lvl(**kwds):
    """Hub entry: hop-100 two-level PaSST upstream expert."""
    expert = _UpstreamExpert('hop100base2lvl', **kwds)
    return expert
def passt_hop100base2lvlmel(**kwds):
    """Hub entry: hop-100 two-level PaSST upstream expert with mel features."""
    expert = _UpstreamExpert('hop100base2lvlmel', **kwds)
    return expert
def passt_hop160base(**kwds):
    """Hub entry: hop-160 PaSST upstream expert."""
    expert = _UpstreamExpert('hop160base', **kwds)
    return expert
def passt_hop160base2lvl(**kwds):
    """Hub entry: hop-160 two-level PaSST upstream expert."""
    expert = _UpstreamExpert('hop160base2lvl', **kwds)
    return expert
def passt_hop160base2lvlmel(**kwds):
    """Hub entry: hop-160 two-level PaSST upstream expert with mel features."""
    expert = _UpstreamExpert('hop160base2lvlmel', **kwds)
    return expert
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str):
    """Convert a fairseq RoBERTa checkpoint into the s3prl format.

    Args:
        fairseq_source (str): either URL for the tar file or the untared directory path
        output_path (str): converted checkpoint path
    Raises:
        AssertionError: when the source directory is invalid or does not
            contain exactly one ``*.pt`` file, or when the model config is not
            an argparse.Namespace.
    """
    if fairseq_source.startswith('http'):
        tar_file = _urls_to_filepaths(fairseq_source)
        tar_dir = Path(tar_file).parent / 'vq_wav2vec_kmeans_roberta/'
        tar_dir.mkdir(exist_ok=True, parents=True)
        # BUG FIX: the command list was passed via the `cwd=` keyword together
        # with shell=True, so check_call received no command at all. Pass the
        # argv list positionally (and keep shell=False so paths with spaces work).
        check_call(['tar', '-xf', str(tar_file), '-C', str(tar_dir)])
    else:
        fairseq_source = Path(fairseq_source)
        assert fairseq_source.is_dir()
        tar_dir = fairseq_source
    pt_files = glob.glob(os.path.join(tar_dir, '*.pt'))
    assert len(pt_files) == 1
    pt_file = pt_files[0]
    (state, cfg) = load_fairseq_ckpt((tar_dir / Path(pt_file).name), bpe='gpt2', load_checkpoint_heads=True)
    assert isinstance(cfg['model'], argparse.Namespace), 'RoBERTa pre-training does not have dataclass config and only accepts Namespace'
    with (tar_dir / 'dict.txt').open() as f:
        text_dictionary: str = f.read()
    output_state = {'task_cfg': cfg['task'], 'model_cfg': cfg['model'], 'model_weight': state['model'], 'text_dictionary': text_dictionary}
    Path(output_path).parent.mkdir(exist_ok=True, parents=True)
    torch.save(output_state, output_path)
    # sanity check: the converted checkpoint must load back cleanly
    load_converted_model(output_path)
def load_converted_model(ckpt: str):
    """Rebuild a fairseq RoBERTa model from an s3prl-converted checkpoint.

    Returns (model, task_cfg) where task_cfg is merged into MaskedLMConfig.
    Raises ValueError when the checkpoint is missing a required key.
    """
    import tempfile
    ckpt_state = torch.load(ckpt, map_location='cpu')
    for required_key in ['task_cfg', 'model_cfg', 'model_weight', 'text_dictionary']:
        if (required_key not in ckpt_state):
            raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {required_key} is missing')
    # fairseq's Dictionary can only be loaded from a file, so round-trip the
    # stored dictionary text through a temporary file
    with tempfile.NamedTemporaryFile() as f:
        with open(f.name, 'w') as f_handle:
            f_handle.write(ckpt_state['text_dictionary'])
        dictionary = Dictionary.load(f.name)
    dictionary.add_symbol('<mask>')
    model_cfg = ckpt_state['model_cfg']
    assert isinstance(model_cfg, argparse.Namespace), 'RoBERTa pre-training does not have dataclass config and only accepts Namespace'
    encoder = RobertaEncoder(model_cfg, dictionary)
    model = RobertaModel(model_cfg, encoder)
    model.load_state_dict(ckpt_state['model_weight'])
    task_cfg = merge_with_parent(MaskedLMConfig, ckpt_state['task_cfg'])
    return (model, task_cfg)
class _vq_wav2vec_codeids_wrapper(torch.nn.Module):
    """Wrap a vq_wav2vec model so forward() yields fairseq-style token strings."""

    def __init__(self, vq_wav2vec):
        super().__init__()
        self.vq_wav2vec = vq_wav2vec
        self.featurizer = _Featurizer(vq_wav2vec, 'codeids', upstream_device='cpu')

    def _indices_to_string(self, sentence_idxs):
        # each code group joined by '-', groups separated by spaces, wrapped in <s>...</s>
        groups = ('-'.join(map(str, idx.tolist())) for idx in sentence_idxs)
        return '<s> ' + ' '.join(groups) + ' </s>'

    def forward(self, wavs):
        batch_idxs = self.featurizer(wavs, self.vq_wav2vec(wavs))
        return [self._indices_to_string(sentence_idxs) for sentence_idxs in batch_idxs]
def _roberta_local(frontend_model, model_name_or_path, checkpoint_file, **kwargs):
    """Build a legacy RoBERTa upstream from a local fairseq checkpoint directory."""
    assert isinstance(frontend_model, torch.nn.Module)
    assert os.path.exists(model_name_or_path)
    expert = _LegacyUpstreamExpert(frontend_model, model_name_or_path, checkpoint_file, **kwargs)
    return expert
def _vq_wav2vec_roberta(vq_wav2vec, **kwargs):
    """Pair a vq_wav2vec code-id frontend with a local RoBERTa upstream."""
    frontend = _vq_wav2vec_codeids_wrapper(vq_wav2vec)
    return _roberta_local(frontend, **kwargs)
def vq_wav2vec_kmeans_roberta(refresh=False, legacy=False, **kwargs):
    """vq-wav2vec (k-means) + RoBERTa upstream.

    refresh (bool): re-download checkpoints even if cached.
    legacy (bool): use the original fairseq tarball path instead of the
        converted huggingface checkpoint.
    """
    if legacy:
        # FIX: removed no-op f-string prefixes (no placeholders in the literal)
        vq_wav2vec = getattr(s3prl.hub, 'vq_wav2vec_kmeans')(refresh=refresh)
        tar_file = _urls_to_filepaths('https://dl.fbaipublicfiles.com/fairseq/wav2vec/bert_kmeans.tar', refresh=refresh)
        tar_dir = os.path.join(os.path.dirname(tar_file), 'vq_wav2vec_kmeans_roberta/')
        os.makedirs(tar_dir, exist_ok=True)
        os.system(f'tar -xf {tar_file} -C {tar_dir}')
        pt_files = glob.glob(os.path.join(tar_dir, '*.pt'))
        assert len(pt_files) == 1
        pt_file = pt_files[0]
        kwargs['model_name_or_path'] = tar_dir
        kwargs['checkpoint_file'] = pt_file
        return _vq_wav2vec_roberta(vq_wav2vec, **kwargs)
    else:
        vq_wav2vec = getattr(s3prl.hub, 'vq_wav2vec_kmeans')()
        return _UpstreamExpert(
            _urls_to_filepaths('https://huggingface.co/s3prl/converted_ckpts/resolve/main/vq_wav2vec_kmeans_roberta.pt', refresh=refresh),
            _vq_wav2vec_codeids_wrapper(vq_wav2vec))
def discretebert(*args, legacy=False, **kwargs):
    """Alias for the vq-wav2vec k-means + RoBERTa upstream."""
    model = vq_wav2vec_kmeans_roberta(*args, legacy=legacy, **kwargs)
    return model
def spec_augment_local(ckpt, options_config=None, *args, **kwargs):
    """Build the spec-augment upstream from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    """
    assert os.path.isfile(ckpt)
    expert = _UpstreamExpert(ckpt, *args, options_config=options_config, **kwargs)
    return expert
def spec_augment_url(ckpt, refresh=False, *args, **kwargs):
    """Build the spec-augment upstream from a checkpoint URL.

    ckpt (str): URL of the checkpoint.
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return spec_augment_local(local_ckpt, *args, **kwargs)
def spec_augment(refresh=False, *args, **kwargs):
    """The default spec-augment model.

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/spz3yulaye8ppgr/states-100000.ckpt?dl=1'
    return spec_augment_url(*args, refresh=refresh, **kwargs)
def ssast_frame_base(refresh: bool=False, window_secs: float=1.0, **kwargs):
    """SSAST base model with frame-level masking pretraining."""
    ckpt = _urls_to_filepaths('https://www.dropbox.com/s/nx6nl4d4bl71sm8/SSAST-Base-Frame-400.pth?dl=1', refresh=refresh)
    expert = _UpstreamExpert(ckpt, 'base_f', window_secs)
    return expert
def ssast_patch_base(refresh: bool=False, window_secs: float=1.0, **kwargs):
    """SSAST base model with patch-level masking pretraining."""
    ckpt = _urls_to_filepaths('https://www.dropbox.com/s/ewrzpco95n9jdz6/SSAST-Base-Patch-400.pth?dl=1', refresh=refresh)
    expert = _UpstreamExpert(ckpt, 'base_p', window_secs)
    return expert
def tera_local(ckpt, *args, **kwargs):
    """TERA upstream from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    feature_selection (int): -1 (default, the last layer) or an int in range(0, max_layer_num)
    """
    assert os.path.isfile(ckpt)
    expert = _UpstreamExpert(ckpt, *args, **kwargs)
    return expert
def tera_url(ckpt, refresh=False, *args, **kwargs):
    """TERA upstream from a checkpoint URL.

    ckpt (str): URL of the checkpoint.
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return tera_local(local_ckpt, *args, **kwargs)
def tera(refresh=False, *args, **kwargs):
    """The default TERA model (960hr variant).

    refresh (bool): whether to download ckpt/config again if existed
    """
    model = tera_960hr(refresh, *args, **kwargs)
    return model
def tera_100hr(refresh=False, *args, **kwargs):
    """TERA base model trained on 100 hours of speech.

    refresh (bool): whether to download ckpt/config again if existed
    """
    model = tera_logMelBase_T_F_M_AdamW_b32_200k_100hr(refresh, *args, **kwargs)
    return model
def tera_960hr(refresh=False, *args, **kwargs):
    """TERA base model trained on 960 hours of speech.

    refresh (bool): whether to download ckpt/config again if existed
    """
    model = tera_logMelBase_T_F_M_AdamW_b32_1m_960hr_drop1(refresh, *args, **kwargs)
    return model
def tera_logMelBase_T_F_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """TERA: 80-dim log Mel, time+freq alteration, AdamW, batch 32, 200k steps, 100hr."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/o36qt1zgtn3tsep/states-200000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)
def tera_logMelBase_T_F_M_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """TERA: 80-dim log Mel, time+freq+mag alteration, AdamW, batch 32, 200k steps, 100hr."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/l9ryl82k64m1lsk/states-200000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)
def tera_logMelBase_T_F_AdamW_b32_1m_960hr(refresh=False, *args, **kwargs):
    """TERA: 80-dim log Mel, time+freq alteration, AdamW, batch 32, 1M steps, 960hr."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/98olxex0m7oy9ta/states-1000000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)
def tera_logMelBase_T_F_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """TERA: 80-dim log Mel, time+freq, AdamW, batch 32, 1M steps, 960hr; dropout 0.1 instead of 0.3."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/2ekbt2gxlkbvfz0/states-1000000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)
def tera_logMelBase_T_F_AdamW_b32_1m_960hr_seq3k(refresh=False, *args, **kwargs):
    """TERA: 80-dim log Mel, time+freq, AdamW, batch 32, 1M steps, 960hr; sequence length 3k instead of 1.5k."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/tfysinbalpm3gsj/states-1000000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)
def tera_logMelBase_T_F_M_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """TERA: 80-dim log Mel, time+freq+mag, AdamW, batch 32, 1M steps, 960hr; dropout 0.1 instead of 0.3."""
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/tera/resolve/main/tera_logMelBase_T_F_M_AdamW_b32_1m_960hr_drop1/states-1000000.ckpt'
    return tera_url(*args, refresh=refresh, **kwargs)
def tera_fbankBase_T_F_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """TERA: 240-dim fbank, time+freq alteration, AdamW, batch 32, 200k steps, 100hr."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/i32ob29m6afufot/states-200000.ckpt?dl=1'
    return tera_url(*args, refresh=refresh, **kwargs)
class UpstreamExpert(UpstreamBase):
    # s3prl upstream wrapper around a pretrained WavLM checkpoint; layer outputs
    # are collected via the UpstreamBase hook mechanism, not returned by forward.

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        checkpoint = torch.load(ckpt)
        self.cfg = WavLMConfig(checkpoint['cfg'])
        self.model = WavLM(self.cfg)
        self.model.load_state_dict(checkpoint['model'])
        # freeze feature-extractor gradients and disable layerdrop so every
        # transformer layer runs deterministically
        self.model.feature_grad_mult = 0.0
        self.model.encoder.layerdrop = 0.0
        if (len(self.hooks) == 0):
            # hook every transformer layer (batch-first hidden states) plus the
            # final encoder output; UpstreamBase gathers these as hidden_states
            module_name = 'self.model.encoder.layers'
            for module_id in range(len(eval(module_name))):
                self.add_hook(f'{module_name}[{module_id}]', (lambda input, output: input[0].transpose(0, 1)))
            self.add_hook('self.model.encoder', (lambda input, output: output[0]))
        # remember the construction-time layerdrop so set_layer_drop(None) can restore it
        self._init_layerdrop = self.model.encoder.layerdrop

    @property
    def layer_drop(self):
        # current encoder layerdrop probability
        return self.model.encoder.layerdrop

    def set_layer_drop(self, layerdrop: float=None):
        # a float sets a new layerdrop; None restores the initial value
        if isinstance(layerdrop, float):
            self.model.encoder.layerdrop = layerdrop
        elif (layerdrop is None):
            self.model.encoder.layerdrop = self._init_layerdrop
        else:
            raise ValueError('layerdrop can only be float or None')

    def get_downsample_rates(self, key: str) -> int:
        # fixed model downsample rate: 320 input samples per output frame
        return 320

    def forward(self, wavs):
        # wavs: list of 1-D waveform tensors (variable lengths)
        if self.cfg.normalize:
            wavs = [F.layer_norm(wav, wav.shape) for wav in wavs]
        device = wavs[0].device
        wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device)
        # True where a position lies beyond the wav's true length (padding)
        wav_padding_mask = (~ torch.lt(torch.arange(max(wav_lengths)).unsqueeze(0).to(device), wav_lengths.unsqueeze(1)))
        padded_wav = pad_sequence(wavs, batch_first=True)
        # result is intentionally unused here: the registered hooks capture the
        # per-layer outputs during this call
        (features, feat_padding_mask) = self.model.extract_features(padded_wav, padding_mask=wav_padding_mask, mask=False)
def unispeech_sat_local(ckpt, *args, **kwargs):
    """UniSpeech-SAT upstream from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    """
    assert os.path.isfile(ckpt)
    expert = _UpstreamExpert(ckpt, *args, **kwargs)
    return expert
def unispeech_sat_url(ckpt, refresh=False, *args, **kwargs):
    """UniSpeech-SAT upstream from a checkpoint URL.

    ckpt (str): URL of the checkpoint.
    refresh (bool): whether to download ckpt/config again if existed
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return unispeech_sat_local(local_ckpt, *args, **kwargs)
def unispeech_sat(refresh=False, *args, **kwargs):
    """The default UniSpeech-SAT model (Base-Plus).

    refresh (bool): whether to download ckpt/config again if existed
    """
    model = unispeech_sat_base_plus(*args, refresh=refresh, **kwargs)
    return model
def unispeech_sat_base(refresh=False, *args, **kwargs):
    """The UniSpeech-SAT Base model.

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/unispeech_sat_base.pt'
    return unispeech_sat_url(*args, refresh=refresh, **kwargs)
def unispeech_sat_base_plus(refresh=False, *args, **kwargs):
    """The UniSpeech-SAT Base-Plus model.

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/unispeech_sat_base_plus.pt'
    return unispeech_sat_url(*args, refresh=refresh, **kwargs)
def unispeech_sat_large(refresh=False, *args, **kwargs):
    """The UniSpeech-SAT Large model.

    refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/unispeech_sat_large.pt'
    return unispeech_sat_url(*args, refresh=refresh, **kwargs)
def load_fairseq_ckpt(source: str, **override):
    """Load a fairseq checkpoint (local path or URL) onto CPU.

    source: filesystem path or http(s) URL of the checkpoint.
    override: arg_overrides forwarded to fairseq's loader.
    Returns (state, cfg) where cfg is the checkpoint config as a plain dict.
    """
    from fairseq.checkpoint_utils import load_checkpoint_to_cpu
    from omegaconf import OmegaConf
    source = str(source)
    if source.startswith('http'):
        fairseq_path = _urls_to_filepaths(source)
    else:
        fairseq_path = source
    state = load_checkpoint_to_cpu(fairseq_path, arg_overrides=override)
    cfg = OmegaConf.to_container(state['cfg'])
    # FIX: idiomatic isinstance check instead of `type(cfg) == dict`
    assert isinstance(cfg, dict)
    return (state, cfg)
def merge_with_parent(dc, cfg: dict):
    """Instantiate dataclass type ``dc`` from ``cfg``, ignoring unknown keys.

    Args:
        dc: a dataclass *type* whose declared fields select which keys of
            ``cfg`` are kept. (The old annotation ``dc: dataclass`` was
            misleading — ``dataclass`` is a decorator, not a type.)
        cfg: a plain dict of configuration values.

    Returns:
        An instance of ``dc`` built from the retained key/value pairs.

    Raises:
        AssertionError: if ``dc`` is not a dataclass, ``cfg`` is not a dict,
            or no key of ``cfg`` matches any field of ``dc``.
    """
    assert is_dataclass(dc)
    assert isinstance(cfg, dict)  # idiomatic check instead of type(...) == dict
    # Deep-copy first so the constructed instance never shares (nested)
    # values with the caller's dict — same isolation the original provided.
    cfg = deepcopy(cfg)
    # Keep only keys the dataclass declares; a filtered dict replaces the
    # original nested helper that deleted unknown keys in place.
    target_keys = set(dc.__dataclass_fields__.keys())
    kept = {k: v for k, v in cfg.items() if k in target_keys}
    assert len(kept) > 0
    return dc(**kept)
def extract_hidden_states(model):
    """Run `model` on pseudo wavs in eval mode and return its hidden states."""
    model.eval()
    with torch.no_grad():
        outputs = model(get_pseudo_wavs())
    return outputs["hidden_states"]
def are_same_models(model1, model2):
    """Assert that two models yield numerically close hidden states layerwise."""
    states1 = extract_hidden_states(model1)
    states2 = extract_hidden_states(model2)
    for h1, h2 in zip(states1, states2):
        assert torch.allclose(h1, h2)
def models_all_close(*models):
    """Assert every given model matches the first one (pairwise vs models[0])."""
    assert len(models) > 1
    reference = models[0]
    for candidate in models[1:]:
        are_same_models(reference, candidate)
class UpstreamExpert(UpstreamBase):
    """S3PRL upstream wrapper around the VGGish model."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        self.model = VGGish(ckpt, **kwargs)

    def get_downsample_rates(self, key: str) -> int:
        # NOTE(review): 16000 implies one output frame per second of 16 kHz
        # audio — confirm against waveform_to_examples' framing.
        return 16000

    def forward(self, wavs):
        device = wavs[0].device
        embeddings = []
        for wav in wavs:
            examples = waveform_to_examples(wav.detach().cpu().numpy())
            emb = self.model(examples.to(device))
            if emb.dim() == 1:
                # A single example collapses to 1-D; restore the time axis.
                emb = emb.unsqueeze(0)
            embeddings.append(emb)
        padded = pad_sequence(embeddings, batch_first=True)
        return {"last_hidden_state": padded, "hidden_states": [padded]}
def _load_state_dict_from_url(url):
    """Download (or reuse the cached copy of) `url` and torch.load it on CPU.

    NOTE(review): torch.load unpickles arbitrary objects; the URLs passed in
    here are fixed release assets, so this is acceptable for trusted sources.
    """
    local_path = _urls_to_filepaths(url)
    return torch.load(local_path, map_location="cpu")
def _vggish_from_torch_hub(urls, *args, **kwargs):
    """
    The model from `torch.hub.load`
        urls (dict): maps 'vggish' and 'pca' to their checkpoint URLs
    """
    kwargs["ckpt"] = {
        key: _load_state_dict_from_url(urls[key]) for key in ("vggish", "pca")
    }
    return _UpstreamExpert(*args, **kwargs)
def vggish(*args, **kwargs):
    """
    The default model
    """
    release = "https://github.com/harritaylor/torchvggish/releases/download/v0.1"
    urls = {
        "vggish": f"{release}/vggish-10086976.pth",
        "pca": f"{release}/vggish_pca_params-970ea276.pth",
    }
    return _vggish_from_torch_hub(urls, *args, **kwargs)
class VGG(nn.Module):
    """VGG convolutional trunk followed by the VGGish 128-d embedding head."""

    def __init__(self, features):
        super(VGG, self).__init__()
        self.features = features
        self.embeddings = nn.Sequential(
            nn.Linear(512 * 4 * 6, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Linear(4096, 128),
            nn.ReLU(True),
        )

    def forward(self, x):
        x = self.features(x)
        # NCHW -> NHWC before flattening (same effect as the original pair of
        # transposes); presumably chosen to match the released weight layout.
        x = x.permute(0, 2, 3, 1).contiguous()
        flat = x.view(x.size(0), -1)
        return self.embeddings(flat)
class Postprocessor(nn.Module):
    """Apply the released AudioSet PCA/whitening + 8-bit quantization to
    VGGish embeddings, entirely in torch so the gradient is preserved.

    The original AudioSet release post-processed raw VGGish outputs with a
    PCA transform (whitening included) followed by 8-bit quantization, for
    compatibility with the YouTube-8M embedding format; this module mirrors
    that pipeline.
    """

    def __init__(self):
        """Allocate empty PCA buffers; real values arrive via load_state_dict."""
        super(Postprocessor, self).__init__()
        size = vggish_params.EMBEDDING_SIZE
        self.pca_eigen_vectors = nn.Parameter(
            torch.empty((size, size), dtype=torch.float), requires_grad=False
        )
        self.pca_means = nn.Parameter(
            torch.empty((size, 1), dtype=torch.float), requires_grad=False
        )

    def postprocess(self, embeddings_batch):
        """PCA-transform, clip, and quantize a [batch, embedding_size] tensor.

        Args:
            embeddings_batch: 2-D tensor of raw VGGish embeddings.

        Returns:
            A tensor of the same shape with the PCA-transformed, quantized,
            and clipped version of the input (then squeezed).
        """
        assert len(embeddings_batch.shape) == 2, (
            'Expected 2-d batch, got %r' % (embeddings_batch.shape,)
        )
        assert embeddings_batch.shape[1] == vggish_params.EMBEDDING_SIZE, (
            'Bad batch shape: %r' % (embeddings_batch.shape,)
        )
        centered = embeddings_batch.t() - self.pca_means
        pca_applied = torch.mm(self.pca_eigen_vectors, centered).t()
        clipped = torch.clamp(
            pca_applied,
            vggish_params.QUANTIZE_MIN_VAL,
            vggish_params.QUANTIZE_MAX_VAL,
        )
        # Map [QUANTIZE_MIN_VAL, QUANTIZE_MAX_VAL] onto [0, 255] and round.
        scale = 255.0 / (
            vggish_params.QUANTIZE_MAX_VAL - vggish_params.QUANTIZE_MIN_VAL
        )
        quantized = torch.round(
            (clipped - vggish_params.QUANTIZE_MIN_VAL) * scale
        )
        return torch.squeeze(quantized)

    def forward(self, x):
        return self.postprocess(x)
def make_layers(cfg=(64, "M", 128, "M", 256, 256, "M", 512, 512, "M"), in_channels=1):
    """Build the VGGish feature extractor as an ``nn.Sequential``.

    Generalized from the original hard-coded version: the layer spec and the
    input channel count are now parameters with backward-compatible defaults
    (calling ``make_layers()`` produces exactly the original stack).

    Args:
        cfg: layer specification; an int adds a 3x3 ``Conv2d`` with that many
            output channels followed by ``ReLU``, and the string ``"M"`` adds
            a 2x2 max-pool with stride 2.
        in_channels: number of channels of the network input (1 by default).

    Returns:
        ``nn.Sequential`` implementing the requested convolutional stack.
    """
    layers = []
    channels = in_channels
    for v in cfg:
        if v == "M":
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        else:
            layers.append(nn.Conv2d(channels, v, kernel_size=3, padding=1))
            layers.append(nn.ReLU(inplace=True))
            channels = v
    return nn.Sequential(*layers)
class VGGish(VGG):
    """VGG trunk with optional pretrained weights and PCA postprocessing."""

    def __init__(self, ckpt, pretrained=True, postprocess=True, progress=True, **kwargs):
        super().__init__(make_layers())
        if pretrained:
            super().load_state_dict(ckpt["vggish"])
        self.postprocess = postprocess
        if not self.postprocess:
            return
        self.pproc = Postprocessor()
        if pretrained:
            # Convert the released numpy PCA params to float tensors in place,
            # then load them into the postprocessor.
            pca = ckpt["pca"]
            pca[vggish_params.PCA_EIGEN_VECTORS_NAME] = torch.as_tensor(
                pca[vggish_params.PCA_EIGEN_VECTORS_NAME], dtype=torch.float
            )
            pca[vggish_params.PCA_MEANS_NAME] = torch.as_tensor(
                pca[vggish_params.PCA_MEANS_NAME].reshape(-1, 1), dtype=torch.float
            )
            self.pproc.load_state_dict(pca)

    def forward(self, x):
        out = super().forward(x)
        return self._postprocess(out) if self.postprocess else out

    def _postprocess(self, x):
        return self.pproc(x)
def vq_apc(refresh=False, *args, **kwargs):
    """
    The default model (360hr variant)
        refresh (bool): whether to download ckpt/config again if existed
    """
    return vq_apc_360hr(*args, refresh=refresh, **kwargs)
def vq_apc_360hr(refresh=False, *args, **kwargs):
    """
    The vq-apc standard model trained on 360hr
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs["ckpt"] = (
        "https://huggingface.co/leo19941227/apc_series/resolve/main/vq_apc_360hr.ckpt"
    )
    return vq_apc_url(*args, refresh=refresh, **kwargs)
def vq_apc_960hr(refresh=False, *args, **kwargs):
    """
    The vq-apc standard model trained on 960hr
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs["ckpt"] = (
        "https://huggingface.co/leo19941227/apc_series/resolve/main/vq_apc_960hr.ckpt"
    )
    return vq_apc_url(*args, refresh=refresh, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str = None):
    """Convert a fairseq checkpoint into the flat s3prl format.

    Args:
        fairseq_source: URL or path accepted by ``load_fairseq_ckpt``.
        output_path: if given, the converted state is saved there (parent
            directories are created as needed); otherwise nothing is written.
    """
    state, cfg = load_fairseq_ckpt(fairseq_source)
    converted = {
        "task_cfg": cfg["task"],
        "model_cfg": cfg["model"],
        "model_weight": state["model"],
    }
    if output_path is not None:
        Path(output_path).parent.mkdir(exist_ok=True, parents=True)
        torch.save(converted, output_path)
def load_converted_model(ckpt: str):
    """Load an s3prl-converted wav2vec checkpoint and rebuild the model.

    Args:
        ckpt: path to a checkpoint containing task_cfg/model_cfg/model_weight.

    Returns:
        ``(model, task_cfg)`` — the restored Wav2VecModel and its task config.

    Raises:
        ValueError: if any required key is missing from the checkpoint.
    """
    ckpt_state = torch.load(ckpt, map_location="cpu")
    for required_key in ("task_cfg", "model_cfg", "model_weight"):
        if required_key not in ckpt_state:
            raise ValueError(
                f"{ckpt} is not a valid checkpoint since the required key: "
                f"{required_key} is missing"
            )
    task_cfg = merge_with_parent(AudioPretrainingConfig, ckpt_state["task_cfg"])
    model_cfg = merge_with_parent(Wav2VecConfig, ckpt_state["model_cfg"])
    model = Wav2VecModel(model_cfg)
    model.load_state_dict(ckpt_state["model_weight"])
    return model, task_cfg
def vq_wav2vec_custom(ckpt: str, *args, legacy: bool = False, refresh: bool = False, **kwargs):
    """Build a vq-wav2vec upstream from a URL or local checkpoint path.

    Args:
        ckpt: checkpoint URL (downloaded and cached) or local file path.
        legacy: use the legacy (fairseq-based) expert instead of the converted one.
        refresh: re-download the checkpoint even if cached.
    """
    if ckpt.startswith("http"):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    assert os.path.isfile(ckpt)
    expert_cls = _LegacyUpstreamExpert if legacy else _UpstreamExpert
    return expert_cls(ckpt, *args, **kwargs)
def wav2vec2_local(*args, **kwargs):
    # Thin alias that forwards everything to vq_wav2vec_custom.
    # NOTE(review): the name says "wav2vec2" but it delegates to the
    # vq-wav2vec loader — confirm this aliasing is intentional.
    return vq_wav2vec_custom(*args, **kwargs)
def wav2vec2_url(*args, **kwargs):
    # Thin alias that forwards everything to vq_wav2vec_custom.
    # NOTE(review): the name says "wav2vec2" but it delegates to the
    # vq-wav2vec loader — confirm this aliasing is intentional.
    return vq_wav2vec_custom(*args, **kwargs)
def vq_wav2vec(refresh=False, *args, **kwargs):
    """
    The default model - the Gumbel variant
        refresh (bool): whether to download ckpt/config again if existed
    """
    return vq_wav2vec_gumbel(*args, refresh=refresh, **kwargs)
def vq_wav2vec_gumbel(refresh=False, legacy=False, **kwargs):
    """
    The Gumbel model
        refresh (bool): whether to download ckpt/config again if existed
        legacy (bool): load the original fairseq checkpoint instead of the
            converted one
    """
    # Explicit if/else instead of set-then-overwrite; same resulting URL.
    if legacy:
        kwargs["ckpt"] = "https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec.pt"
    else:
        kwargs["ckpt"] = "https://huggingface.co/s3prl/converted_ckpts/resolve/main/vq-wav2vec.pt"
    return vq_wav2vec_custom(refresh=refresh, legacy=legacy, **kwargs)
def vq_wav2vec_kmeans(refresh=False, legacy=False, **kwargs):
    """
    The K-means model
        refresh (bool): whether to download ckpt/config again if existed
        legacy (bool): load the original fairseq checkpoint instead of the
            converted one
    """
    # Explicit if/else instead of set-then-overwrite; same resulting URL.
    if legacy:
        kwargs["ckpt"] = "https://dl.fbaipublicfiles.com/fairseq/wav2vec/vq-wav2vec_kmeans.pt"
    else:
        kwargs["ckpt"] = "https://huggingface.co/s3prl/converted_ckpts/resolve/main/vq-wav2vec_kmeans.pt"
    return vq_wav2vec_custom(refresh=refresh, legacy=legacy, **kwargs)
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str = None):
    """Re-pack a fairseq checkpoint as {task_cfg, model_cfg, model_weight}.

    NOTE(review): this chunk contains an identical earlier definition of the
    same function — likely two source modules concatenated; confirm.
    """
    state, cfg = load_fairseq_ckpt(fairseq_source)
    output_state = dict(
        task_cfg=cfg["task"],
        model_cfg=cfg["model"],
        model_weight=state["model"],
    )
    if output_path is None:
        return
    Path(output_path).parent.mkdir(exist_ok=True, parents=True)
    torch.save(output_state, output_path)