code stringlengths 17 6.64M |
|---|
class TransformerModel(TransformerInitModel):
    """Transformer encoder over acoustic feature frames.

    Params:
        config: a TransformerConfig instance with the configuration to build the model.
        input_dim: int, input feature dimension of the model.
        output_attentions: if True, also return attention weights from every layer.
        keep_multihead_output: if True, keep each multi-head attention output with
            its gradient (used for head-importance metrics).
        with_input_module: if True, project raw features through
            TransformerInputRepresentations before the encoder.

    Inputs:
        spec_input: FloatTensor [batch, seq_len, feature_dim] of (possibly masked) frames.
        pos_enc: positional encodings [batch, seq_len, hidden_size].
        attention_mask: optional [batch, seq_len] with 1 for real frames and 0 for padding.
        output_all_encoded_layers: if True, return the hidden states of every layer,
            otherwise only the last layer's.
        head_mask: optional [num_heads] or [num_layers, num_heads] mask for nullifying
            attention heads (per the original docstring: 1.0 => head is fully masked).

    Outputs:
        encoded_layers (list of tensors or a single tensor, see above); when
        output_attentions is True, returns (all_attentions, encoded_layers).
    """

    def __init__(self, config, input_dim, output_attentions=False,
                 keep_multihead_output=False, with_input_module=True):
        super(TransformerModel, self).__init__(config, output_attentions)
        self.with_input_module = with_input_module
        if self.with_input_module:
            self.input_representations = TransformerInputRepresentations(config, input_dim)
        self.encoder = TransformerEncoder(config, output_attentions=output_attentions,
                                          keep_multihead_output=keep_multihead_output)
        self.apply(self.init_Transformer_weights)

    def prune_heads(self, heads_to_prune):
        """Prune heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_multihead_outputs(self):
        """Gather all multi-head outputs (with gradients), one entry per layer."""
        return [layer.attention.self.multihead_output for layer in self.encoder.layer]

    def forward(self, spec_input, pos_enc=None, attention_mask=None,
                output_all_encoded_layers=True, head_mask=None):
        if attention_mask is None:
            attention_mask = torch.ones_like(spec_input)
        # Broadcastable attention mask [batch, 1, 1, seq]; masked positions get a
        # large negative bias so softmax gives them ~0 weight.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=spec_input.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # BUG FIX: the original called `expand_as(num_layers, -1, ...)`,
                # but `expand_as` takes a tensor, not sizes, and raises a
                # TypeError; `expand` is the size-taking variant.
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            # Cast once for both shapes so the mask matches the input dtype.
            head_mask = head_mask.to(dtype=spec_input.dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers
        if self.with_input_module:
            input_representations = self.input_representations(spec_input, pos_enc)
        else:
            input_representations = spec_input
        encoded_layers = self.encoder(input_representations, extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers,
                                      head_mask=head_mask)
        if self.output_attentions:
            all_attentions, encoded_layers = encoded_layers
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        if self.output_attentions:
            return all_attentions, encoded_layers
        return encoded_layers
|
class UpstreamExpert(UpstreamBase):
    """MOS-prediction upstream: pretrained SSL upstream + featurizer + MOS head."""

    def __init__(self, ckpt: str = None, model_config: str = None, **kwargs):
        """
        Args:
            ckpt: checkpoint path with 'Upstream'/'Downstream' weights and 'Config'.
                Can be assigned by the -k option in run_downstream.py.
            model_config: config path for constructing the model; may be unneeded
                when the checkpoint already stores it (-g option in run_downstream.py).
        """
        super().__init__(**kwargs)
        self.checkpoint = torch.load(ckpt, map_location='cpu')
        self.upstream_type = kwargs['upstream']
        self.mos_upstream = self._get_mos_upstream()
        self.mos_featurizer = self._get_mos_featurizer()
        self.mos_downstream = self._get_mos_downstream()
        self.segments_durations = 1  # seconds per segment

    def forward(self, wavs: List[Tensor]) -> Dict[(str, Union[(Tensor, List[Tensor], Dict[(str, Tensor)])])]:
        """Score each waveform by averaging segment-level MOS predictions."""
        per_wav_segments = [self.preprocessor(wav) for wav in wavs]
        flat_segments = [seg for segments in per_wav_segments for seg in segments]
        # Prefix sums mark where each utterance's segments start/end in the flat list.
        boundaries = list(accumulate([len(segments) for segments in per_wav_segments], initial=0))
        feats = self.mos_upstream(flat_segments)
        feats = self.mos_featurizer(flat_segments, feats)
        feats = torch.stack(feats)
        segment_scores = self.mos_downstream(feats)
        utterance_scores = [
            segment_scores[lo:hi].mean(dim=-1)
            for lo, hi in zip(boundaries, boundaries[1:])
        ]
        return {'scores': torch.FloatTensor(utterance_scores)}

    def preprocessor(self, wav):
        """Cut one waveform into fixed-duration, half-overlapping segments."""
        return unfold_segments(wav, self.segments_durations)

    def _get_mos_upstream(self):
        upstream = getattr(s3prl.hub, self.upstream_type)()
        if self.upstream_type == 'tera':
            # tera checkpoints miss this non-persistent buffer; fill a placeholder.
            self.checkpoint['Upstream']['transformer.extracter._melscale.fb'] = torch.tensor([])
        upstream.load_state_dict(self.checkpoint['Upstream'])
        return upstream

    def _get_mos_featurizer(self):
        return Featurizer(self.mos_upstream, upstream_device='cpu')

    def _get_mos_downstream(self):
        modelrc = self.checkpoint['Config']['downstream_expert']['modelrc']
        downstream = MosDownstream(
            upstream_dim=self.mos_featurizer.output_dim,
            projector_dim=modelrc['projector_dim'],
            clipping=modelrc['clipping'],
            attention_pooling=modelrc['attention_pooling'],
        )
        downstream.load_state_dict(self.checkpoint['Downstream'])
        return downstream
|
def mos_wav2vec2_local(ckpt, *args, **kwargs):
    """Build the wav2vec2-based MOS expert from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    """
    assert os.path.isfile(ckpt)
    kwargs['upstream'] = 'wav2vec2'
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def mos_wav2vec2_url(ckpt, refresh=False, *args, **kwargs):
    """Build the wav2vec2-based MOS expert from a checkpoint URL.

    ckpt (str): URL of the checkpoint.
    refresh (bool): re-download the checkpoint even if a cached copy exists.
    """
    # BUG FIX: `refresh` was accepted but never forwarded, so a cached file was
    # always reused; pass it through like the other *_url helpers (e.g. npc_url).
    return mos_wav2vec2_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
|
def mos_wav2vec2(refresh=False, *args, **kwargs):
    """Default wav2vec2-based MOS expert, downloaded from the release URL.

    refresh (bool): re-download the checkpoint even if cached.
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/s9zpouk5svu1a4l/wav2vec2-dev-SRCC-best.ckpt?dl=1'
    return mos_wav2vec2_url(*args, refresh=refresh, **kwargs)
|
def mos_tera_local(ckpt, *args, **kwargs):
    """Build the tera-based MOS expert from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    """
    assert os.path.isfile(ckpt)
    kwargs['upstream'] = 'tera'
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def mos_tera_url(ckpt, refresh=False, *args, **kwargs):
    """Build the tera-based MOS expert from a checkpoint URL.

    ckpt (str): URL of the checkpoint.
    refresh (bool): re-download the checkpoint even if a cached copy exists.
    """
    # BUG FIX: forward `refresh`; it was previously accepted and silently ignored.
    return mos_tera_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
|
def mos_tera(refresh=False, *args, **kwargs):
    """Default tera-based MOS expert, downloaded from the release URL.

    refresh (bool): re-download the checkpoint even if cached.
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/w4jk5bujaoosk69/tera-dev-SRCC-best.ckpt?dl=1'
    return mos_tera_url(*args, refresh=refresh, **kwargs)
|
def mos_apc_local(ckpt, *args, **kwargs):
    """Build the apc-based MOS expert from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    """
    assert os.path.isfile(ckpt)
    kwargs['upstream'] = 'apc'
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def mos_apc_url(ckpt, refresh=False, *args, **kwargs):
    """Build the apc-based MOS expert from a checkpoint URL.

    ckpt (str): URL of the checkpoint.
    refresh (bool): re-download the checkpoint even if a cached copy exists.
    """
    # BUG FIX: forward `refresh`; it was previously accepted and silently ignored.
    return mos_apc_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
|
def mos_apc(refresh=False, *args, **kwargs):
    """Default apc-based MOS expert, downloaded from the release URL.

    refresh (bool): re-download the checkpoint even if cached.
    """
    kwargs['ckpt'] = 'https://www.dropbox.com/s/ulng31as15hsvz1/apc-dev-SRCC-best.ckpt?dl=1'
    return mos_apc_url(*args, refresh=refresh, **kwargs)
|
class MosDownstream(nn.Module):
    """Projector + MOS regression head over upstream features."""

    def __init__(self, upstream_dim, projector_dim, clipping, attention_pooling):
        super(MosDownstream, self).__init__()
        # Attribute names are part of the checkpoint state-dict; keep them stable.
        self.connector = nn.Linear(upstream_dim, projector_dim)
        self.model = MosDownstreamModule(
            input_dim=projector_dim,
            clipping=clipping,
            attention_pooling=attention_pooling,
        )

    def forward(self, features):
        """Project features and return the predicted MOS scores."""
        return self.model(self.connector(features))
|
class MosDownstreamModule(nn.Module):
    """MOS regression head with optional score clipping, self-attention pooling,
    and a judge-dependent bias branch."""

    def __init__(self, input_dim, clipping=False, attention_pooling=False, num_judges=5000, **kwargs):
        super(MosDownstreamModule, self).__init__()
        # Attribute names (including the 'judge_embbeding' typo) are part of the
        # checkpoint state-dict; do not rename.
        self.mean_net_linear = nn.Linear(input_dim, 1)
        self.mean_net_clipping = clipping
        self.mean_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        self.bias_net_linear = nn.Linear(input_dim, 1)
        self.bias_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        self.judge_embbeding = nn.Embedding(num_embeddings=num_judges, embedding_dim=input_dim)

    def forward(self, features, judge_ids=None):
        # Judge-independent ("mean") score: pooled projection, or per-frame
        # projection averaged over time.
        if self.mean_net_pooling is None:
            segment_score = self.mean_net_linear(features).squeeze(-1).mean(dim=-1)
        else:
            segment_score = self.mean_net_linear(self.mean_net_pooling(features))
        if self.mean_net_clipping:
            # tanh * 2 + 3 keeps the prediction inside (1, 5).
            segment_score = torch.tanh(segment_score) * 2 + 3
        if judge_ids is None:
            return segment_score.squeeze(-1)
        # Judge-dependent branch: add the judge embedding to every frame.
        n_frames = features.shape[1]
        judge_vec = self.judge_embbeding(judge_ids)
        judge_vec = torch.stack([judge_vec for _ in range(n_frames)], dim=1)
        biased_features = features + judge_vec
        if self.bias_net_pooling is None:
            bias_score = self.bias_net_linear(biased_features).squeeze(-1).mean(dim=-1)
        else:
            bias_score = self.bias_net_linear(self.bias_net_pooling(biased_features))
        bias_score = bias_score + segment_score
        return segment_score.squeeze(-1), bias_score.squeeze(-1)
|
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: hidden dimension

        attention_weight:
            att_w : size (N, T, 1)

        return:
            utter_rep: size (N, H)
        """
        # FIX: make the softmax dimension explicit. The implicit-dim form is
        # deprecated and emits a warning; dim=1 is the time axis of the (N, T)
        # logits, which is what the implicit behavior resolved to.
        att_w = nn.functional.softmax(self.W(batch_rep).squeeze(-1), dim=1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep
|
def unfold_segments(tensor, tgt_duration, sample_rate=16000):
    """Split a 1-D waveform into half-overlapping, fixed-duration segments.

    The waveform is right-padded with zeros so every segment is full; the hop
    is half a segment. Returns a tuple of 1-D tensors of equal length.
    """
    seg_len = int(tgt_duration * sample_rate)
    hop = seg_len // 2
    n_samples = len(tensor)
    # Pad up to one full segment, or to the next hop multiple that fits the input.
    if n_samples <= seg_len:
        padded_len = seg_len
    else:
        padded_len = (n_samples // hop + 1) * hop
    padding = torch.zeros(padded_len - n_samples).to(tensor.device)
    padded = torch.cat([tensor, padding])
    return padded.unfold(0, seg_len, hop).unbind(0)
|
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str = None):
    """Convert a fairseq Multires HuBERT checkpoint into the s3prl format.

    Extracts the task/model configs, model weights, and the dictionaries'
    symbol lists; when `output_path` is given, saves them there (creating
    parent directories as needed).
    """
    from fairseq.data.dictionary import Dictionary

    state, cfg = load_fairseq_ckpt(fairseq_source)
    dicts: List[Dictionary] = state['task_state']['dictionaries']
    symbols = [d.symbols for d in dicts]
    output_state = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
        'dictionaries_symbols': symbols,
    }
    if output_path is not None:
        Path(output_path).parent.mkdir(exist_ok=True, parents=True)
        torch.save(output_state, output_path)
|
def load_converted_model(ckpt: str):
    """Load an s3prl-converted Multires HuBERT checkpoint.

    Returns (model, task_cfg). Raises ValueError when a required key is absent.
    """
    ckpt_state = torch.load(ckpt, map_location='cpu')
    for key in ['task_cfg', 'model_cfg', 'model_weight', 'dictionaries_symbols']:
        if key not in ckpt_state:
            raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {key} is missing')
    task_cfg = merge_with_parent(MultiresHubertPretrainingConfig, ckpt_state['task_cfg'])
    model_cfg = merge_with_parent(MultiresHubertConfig, ckpt_state['model_cfg'])
    model = MultiresHubertModel(model_cfg, task_cfg, ckpt_state['dictionaries_symbols'])
    model.load_state_dict(ckpt_state['model_weight'])
    return model, task_cfg
|
def multires_hubert_custom(ckpt: str, refresh: bool = False, **kwargs):
    """Build a Multires HuBERT expert from a local path or a URL.

    refresh (bool): re-download the checkpoint even if cached (URL case only).
    """
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt=ckpt, **kwargs)
|
def multires_hubert_local(*args, **kwargs):
    """Alias of multires_hubert_custom for local checkpoint paths."""
    return multires_hubert_custom(*args, **kwargs)
|
def multires_hubert_base(refresh=False, **kwargs):
    """The monolingual base model.

    refresh (bool): whether to download ckpt/config again if existed.
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/mr_hubert/resolve/main/mrhubert_mono_base.pt'
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_large(refresh=False, **kwargs):
    """The monolingual large model.

    refresh (bool): whether to download ckpt/config again if existed.
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/mr_hubert/resolve/main/mrhubert_mono_large.pt'
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_multilingual_base(refresh=False, **kwargs):
    """The multilingual base model.

    refresh (bool): whether to download ckpt/config again if existed.
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/mr_hubert/resolve/main/multi_base.pt'
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_multilingual_large400k(refresh=False, **kwargs):
    """The multilingual large model (400k steps).

    refresh (bool): whether to download ckpt/config again if existed.
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/mr_hubert/resolve/main/multi_large_400k.pt'
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_multilingual_large600k(refresh=False, **kwargs):
    """The multilingual large model (600k steps).

    refresh (bool): whether to download ckpt/config again if existed.
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/mr_hubert/resolve/main/multi_large_600k.pt'
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
class CMVN(nn.Module):
    """Global cepstral mean and variance normalization along one dimension."""

    __constants__ = ['mode', 'dim', 'eps']

    def __init__(self, mode='global', dim=2, eps=1e-10):
        super(CMVN, self).__init__()
        # Only utterance-wide ("global") statistics are supported.
        if mode != 'global':
            raise NotImplementedError('Only support global mean variance normalization.')
        self.mode = mode
        self.dim = dim
        self.eps = eps

    def forward(self, x):
        if self.mode == 'global':
            centered = x - x.mean(self.dim, keepdim=True)
            # eps keeps the division finite for constant inputs.
            return centered / (self.eps + x.std(self.dim, keepdim=True))

    def extra_repr(self):
        return f'mode={self.mode}, dim={self.dim}, eps={self.eps}'
|
class FeatureExtractor(nn.Module):
    """Feature extractor, transforming a waveform into a Mel spectrogram
    (optionally CMVN-normalized)."""

    def __init__(self, mode='fbank', num_mel_bins=80, decode_wav=False, apply_cmvn=True, **kwargs):
        super(FeatureExtractor, self).__init__()
        assert (mode == 'fbank'), 'Only Mel-spectrogram implemented'
        self.mode = mode
        self.extract_fn = kaldi.fbank
        self.apply_cmvn = apply_cmvn
        if self.apply_cmvn:
            self.cmvn = CMVN()
        self.num_mel_bins = num_mel_bins
        self.kwargs = kwargs  # extra kwargs forwarded to kaldi.fbank
        self.decode_wav = decode_wav
        if self.decode_wav:
            torchaudio.set_audio_backend('soundfile')

    def _load_file(self, filepath):
        """Load audio from disk, with or without int-range wav decoding."""
        if self.decode_wav:
            waveform, sample_rate = torchaudio.load_wav(filepath)
        else:
            waveform, sample_rate = torchaudio.load(filepath)
        return waveform, sample_rate

    def forward(self, waveform):
        y = self.extract_fn(waveform,
                            num_mel_bins=self.num_mel_bins,
                            sample_frequency=SAMPLE_RATE,
                            window_type=WINDOW_TYPE,
                            **self.kwargs)
        if self.apply_cmvn:
            # CMVN normalizes along its configured dim; reshape to
            # (1, mel, time) for it and back to (time, mel) afterwards.
            y = self.cmvn(y.transpose(0, 1).unsqueeze(0)).squeeze(0).transpose(0, 1)
        return y

    def extra_repr(self):
        return f'mode={self.mode}, num_mel_bins={self.num_mel_bins}'

    def create_msg(self):
        """List msg for verbose function"""
        msg = f'Audio spec.| Audio feat. = {self.mode}\t\t| feat. dim = {self.num_mel_bins}\t| CMVN = {self.apply_cmvn}'
        return [msg]
|
def create_transform(audio_config):
    """Build a FeatureExtractor from an audio config dict.

    Consumes (pops) the recognized keys and forwards the rest to the
    extractor. Returns (transform, feat_dim).
    """
    feat_type = audio_config.pop('feat_type')
    feat_dim = audio_config.pop('feat_dim')
    decode_wav = audio_config.pop('decode_wav', False)
    apply_cmvn = audio_config.pop('cmvn', True)
    extractor = FeatureExtractor(feat_type, feat_dim, decode_wav, apply_cmvn, **audio_config)
    return extractor, feat_dim
|
class UpstreamExpert(UpstreamBase):
    # NPC upstream expert: rebuilds the audio front-end from the checkpoint's
    # own config, loads the pretrained NPC model, and exposes hidden states
    # through UpstreamBase hooks.
    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        ckpt = torch.load(ckpt, map_location='cpu')
        config = ckpt['config']
        # Use the same audio preprocessing the model was trained with.
        (self.preprocessor, feat_dim) = create_transform(config['data']['audio'])
        self.model = NPC(feat_dim, **config['model']['paras'])
        self.model.load_state_dict(ckpt['model'])
        if (len(self.hooks) == 0):
            # Register hooks so hidden states are collected from submodules;
            # block outputs are transposed from (B, C, T) to (B, T, C).
            for (block_id, _) in enumerate(self.model.blocks):
                self.add_hook(f'self.model.blocks[{block_id}]', (lambda input, output: output.transpose(1, 2)))
            for (masked_conv_id, _) in enumerate(self.model.masked_convs):
                self.add_hook(f'self.model.masked_convs[{masked_conv_id}]', (lambda input, output: output))
            # The model's own output tuple: keep only its second element.
            self.add_hook('self.model', (lambda input, output: output[1]))
    def get_downsample_rates(self, key: str) -> int:
        # 10 ms frame hop at 16 kHz.
        return 160
    def forward(self, wavs):
        features = [self.preprocessor(wav.unsqueeze(0)) for wav in wavs]
        features = pad_sequence(features, batch_first=True)
        # NOTE(review): the return value is deliberately dropped — hidden
        # states appear to be gathered by the hooks registered in __init__.
        # Confirm UpstreamBase post-processes hook outputs when forward
        # returns None.
        (predicted_BxLxM, features) = self.model(features, testing=(not self.training))
|
def npc_local(ckpt, *args, **kwargs):
    """NPC model from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def npc_url(ckpt, refresh=False, *args, **kwargs):
    """NPC model from a checkpoint URL.

    ckpt (str): URL; refresh (bool): re-download even if cached.
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return npc_local(local_path, *args, **kwargs)
|
def npc(refresh=False, *args, **kwargs):
    """The default NPC model (360hr variant).

    refresh (bool): whether to download ckpt/config again if existed.
    """
    return npc_360hr(*args, refresh=refresh, **kwargs)
|
def npc_360hr(refresh=False, *args, **kwargs):
    """The standard NPC model trained on 360hr.

    refresh (bool): whether to download ckpt/config again if existed.
    """
    kwargs['ckpt'] = 'https://huggingface.co/leo19941227/apc_series/resolve/main/npc_360hr.ckpt'
    return npc_url(*args, refresh=refresh, **kwargs)
|
def npc_960hr(refresh=False, *args, **kwargs):
    """The standard NPC model trained on 960hr.

    refresh (bool): whether to download ckpt/config again if existed.
    """
    kwargs['ckpt'] = 'https://huggingface.co/leo19941227/apc_series/resolve/main/npc_960hr.ckpt'
    return npc_url(*args, refresh=refresh, **kwargs)
|
class VQLayer(nn.Module):
    """Gumbel-softmax vector-quantization layer that follows an RNN layer."""

    def __init__(self, input_size, codebook_size, code_dim, gumbel_temperature):
        """
        input_size: pre-quantized input feature size (usually the RNN hidden size).
        codebook_size: number of codes.
        code_dim: size of each code; if not the last layer, must equal the RNN
            hidden size.
        gumbel_temperature: temperature for gumbel-softmax.
        """
        super(VQLayer, self).__init__()
        self.codebook_size = codebook_size
        self.vq_logits = nn.Linear(input_size, codebook_size)
        self.gumbel_temperature = gumbel_temperature
        self.codebook_CxE = nn.Linear(codebook_size, code_dim, bias=False)
        # Running count of how often each code is selected.
        self.token_usg = np.zeros(codebook_size)

    def forward(self, inputs_BxLxI, testing, lens=None):
        logits_BxLxC = self.vq_logits(inputs_BxLxI)
        if testing:
            # Inference: hard argmax -> one-hot code selection.
            full_shape = logits_BxLxC.size()
            best = logits_BxLxC.max(dim=-1)[1]
            onehot_BxLxC = torch.zeros_like(logits_BxLxC).view(-1, full_shape[-1])
            onehot_BxLxC.scatter_(1, best.view(-1, 1), 1)
            onehot_BxLxC = onehot_BxLxC.view(*full_shape)
        else:
            # Training: straight-through gumbel-softmax sampling.
            onehot_BxLxC = gumbel_softmax(logits_BxLxC, tau=self.gumbel_temperature,
                                          hard=True, eps=EPS, dim=-1)
        # Accumulate code-usage statistics for the report_* methods.
        self.token_usg += (onehot_BxLxC.detach().cpu()
                           .reshape(-1, self.codebook_size).sum(dim=0).numpy())
        codes_BxLxE = self.codebook_CxE(onehot_BxLxC)
        return logits_BxLxC, codes_BxLxE

    def report_ppx(self):
        """Computes perplexity of distribution over codebook"""
        acc_usg = self.token_usg / sum(self.token_usg)
        return 2 ** sum(-acc_usg * np.log2(acc_usg + EPS))

    def report_usg(self):
        """Computes usage each entry in codebook (and resets the counter)"""
        acc_usg = self.token_usg / sum(self.token_usg)
        self.token_usg = np.zeros(self.codebook_size)
        return acc_usg
|
class UpstreamExpert(UpstreamBase):
    """PASE+ upstream expert."""

    def __init__(self, ckpt, model_config, **kwargs):
        """
        ckpt: pretrained PASE+ weights; model_config: PASE frontend config path.
        """
        super().__init__(**kwargs)
        try:
            from pase.models.frontend import wf_builder
        except ModuleNotFoundError:
            logger.error('Please check https://github.com/s3prl/s3prl/blob/master/s3prl/upstream/pase/README.md for how to install the dependencies of PASE+.')
            raise

        def build_pase(ckpt, model_config):
            # Build the frontend and load the last saved pretrained weights.
            pase = wf_builder(model_config)
            pase.load_pretrained(ckpt, load_last=True, verbose=False)
            return pase

        self.model = build_pase(ckpt, model_config)

    def get_downsample_rates(self, key: str) -> int:
        return 160

    def forward(self, wavs):
        wavs = pad_sequence(wavs, batch_first=True)
        wavs = wavs.unsqueeze(1)  # add channel dim: (B, 1, T)
        features = self.model(wavs)  # (B, C, T')
        features = features.transpose(1, 2).contiguous()  # (B, T', C)
        # BUG FIX: the computed features were silently discarded. Unlike the
        # hook-based experts, this class registers no hooks, so forward must
        # return the hidden states itself in the standard s3prl dict format.
        return {'hidden_states': [features]}
|
class UpstreamExpert(nn.Module):
    """PaSST upstream: resamples 16 kHz input to 32 kHz, then extracts
    timestamp embeddings from the selected hear21passt variant."""

    def __init__(self, name: str, refresh=False, window_secs: float = 0.16, stride_secs: float = 0.05):
        super().__init__()
        self.resampler = torchaudio.transforms.Resample(16000, 32000)
        self.module = importlib.import_module(f'.hear21passt.{name}', __package__)
        # hear21passt expects window/hop in milliseconds.
        self.model = self.module.load_model(timestamp_window=(window_secs * 1000),
                                            timestamp_hop=(stride_secs * 1000))
        self.stride_secs = stride_secs

    def get_downsample_rates(self, key=None):
        return int(self.stride_secs * SAMPLE_RATE)

    def forward(self, wavs: List[torch.Tensor]):
        batch = pad_sequence(wavs, batch_first=True)
        batch = self.resampler(batch)
        embs, _timestamps = self.module.get_timestamp_embeddings(batch, self.model)
        return {'hidden_states': [embs]}
|
def embeding_size(hop=50, embeding_size=1000):
    """Estimate storage (in GiB) of float32 embeddings for 20 minutes of audio.

    hop: hop size in milliseconds; embeding_size: embedding dimension.
    """
    n_frames = 20 * 60 * (1000 / hop)  # frames in 20 minutes
    return n_frames * embeding_size * 4 / (1024 * 1024 * 1024)  # 4 bytes/float
|
def load_model(model_path=''):
    """Build the 2-level + Mel concatenation PaSST model (moved to GPU when available)."""
    model = get_concat_2levelmel_model()
    if torch.cuda.is_available():
        model.cuda()
    return model
|
def get_scene_embeddings(audio, model):
    """Return float32 scene embeddings of shape (n_sounds, model.scene_embedding_size).

    audio: n_sounds x n_samples mono audio in [-1, 1]; all sounds in a batch are
    padded/trimmed to the same length. model: loaded model.
    """
    model.eval()
    with torch.no_grad():
        return model.get_scene_embeddings(audio)
|
def get_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) for mono audio in [-1, 1].

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds.
    Delegates to the 2-level + Mel variant.
    """
    return get_2lvlmel_timestamp_embeddings(audio, model)
|
def get_basic_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) from the model's own timestamp extractor.

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds.
    Runs in eval mode without gradients.
    """
    model.eval()
    with torch.no_grad():
        return model.get_timestamp_embeddings(audio)
|
def get_basic_model():
    """Build the basic PaSST wrapper (mel front-end + passt_s_swa_p16_128_ap476)."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
                         freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
                         fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net)
|
def get_concat_2level_model():
    """Build the PaSST wrapper whose timestamp embeddings concatenate two window
    scales (hence the doubled embedding size)."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
                         freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
                         fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, timestamp_embedding_size=(1295 * 2))
|
def get_2lvl_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) concatenating two window scales.

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds
    (taken from the default-window pass). Runs in eval mode without gradients.
    """
    model.eval()
    with torch.no_grad():
        fine, timestamps = model.get_timestamp_embeddings(audio)
        coarse, _ = model.get_timestamp_embeddings(audio, window_size=(model.timestamp_window * 5))
        return torch.cat((fine, coarse), dim=-1), timestamps
|
def get_concat_2levelmel_model():
    """Build the PaSST wrapper whose timestamp embeddings concatenate two window
    scales plus raw Mel frames (768 + 1295*2 dims)."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
                         freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
                         fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, timestamp_embedding_size=(768 + (1295 * 2)))
|
def get_2lvlmel_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) concatenating two window scales and Mel frames.

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds
    (taken from the default-window pass). Runs in eval mode without gradients.
    """
    model.eval()
    with torch.no_grad():
        mel_frames, _ = model.get_timestamp_mels(audio, window_size=1920)
        fine, timestamps = model.get_timestamp_embeddings(audio)
        coarse, _ = model.get_timestamp_embeddings(audio, window_size=(model.timestamp_window * 4))
        return torch.cat((fine, coarse, mel_frames), dim=-1), timestamps
|
def load_model(model_path='', mode='all', **kwds):
    """Build the basic PaSST wrapper with the given mode and extra kwargs."""
    return get_basic_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """Return float32 scene embeddings of shape (n_sounds, model.scene_embedding_size).

    audio: n_sounds x n_samples mono audio in [-1, 1]; sounds are padded/trimmed
    to a common length. model: loaded model.
    """
    return model.get_scene_embeddings(audio)
|
def get_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) for mono audio in [-1, 1].

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds.
    Delegates to the basic variant.
    """
    return get_basic_timestamp_embeddings(audio, model)
|
def get_basic_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) directly from the model.

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds.
    """
    return model.get_timestamp_embeddings(audio)
|
def get_basic_model(**kwargs):
    """Build the basic PaSST wrapper; extra kwargs go to PasstBasicWrapper."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
                         freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
                         fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, **kwargs)
|
def load_model(model_path='', mode='all', scene_hop=5000, **kwds):
    """Build the 20-second PaSST wrapper.

    scene_hop: hop size for the overlapping windows used when the scene audio
        length exceeds 20 seconds.

    Returns:
        model: wrapped PaSST model that accepts up to 20 seconds of audio
        without averaging the embeddings.
    """
    return get_basic_model(mode=mode, scene_hop=scene_hop, **kwds)
|
def get_scene_embeddings(audio, model):
    """Return float32 scene embeddings of shape (n_sounds, model.scene_embedding_size).

    audio: n_sounds x n_samples mono audio in [-1, 1]; sounds are padded/trimmed
    to a common length. model: loaded model.
    """
    return model.get_scene_embeddings(audio)
|
def get_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) for mono audio in [-1, 1].

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds.
    Delegates to the basic variant.
    """
    return get_basic_timestamp_embeddings(audio, model)
|
def get_basic_timestamp_embeddings(audio, model):
    """Return (embedding, timestamps) directly from the model.

    embedding: (n_sounds, n_timestamps, model.timestamp_embedding_size) float32;
    timestamps: (n_sounds, n_timestamps) centered timestamps in milliseconds.
    """
    return model.get_timestamp_embeddings(audio)
|
def get_basic_model(**kwargs):
    """Build the 20-second PaSST wrapper (passt_20sec arch, 2000-frame input,
    20 s max model window); extra kwargs go to PasstBasicWrapper."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
                         freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
                         fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_20sec', input_tdim=2000)
    return PasstBasicWrapper(mel=mel, net=net, max_model_window=20000, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Build the 2-level concatenation PaSST wrapper with the given mode."""
    return get_concat_2level_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """Return float32 scene embeddings of shape (n_sounds, model.scene_embedding_size).

    audio: n_sounds x n_samples mono audio in [-1, 1]; sounds are padded/trimmed
    to a common length. model: loaded model.
    """
    return model.get_scene_embeddings(audio)
|
def get_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings; delegates to the 2-level implementation.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    return get_2lvl_timestamp_embeddings(audio, model)
|
def get_concat_2level_model(**kwargs):
    """PaSST wrapper whose timestamp embedding concatenates two window scales (2 x 1295 dims)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=frontend, net=backbone,
                             timestamp_embedding_size=1295 * 2, **kwargs)
|
def get_2lvl_timestamp_embeddings(audio, model):
    """Two-scale timestamp embeddings: default window concatenated with a 5x window.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    fine, timestamps = model.get_timestamp_embeddings(audio)
    # Wider context: same hop, but a 5x larger analysis window.
    coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine, coarse), dim=-1)
    return (combined, timestamps)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the 2-level-plus-mel concat model; `model_path` is accepted for API compatibility but unused here."""
    return get_concat_2levelmel_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings; delegates to the 2-level-plus-mel implementation.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    return get_2lvlmel_timestamp_embeddings(audio, model)
|
def get_concat_2levelmel_model(**kwargs):
    """PaSST wrapper concatenating two window scales plus raw mel features (768 + 2 x 1295 dims)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=frontend, net=backbone,
                             timestamp_embedding_size=768 + 1295 * 2, **kwargs)
|
def get_2lvlmel_timestamp_embeddings(audio, model):
    """Timestamp embeddings: default window, 4x window, and raw mel patches, concatenated.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    # Raw mel features over a fixed 1920 ms window.
    mel_feats, _ = model.get_timestamp_mels(audio, window_size=1920)
    fine, timestamps = model.get_timestamp_embeddings(audio)
    coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 4)
    combined = torch.cat((fine, coarse, mel_feats), dim=-1)
    return (combined, timestamps)
|
def load_model(model_path='', mode='all', scene_hop=10000, **kwds):
    """Construct the basic wrapped PaSST model.

    scene_hop: hop size (ms) for overlapping windows when the scene audio
        is longer than 30 seconds.
    model_path: accepted for API compatibility but unused here.
    Returns:
        model: wrapped PaSST model that can take up to 30 seconds of audio
        without averaging the embeddings.
    """
    return get_basic_model(mode=mode, scene_hop=scene_hop, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings for a batch of audio clips.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    # Delegate to the module-level basic implementation.
    result = get_basic_timestamp_embeddings(audio, model)
    return result
|
def get_basic_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings computed directly by the wrapped model.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    timestamp_output = model.get_timestamp_embeddings(audio)
    return timestamp_output
|
def get_basic_model(**kwargs):
    """Build the 30-second PaSST wrapper (mel frontend + transformer backbone)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='passt_30sec', input_tdim=3000)
    # 30 000 ms maximum window matches the 30-second architecture.
    return PasstBasicWrapper(mel=frontend, net=backbone, max_model_window=30000, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the basic model; `model_path` is accepted for API compatibility but unused here."""
    return get_basic_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings for a batch of audio clips.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    # Delegate to the module-level basic implementation.
    result = get_basic_timestamp_embeddings(audio, model)
    return result
|
def get_basic_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings computed directly by the wrapped model.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    timestamp_output = model.get_timestamp_embeddings(audio)
    return timestamp_output
|
def get_basic_model(**kwargs):
    """Build the hop-100 PaSST wrapper (100-sample STFT hop frontend)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=100,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='stfthop100', input_tdim=3200)
    return PasstBasicWrapper(mel=frontend, net=backbone, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the 2-level hop-100 model; `model_path` is accepted for API compatibility but unused here."""
    return get_2lvl_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Two-scale timestamp embeddings: default window concatenated with a 5x window.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    fine, timestamps = model.get_timestamp_embeddings(audio)
    # Wider context: same hop, but a 5x larger analysis window.
    coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine, coarse), dim=-1)
    return (combined, timestamps)
|
def get_2lvl_model(**kwargs):
    """Hop-100 PaSST wrapper whose timestamp embedding concatenates two window scales (2 x 1295 dims)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=100,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='stfthop100', input_tdim=3200)
    return PasstBasicWrapper(mel=frontend, net=backbone,
                             timestamp_embedding_size=1295 * 2, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the 2-level model; `model_path` is accepted for API compatibility but unused here."""
    return get_2lvl_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Timestamp embeddings: default window, 5x window, and raw mel patches, concatenated.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    # Raw mel features over a 600 ms window (6 x the 100-sample hop scale).
    mel_feats, _ = model.get_timestamp_mels(audio, window_size=600)
    fine, timestamps = model.get_timestamp_embeddings(audio)
    coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine, coarse, mel_feats), dim=-1)
    return (combined, timestamps)
|
def get_2lvl_model(**kwargs):
    """Hop-100 PaSST wrapper concatenating two window scales plus raw mel features (768 + 2 x 1295 dims)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=100,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='stfthop100', input_tdim=3200)
    return PasstBasicWrapper(mel=frontend, net=backbone,
                             timestamp_embedding_size=768 + 1295 * 2, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the basic model; `model_path` is accepted for API compatibility but unused here."""
    return get_basic_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings for a batch of audio clips.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    # Delegate to the module-level basic implementation.
    result = get_basic_timestamp_embeddings(audio, model)
    return result
|
def get_basic_timestamp_embeddings(audio, model):
    """Per-timestamp embeddings computed directly by the wrapped model.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    timestamp_output = model.get_timestamp_embeddings(audio)
    return timestamp_output
|
def get_basic_model(**kwargs):
    """Build the hop-160 PaSST wrapper (160-sample STFT hop frontend)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=160,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='stfthop160', input_tdim=2000)
    return PasstBasicWrapper(mel=frontend, net=backbone, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the 2-level hop-160 model; `model_path` is accepted for API compatibility but unused here."""
    return get_2lvl_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Two-scale timestamp embeddings: default window concatenated with a 5x window.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    fine, timestamps = model.get_timestamp_embeddings(audio)
    # Wider context: same hop, but a 5x larger analysis window.
    coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine, coarse), dim=-1)
    return (combined, timestamps)
|
def get_2lvl_model(**kwargs):
    """Hop-160 PaSST wrapper whose timestamp embedding concatenates two window scales (2 x 1295 dims)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=160,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='stfthop160', input_tdim=2000)
    return PasstBasicWrapper(mel=frontend, net=backbone,
                             timestamp_embedding_size=1295 * 2, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the 2-level model; `model_path` is accepted for API compatibility but unused here."""
    return get_2lvl_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
def get_timestamp_embeddings(audio, model):
    """Timestamp embeddings: default window, 5x window, and raw mel patches, concatenated.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns (embedding, timestamps): float32 tensors of shape
        (n_sounds, n_timestamps, model.timestamp_embedding_size) and
        (n_sounds, n_timestamps) — centered timestamps in milliseconds.
    """
    # Raw mel features over a 960 ms window (6 x the 160-sample hop scale).
    mel_feats, _ = model.get_timestamp_mels(audio, window_size=960)
    fine, timestamps = model.get_timestamp_embeddings(audio)
    coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    combined = torch.cat((fine, coarse, mel_feats), dim=-1)
    return (combined, timestamps)
|
def get_2lvl_model(**kwargs):
    """Hop-160 PaSST wrapper concatenating two window scales plus raw mel features (768 + 2 x 1295 dims)."""
    frontend = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=160,
                              n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0,
                              fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    backbone = get_model_passt(arch='stfthop160', input_tdim=2000)
    return PasstBasicWrapper(mel=frontend, net=backbone,
                             timestamp_embedding_size=768 + 1295 * 2, **kwargs)
|
class AugmentMelSTFT(nn.Module):
    """Mel-spectrogram frontend with training-time augmentation.

    In training mode the mel filterbank edge frequencies (fmin/fmax) are
    randomly jittered and SpecAugment-style frequency/time masking is
    applied; in eval mode the output is deterministic.
    """

    def __init__(self, n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024,
                 freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1,
                 fmin_aug_range=1, fmax_aug_range=1000):
        """
        n_mels: number of mel bands.
        sr: sample rate in Hz.
        win_length / hopsize / n_fft: STFT parameters (samples).
        freqm / timem: max widths of frequency/time masking; 0 disables.
        fmin / fmax: mel filterbank edges in Hz; fmax=None derives it from sr.
        fmin_aug_range / fmax_aug_range: jitter ranges for fmin/fmax
            during training; 1 means no augmentation.
        """
        torch.nn.Module.__init__(self)
        self.win_length = win_length
        self.n_mels = n_mels
        self.n_fft = n_fft
        self.sr = sr
        self.htk = htk
        self.fmin = fmin
        if fmax is None:
            # Leave headroom below Nyquist so the fmax jitter stays valid.
            fmax = sr // 2 - fmax_aug_range // 2
        self.fmax = fmax
        self.norm = norm
        self.hopsize = hopsize
        self.register_buffer('window', torch.hann_window(win_length, periodic=False),
                             persistent=False)
        assert fmin_aug_range >= 1, \
            f'fmin_aug_range={fmin_aug_range} should be >=1; 1 means no augmentation'
        # Fixed: this previously re-checked fmin_aug_range (copy-paste bug),
        # so an invalid fmax_aug_range was never rejected.
        assert fmax_aug_range >= 1, \
            f'fmax_aug_range={fmax_aug_range} should be >=1; 1 means no augmentation'
        self.fmin_aug_range = fmin_aug_range
        self.fmax_aug_range = fmax_aug_range
        # First-order pre-emphasis filter as a fixed conv1d kernel.
        self.register_buffer('preemphasis_coefficient', torch.as_tensor([[[-0.97, 1]]]),
                             persistent=False)
        if freqm == 0:
            self.freqm = torch.nn.Identity()
        else:
            self.freqm = torchaudio.transforms.FrequencyMasking(freqm, iid_masks=True)
        if timem == 0:
            self.timem = torch.nn.Identity()
        else:
            self.timem = torchaudio.transforms.TimeMasking(timem, iid_masks=True)

    def forward(self, x):
        """x: (batch, samples) waveform -> (batch, n_mels, frames) log-mel spectrogram."""
        # Pre-emphasis, then power spectrogram via STFT.
        x = nn.functional.conv1d(x.unsqueeze(1), self.preemphasis_coefficient).squeeze(1)
        x = torch.stft(x, self.n_fft, hop_length=self.hopsize, win_length=self.win_length,
                       center=True, normalized=False, window=self.window, return_complex=True)
        x = torch.view_as_real(x)
        x = (x ** 2).sum(dim=-1)
        if self.training:
            # Jitter the filterbank edges; previously the random draws also
            # happened (and were discarded) in eval mode, wasting RNG state.
            fmin = self.fmin + torch.randint(self.fmin_aug_range, (1,)).item()
            fmax = self.fmax + self.fmax_aug_range // 2 - torch.randint(self.fmax_aug_range, (1,)).item()
        else:
            fmin = self.fmin
            fmax = self.fmax
        # Filterbank is rebuilt per forward pass because fmin/fmax change each step.
        mel_basis, _ = torchaudio.compliance.kaldi.get_mel_banks(
            self.n_mels, self.n_fft, self.sr, fmin, fmax,
            vtln_low=100.0, vtln_high=-500.0, vtln_warp_factor=1.0)
        # Pad one column so the basis matches the n_fft//2 + 1 STFT bins.
        mel_basis = torch.as_tensor(
            torch.nn.functional.pad(mel_basis, (0, 1), mode='constant', value=0),
            device=x.device)
        with torch.cuda.amp.autocast(enabled=False):
            # Keep the matmul/log in full precision for numerical stability.
            melspec = torch.matmul(mel_basis, x)
            melspec = (melspec + 1e-05).log()
        if self.training:
            melspec = self.freqm(melspec)
            melspec = self.timem(melspec)
        # Fast approximate normalization to roughly zero-mean/unit-range.
        melspec = (melspec + 4.5) / 5.0
        return melspec

    def extra_repr(self):
        """Shown in the module's repr()."""
        return 'winsize={}, hopsize={}'.format(self.win_length, self.hopsize)
|
def load_model(model_path='', mode='all', **kwds):
    """Construct the basic model; `model_path` is accepted for API compatibility but unused here."""
    return get_basic_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """One scene-level embedding per sound.

    audio: n_sounds x n_samples of mono audio in [-1, 1]; all clips in a
        batch are padded/trimmed to the same length.
    model: loaded model.
    Returns: float32 tensor of shape (n_sounds, model.scene_embedding_size).
    """
    scene_embeddings = model.get_scene_embeddings(audio)
    return scene_embeddings
|
# NOTE(review): the three lines below are scraped web-page residue (a dataset
# viewer footer), not Python code; commented out so the module stays importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.