# code stringlengths 17 6.64M |
# |---|
@dataclass
class MAE_AST_Pretraining_Config():
    """Configuration for MAE-AST pretraining.

    Groups the dataset location, audio preprocessing, feature-extraction
    options, and the masking strategy applied during pretraining.
    Each field's `metadata['help']` carries the CLI help text.
    """

    # --- dataset location / raw-audio preprocessing ---
    data: str = field(default=MISSING, metadata={'help': 'path to data directory'})
    sample_rate: int = field(default=16000, metadata={'help': 'target sample rate. audio files will be up/down sampled to this rate'})
    normalize: bool = field(default=False, metadata={'help': 'if set, normalizes input to have 0 mean and unit variance'})
    enable_padding: bool = field(default=False, metadata={'help': 'pad shorter samples instead of cropping'})
    # --- sample-size filtering and cropping for batching ---
    max_keep_size: Optional[int] = field(default=None, metadata={'help': 'exclude sample longer than this'})
    max_sample_size: Optional[int] = field(default=None, metadata={'help': 'max sample size to crop to for batching'})
    min_sample_size: Optional[int] = field(default=None, metadata={'help': 'min sample size to crop to for batching'})
    random_crop: Optional[bool] = field(default=True, metadata={'help': 'always crop from the beginning if false'})
    pad_audio: Optional[bool] = field(default=False, metadata={'help': 'pad audio to the longest one in the batch if true'})
    # --- feature extraction fed to the transformer ---
    feature_type: Optional[str] = field(default='wav', metadata={'help': "choose from ['wav', 'spectrogram', 'fbank', 'mfcc']"})
    feature_rate: Optional[int] = field(default=100, metadata={'help': 'rate of feature input to the transformer, if use wav, this arg is omited, else if use spectrogram/fbank/mfcc, the default is 100, i.e. 1s audio gives 100 frames. the label rate of using MFCC is also 100'})
    feature_dim: Optional[int] = field(default=100, metadata={'help': 'dim feature input to the transformer, if use wav, this arg is omited, else if use spectrogram/fbank/mfcc, the default is 80'})
    deltas: Optional[bool] = field(default=True, metadata={'help': 'whether or not add delta and delta-delta to the feature, only effective for spectrogram/fbank/mfcc'})
    # --- masking strategy for the MAE objective ---
    mask_spans: Optional[bool] = field(default=False, metadata={'help': 'mask random spans, same as that is used in HuBERT and w2v2'})
    mask_type: MASK_TYPE_CHOICES = field(default='random_mask', metadata={'help': 'Determine type of mask for MAE pretraining. \n -retain_spans: Only for frame data. Wav2Vec2 like masking.\n -random_mask: Perform masking on completely random tokens. No chunking. Used in MAE\n -random_mask_batched: random_mask with the same mask across the batch.\n -chunk_mask: Perform masking on chunks until mask_spans hit. From SSAST. Same across batch for speed.\n '})
|
class MAE_AST_Pretraining_Task():
    """Pretraining task wrapper for MAE-AST (S3PRL port).

    Only the pieces needed for inference are kept; dataset loading lives in
    the official repository.
    """

    cfg: MAE_AST_Pretraining_Config

    def __init__(self, cfg: MAE_AST_Pretraining_Config) -> None:
        super().__init__(cfg)
        logger.info(f'current directory is {os.getcwd()}')
        logger.info(f'MAEPretrainingTask Config {cfg}')
        self.cfg = cfg

    @classmethod
    def setup_task(cls, cfg: MAE_AST_Pretraining_Config, **kwargs) -> 'MAE_AST_Pretraining_Task':
        """Factory used by the framework; simply forwards the config."""
        return cls(cfg)

    # This task carries no vocabulary, so every dictionary accessor is None.
    @property
    def source_dictionary(self):
        return None

    @property
    def target_dictionary(self):
        return None

    @property
    def dictionaries(self):
        return None

    def load_dataset(self, split: str, **kwargs) -> None:
        raise NotImplementedError('This part is not used thus not included in S3PRL, please visit the official repo.')

    def max_positions(self) -> Tuple[(int, int)]:
        """No length limit: report the largest representable sizes."""
        return (sys.maxsize,) * 2

    def filter_indices_by_size(self, indices: np.array, *args, **kwargs) -> np.array:
        """No filtering is performed; the indices pass through unchanged."""
        return indices
|
class UpstreamExpert(UpstreamBase):
    """The Mockingjay wrapper around `PretrainedTransformer`."""

    def __init__(self, ckpt, options_config=None, **kwargs):
        super().__init__(**kwargs)
        if options_config is None:
            print('[UpstreamExpert] - Using the default upstream expert config')
            options = {'load_pretrain': 'True', 'no_grad': 'False', 'dropout': 'default', 'spec_aug': 'False', 'spec_aug_prev': 'True', 'output_hidden_states': 'True', 'permute_input': 'False'}
        else:
            print('[UpstreamExpert] - Using upstream expert config file from:', options_config)
            with open(options_config, 'r') as file:
                options = yaml.load(file, Loader=yaml.FullLoader)
        options['ckpt_file'] = ckpt
        options['select_layer'] = -1
        self.transformer = PretrainedTransformer(options, inp_dim=-1)
        assert hasattr(self.transformer, 'extracter'), 'This wrapper only supports `on-the-fly` ckpt with built in feature extracters.'
        # Warm-up call: run one dummy waveform through the model once.
        self.transformer([torch.randn(16000)])

    def get_downsample_rates(self, key: str) -> int:
        """Frames are produced every 160 samples (10 ms at 16 kHz)."""
        return 160

    def forward(self, wavs):
        last_hidden_state, hidden_states = self.transformer(wavs)
        return {'last_hidden_state': last_hidden_state, 'hidden_states': hidden_states.unbind(dim=0)}
|
def mockingjay_local(ckpt, options_config=None, *args, **kwargs):
    """Build the Mockingjay upstream from a local checkpoint.

    Args:
        ckpt (str): PATH
        options_config (str, optional): path to an expert config file.
    """
    assert os.path.isfile(ckpt)
    if options_config is not None:
        assert os.path.isfile(options_config)
    return _UpstreamExpert(ckpt, options_config, *args, **kwargs)
|
def mockingjay_url(ckpt, refresh=False, *args, **kwargs):
    """Build the Mockingjay upstream from a checkpoint URL.

    Args:
        ckpt (str): URL
        refresh (bool): re-download the checkpoint even if cached.
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return mockingjay_local(local_path, *args, **kwargs)
|
def mockingjay(refresh=False, *args, **kwargs):
    """The default Mockingjay model.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    return mockingjay_origin(*args, refresh=refresh, **kwargs)
|
def mockingjay_origin(refresh=False, *args, **kwargs):
    """The Mockingjay large model on 360hr, log-Mel input and linear target.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    return mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1(*args, refresh=refresh, **kwargs)
|
def mockingjay_100hr(refresh=False, *args, **kwargs):
    """The Mockingjay base model trained on 100hr of speech.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    return mockingjay_logMelBase_T_AdamW_b32_200k_100hr(*args, refresh=refresh, **kwargs)
|
def mockingjay_960hr(refresh=False, *args, **kwargs):
    """The Mockingjay base model trained on 960hr of speech.

    Args:
        refresh (bool): whether to download ckpt/config again if existed
    """
    return mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """Feature: 80-dim log Mel. Alteration: time. Optimizer: AdamW.
    Batch size: 32. Total steps: 200k. Unlabled Speech: 100hr.
    """
    url = 'https://www.dropbox.com/s/luorglf8mdg67l2/states-200000.ckpt?dl=1'
    kwargs['ckpt'] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1(refresh=False, *args, **kwargs):
    """Feature: 80-dim log Mel (input) / 201-dim Linear (target). Alteration: time.
    Optimizer: AdamW. Batch size: 32. Total steps: 500k. Unlabled Speech: 360hr.
    """
    url = 'https://huggingface.co/s3prl/mockingjay/resolve/main/mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1/states-500000.ckpt'
    kwargs['ckpt'] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr(refresh=False, *args, **kwargs):
    """Feature: 80-dim log Mel. Alteration: time. Optimizer: AdamW.
    Batch size: 32. Total steps: 1M. Unlabled Speech: 960hr.
    """
    url = 'https://www.dropbox.com/s/jzx0xggk663jev6/states-1000000.ckpt?dl=1'
    kwargs['ckpt'] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """Feature: 80-dim log Mel. Alteration: time. Optimizer: AdamW.
    Batch size: 32. Total steps: 1M. Unlabled Speech: 960hr.
    Differences: Dropout of 0.1 (instead of 0.3).
    """
    url = 'https://www.dropbox.com/s/7f9z6dzc7oix6qv/states-1000000.ckpt?dl=1'
    kwargs['ckpt'] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_seq3k(refresh=False, *args, **kwargs):
    """Feature: 80-dim log Mel. Alteration: time. Optimizer: AdamW.
    Batch size: 32. Total steps: 1M. Unlabled Speech: 960hr.
    Differences: sequence length of 3k (instead of 1.5k).
    """
    url = 'https://www.dropbox.com/s/qnnvdrai2tfmjmh/states-1000000.ckpt?dl=1'
    kwargs['ckpt'] = url
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
class TransformerConfig(object):
    """Configuration class to store the configuration of a `TransformerModel`.

    Values are pulled from a raw mapping and coerced to their proper types,
    so string-valued configs (e.g. parsed from YAML) are accepted.
    """

    def __init__(self, config):
        # (attribute name, coercion) pairs; order preserved from the original.
        for key, cast in (
            ('hidden_size', int),
            ('num_hidden_layers', int),
            ('num_attention_heads', int),
            ('hidden_act', str),
            ('intermediate_size', int),
            ('hidden_dropout_prob', float),
            ('attention_probs_dropout_prob', float),
            ('initializer_range', float),
            ('layer_norm_eps', float),
            ('share_layer', bool),
            ('pre_layer_norm', bool),
        ):
            setattr(self, key, cast(config[key]))
|
def prune_linear_layer(layer, index, dim=0):
    """Prune a linear layer (a model parameters) to keep only entries in index.

    Return the pruned layer as a new layer with requires_grad=True.
    Used to remove heads.

    Args:
        layer (nn.Linear): the layer to prune.
        index (torch.LongTensor): indices to keep along `dim`.
        dim (int): 0 prunes output features (rows of the weight),
            1 prunes input features (columns); the bias is kept whole
            when pruning input features.

    Returns:
        nn.Linear: a fresh layer on the same device with gradients enabled.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    b = None
    if layer.bias is not None:
        # The bias has one entry per output feature, so it is only pruned
        # when dim == 0; pruning input features leaves it untouched.
        b = layer.bias.clone().detach() if dim == 1 else layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    # Copy under no_grad instead of toggling requires_grad around the copy:
    # same result (parameters keep their default requires_grad=True) without
    # the fragile flag flipping.
    with torch.no_grad():
        new_layer.weight.copy_(W.contiguous())
        if layer.bias is not None:
            new_layer.bias.copy_(b.contiguous())
    return new_layer
|
def gelu(x):
    """Gaussian Error Linear Unit (exact erf formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    Also see https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (torch.erf(x / math.sqrt(2.0)) + 1.0)
|
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
|
class TransformerLayerNorm(nn.Module):
    """Layernorm module in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super(TransformerLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        # Biased variance over the last dimension.
        var = centered.pow(2).mean(-1, keepdim=True)
        normed = centered / torch.sqrt(var + self.variance_epsilon)
        return self.weight * normed + self.bias
|
class TransformerInputRepresentations(nn.Module):
    """Construct the input representation from spectrogram and position encodings.

    Projects the spectrogram to hidden size, adds the position encoding,
    then applies LayerNorm and dropout.
    """

    def __init__(self, config, input_dim):
        super(TransformerInputRepresentations, self).__init__()
        self.hidden_size = config.hidden_size
        self.spec_transform = nn.Linear(input_dim, config.hidden_size)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, spec, pos_enc):
        hidden = self.spec_transform(spec) + pos_enc
        hidden = self.LayerNorm(hidden)
        return self.dropout(hidden)
|
class TransformerSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Optionally returns the attention probabilities and/or retains the
    per-head context output (with gradient) for head-importance analysis.
    """

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = output_attentions
        self.keep_multihead_output = keep_multihead_output
        self.multihead_output = None
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (B, T, all_head) -> (B, heads, T, head_size)."""
        shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*shape).permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Additive mask: large negative values zero out masked positions
        # after the softmax.
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        if head_mask is not None:
            probs = probs * head_mask
        context = torch.matmul(probs, v)
        if self.keep_multihead_output:
            self.multihead_output = context
            self.multihead_output.retain_grad()
        context = context.permute(0, 2, 1, 3).contiguous()
        context = context.view(*(context.size()[:-2] + (self.all_head_size,)))
        if self.output_attentions:
            return probs, context
        return context
|
class TransformerSelfOutput(nn.Module):
    """Attention output projection with residual connection.

    In the post-LN configuration the LayerNorm is applied after the
    residual; in pre-LN it is owned here but applied by the caller.
    """

    def __init__(self, config):
        super(TransformerSelfOutput, self).__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        out = self.dropout(self.dense(hidden_states)) + input_tensor
        return out if self.pre_layer_norm else self.LayerNorm(out)
|
class TransformerAttention(nn.Module):
    """Complete attention sub-layer: self-attention + output projection.

    Supports pruning individual attention heads and pre/post-LN layouts.
    """

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerAttention, self).__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.self = TransformerSelfAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.output = TransformerSelfOutput(config)
        if self.pre_layer_norm:
            # The pre-LN path reuses the output sub-module's LayerNorm.
            self.LayerNorm = self.output.LayerNorm

    def prune_heads(self, heads):
        """Remove the given head indices from q/k/v and the output projection."""
        if len(heads) == 0:
            return
        keep = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            keep[head] = 0
        keep = keep.view(-1).contiguous().eq(1)
        index = torch.arange(len(keep))[keep].long()
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads -= len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        attn_input = self.LayerNorm(input_tensor) if self.pre_layer_norm else input_tensor
        self_output = self.self(attn_input, attention_mask, head_mask)
        attentions = None
        if self.output_attentions:
            attentions, self_output = self_output
        attention_output = self.output(self_output, input_tensor)
        if self.output_attentions:
            return attentions, attention_output
        return attention_output
|
class TransformerIntermediate(nn.Module):
    """Position-wise feed-forward expansion with a configurable activation."""

    def __init__(self, config):
        super(TransformerIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # A string selects a registered activation; otherwise the config is
        # expected to carry a callable directly.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
|
class TransformerOutput(nn.Module):
    """Feed-forward output projection with residual connection.

    Mirrors `TransformerSelfOutput` but maps from the intermediate size
    back down to the hidden size.
    """

    def __init__(self, config):
        super(TransformerOutput, self).__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        out = self.dropout(self.dense(hidden_states)) + input_tensor
        return out if self.pre_layer_norm else self.LayerNorm(out)
|
class TransformerLayer(nn.Module):
    """One transformer block: attention sub-layer + feed-forward sub-layer."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerLayer, self).__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.attention = TransformerAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.intermediate = TransformerIntermediate(config)
        self.output = TransformerOutput(config)
        if self.pre_layer_norm:
            # Pre-LN normalizes the feed-forward input with the output LN.
            self.LayerNorm = self.output.LayerNorm

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_output = self.attention(hidden_states, attention_mask, head_mask)
        attentions = None
        if self.output_attentions:
            attentions, attention_output = attention_output
        ff_input = self.LayerNorm(attention_output) if self.pre_layer_norm else attention_output
        layer_output = self.output(self.intermediate(ff_input), attention_output)
        if self.output_attentions:
            return attentions, layer_output
        return layer_output
|
class TransformerEncoder(nn.Module):
    """Stack of transformer layers, optionally weight-shared, pre/post-LN."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerEncoder, self).__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        layer = TransformerLayer(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        if config.share_layer:
            # The same module object repeated: every depth shares one set of weights.
            self.layer = nn.ModuleList([layer for _ in range(config.num_hidden_layers)])
        else:
            self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        if self.pre_layer_norm:
            # One LayerNorm per tap point: each layer's input plus the final output.
            LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
            self.LayerNorm = nn.ModuleList([copy.deepcopy(LayerNorm) for _ in range(config.num_hidden_layers + 1)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
        all_encoder_layers = []
        all_attentions = []
        for i, layer_module in enumerate(self.layer):
            if output_all_encoded_layers:
                tap = self.LayerNorm[i](hidden_states) if self.pre_layer_norm else hidden_states
                all_encoder_layers.append(tap)
            hidden_states = layer_module(hidden_states, attention_mask, head_mask[i])
            if self.output_attentions:
                attentions, hidden_states = hidden_states
                all_attentions.append(attentions)
        final = self.LayerNorm[-1](hidden_states) if self.pre_layer_norm else hidden_states
        all_encoder_layers.append(final)
        if self.output_attentions:
            return all_attentions, all_encoder_layers
        return all_encoder_layers
|
class TransformerSpecPredictionHead(nn.Module):
    """Maps hidden states to spectrogram predictions: dense -> act -> LN -> linear."""

    def __init__(self, config, output_dim, input_dim=None):
        super(TransformerSpecPredictionHead, self).__init__()
        self.output_dim = output_dim
        # Default to projecting from the model hidden size unless overridden.
        in_features = config.hidden_size if input_dim is None else input_dim
        self.dense = nn.Linear(in_features, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.output = nn.Linear(config.hidden_size, self.output_dim)

    def forward(self, hidden_states):
        hidden_states = self.transform_act_fn(self.dense(hidden_states))
        hidden_states = self.LayerNorm(hidden_states)
        # Return both the prediction and the transformed hidden states.
        return self.output(hidden_states), hidden_states
|
class TransformerInitModel(nn.Module):
    """An abstract class to handle weights initialization.

    Holds the config and the `output_attentions` flag for subclasses.
    """

    def __init__(self, config, output_attentions, *inputs, **kwargs):
        super(TransformerInitModel, self).__init__()
        self.config = config
        self.output_attentions = output_attentions

    def init_Transformer_weights(self, module):
        """Initialize `module` in place (normal linears/embeddings, unit LayerNorm)."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, TransformerLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Linear biases always start at zero.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
|
class TransformerModel(TransformerInitModel):
    """Transformer encoder model.

    Params:
        `config`: a TransformerConfig instance with the model hyper-parameters.
        `input_dim`: int, input feature dimension of the spectrogram frames.
        `output_attentions`: if True, also return per-layer attention weights.
        `keep_multihead_output`: if True, retain each layer's multi-head output
            (with gradient) for head-importance analysis.
        `with_input_module`: if True, project `spec_input` (+ `pos_enc`) through
            `TransformerInputRepresentations` before encoding.

    Inputs:
        `spec_input`: [batch_size, sequence_length, feature_dim] frames
            (masked frames during training).
        `pos_enc`: [batch_size, sequence_length, hidden_size] position encodings.
        `attention_mask`: optional [batch_size, sequence_length] with 1 for
            valid positions; defaults to all-ones.
        `output_all_encoded_layers`: if True, return every layer's hidden
            states; otherwise only the last layer's.
        `head_mask`: optional [num_heads] or [num_layers, num_heads] tensor;
            1.0 means the head is fully masked, 0.0 not masked.

    Outputs:
        `encoded_layers` (list of [B, T, H] tensors, or a single tensor when
        `output_all_encoded_layers=False`), preceded by `all_attentions` when
        `output_attentions=True`.
    """

    def __init__(self, config, input_dim, output_attentions=False, keep_multihead_output=False, with_input_module=True):
        super(TransformerModel, self).__init__(config, output_attentions)
        self.with_input_module = with_input_module
        if self.with_input_module:
            self.input_representations = TransformerInputRepresentations(config, input_dim)
        self.encoder = TransformerEncoder(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.apply(self.init_Transformer_weights)

    def prune_heads(self, heads_to_prune):
        """Prunes heads of the model.

        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_multihead_outputs(self):
        """Gather all multi-head outputs.

        Return: list (layers) of multihead module outputs with gradients.
        """
        return [layer.attention.self.multihead_output for layer in self.encoder.layer]

    def forward(self, spec_input, pos_enc=None, attention_mask=None, output_all_encoded_layers=True, head_mask=None):
        if attention_mask is None:
            attention_mask = torch.ones_like(spec_input)
        # Broadcastable additive mask: 0.0 for positions to keep,
        # -10000.0 for positions to suppress in the attention softmax.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=spec_input.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * (-10000.0)
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # BUGFIX: `expand_as` takes a tensor argument; broadcasting to
                # explicit sizes requires `expand`. The original
                # `expand_as(num_layers, -1, ...)` raised a TypeError whenever
                # a 1-D head_mask was passed.
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.to(dtype=spec_input.dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers
        if self.with_input_module:
            input_representations = self.input_representations(spec_input, pos_enc)
        else:
            input_representations = spec_input
        encoded_layers = self.encoder(input_representations, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, head_mask=head_mask)
        if self.output_attentions:
            all_attentions, encoded_layers = encoded_layers
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        if self.output_attentions:
            return all_attentions, encoded_layers
        return encoded_layers
|
class UpstreamExpert(UpstreamBase):
    """MOS-prediction expert: pretrained upstream + featurizer + MOS head.

    Each waveform is cut into half-overlapping fixed-duration segments,
    every segment is scored, and segment scores are averaged per utterance.
    """

    def __init__(self, ckpt: str=None, model_config: str=None, **kwargs):
        """
        Args:
            ckpt:
                The checkpoint path for loading your pretrained weights.
                Can be assigned by the -k option in run_downstream.py
            model_config:
                The config path for constructing your model.
                Might not needed if you also save that in your checkpoint file.
                Can be assigned by the -g option in run_downstream.py
        """
        super().__init__(**kwargs)
        self.checkpoint = torch.load(ckpt, map_location='cpu')
        self.upstream_type = kwargs['upstream']
        self.mos_upstream = self._get_mos_upstream()
        self.mos_featurizer = self._get_mos_featurizer()
        self.mos_downstream = self._get_mos_downstream()
        self.segments_durations = 1

    def forward(self, wavs: List[Tensor]) -> Dict[str, Union[Tensor, List[Tensor], Dict[str, Tensor]]]:
        """Score a batch of waveforms; returns {'scores': per-utterance MOS}."""
        wavs_segments = [self.preprocessor(wav) for wav in wavs]
        flattened = [seg for segs in wavs_segments for seg in segs]
        # Prefix sums mark where each utterance's segments start/end in the
        # flattened batch.
        boundaries = list(accumulate((len(segs) for segs in wavs_segments), initial=0))
        features = self.mos_upstream(flattened)
        features = self.mos_featurizer(flattened, features)
        features = torch.stack(features)
        segments_scores = self.mos_downstream(features)
        scores = []
        for start, end in zip(boundaries, boundaries[1:]):
            scores.append(segments_scores[start:end].mean(dim=-1))
        return {'scores': torch.FloatTensor(scores)}

    def preprocessor(self, wav):
        """Chop one waveform into fixed-duration, half-overlapping segments."""
        return unfold_segments(wav, self.segments_durations)

    def _get_mos_upstream(self):
        mos_upstream = getattr(s3prl.hub, self.upstream_type)()
        if self.upstream_type == 'tera':
            # TERA checkpoints lack the mel filterbank buffer; register an
            # empty one so state_dict loading succeeds.
            self.checkpoint['Upstream']['transformer.extracter._melscale.fb'] = torch.tensor([])
        mos_upstream.load_state_dict(self.checkpoint['Upstream'])
        return mos_upstream

    def _get_mos_featurizer(self):
        return Featurizer(self.mos_upstream, upstream_device='cpu')

    def _get_mos_downstream(self):
        modelrc = self.checkpoint['Config']['downstream_expert']['modelrc']
        mos_downstream = MosDownstream(upstream_dim=self.mos_featurizer.output_dim, projector_dim=modelrc['projector_dim'], clipping=modelrc['clipping'], attention_pooling=modelrc['attention_pooling'])
        mos_downstream.load_state_dict(self.checkpoint['Downstream'])
        return mos_downstream
|
def mos_wav2vec2_local(ckpt, *args, **kwargs):
    """Build the wav2vec2-based MOS predictor from a local checkpoint.

    Args:
        ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    kwargs['upstream'] = 'wav2vec2'
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def mos_wav2vec2_url(ckpt, refresh=False, *args, **kwargs):
    """The model from URL.

    Args:
        ckpt (str): URL
        refresh (bool): re-download the checkpoint even if cached.
    """
    # BUGFIX: `refresh` was accepted but never forwarded, so a cached file was
    # always reused; forward it like mockingjay_url does.
    return mos_wav2vec2_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
|
def mos_wav2vec2(refresh=False, *args, **kwargs):
    """The default wav2vec2-based MOS predictor (downloads its checkpoint).

    Args:
        refresh (bool): whether to download the ckpt again if existed
    """
    url = 'https://www.dropbox.com/s/s9zpouk5svu1a4l/wav2vec2-dev-SRCC-best.ckpt?dl=1'
    kwargs['ckpt'] = url
    return mos_wav2vec2_url(*args, refresh=refresh, **kwargs)
|
def mos_tera_local(ckpt, *args, **kwargs):
    """Build the TERA-based MOS predictor from a local checkpoint.

    Args:
        ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    kwargs['upstream'] = 'tera'
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def mos_tera_url(ckpt, refresh=False, *args, **kwargs):
    """The model from URL.

    Args:
        ckpt (str): URL
        refresh (bool): re-download the checkpoint even if cached.
    """
    # BUGFIX: `refresh` was accepted but never forwarded, so a cached file was
    # always reused; forward it like mockingjay_url does.
    return mos_tera_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
|
def mos_tera(refresh=False, *args, **kwargs):
    """The default TERA-based MOS predictor (downloads its checkpoint).

    Args:
        refresh (bool): whether to download the ckpt again if existed
    """
    url = 'https://www.dropbox.com/s/w4jk5bujaoosk69/tera-dev-SRCC-best.ckpt?dl=1'
    kwargs['ckpt'] = url
    return mos_tera_url(*args, refresh=refresh, **kwargs)
|
def mos_apc_local(ckpt, *args, **kwargs):
    """Build the APC-based MOS predictor from a local checkpoint.

    Args:
        ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    kwargs['upstream'] = 'apc'
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def mos_apc_url(ckpt, refresh=False, *args, **kwargs):
    """The model from URL.

    Args:
        ckpt (str): URL
        refresh (bool): re-download the checkpoint even if cached.
    """
    # BUGFIX: `refresh` was accepted but never forwarded, so a cached file was
    # always reused; forward it like mockingjay_url does.
    return mos_apc_local(_urls_to_filepaths(ckpt, refresh=refresh), *args, **kwargs)
|
def mos_apc(refresh=False, *args, **kwargs):
    """The default APC-based MOS predictor (downloads its checkpoint).

    Args:
        refresh (bool): whether to download the ckpt again if existed
    """
    url = 'https://www.dropbox.com/s/ulng31as15hsvz1/apc-dev-SRCC-best.ckpt?dl=1'
    kwargs['ckpt'] = url
    return mos_apc_url(*args, refresh=refresh, **kwargs)
|
class MosDownstream(nn.Module):
    """Projects upstream features, then applies the MOS scoring module."""

    def __init__(self, upstream_dim, projector_dim, clipping, attention_pooling):
        super(MosDownstream, self).__init__()
        self.connector = nn.Linear(upstream_dim, projector_dim)
        self.model = MosDownstreamModule(input_dim=projector_dim, clipping=clipping, attention_pooling=attention_pooling)

    def forward(self, features):
        return self.model(self.connector(features))
|
class MosDownstreamModule(nn.Module):
    """MOS scoring head: a mean-score net plus an optional per-judge bias net.

    With `attention_pooling` the frames are pooled by self-attention before
    the linear scorer; otherwise per-frame scores are averaged over time.
    When `judge_ids` is given, a judge embedding is added to the features and
    a per-judge biased score is returned alongside the mean score.
    """

    def __init__(self, input_dim, clipping=False, attention_pooling=False, num_judges=5000, **kwargs):
        super(MosDownstreamModule, self).__init__()
        self.mean_net_linear = nn.Linear(input_dim, 1)
        self.mean_net_clipping = clipping
        self.mean_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        self.bias_net_linear = nn.Linear(input_dim, 1)
        self.bias_net_pooling = SelfAttentionPooling(input_dim) if attention_pooling else None
        self.judge_embbeding = nn.Embedding(num_embeddings=num_judges, embedding_dim=input_dim)

    def forward(self, features, judge_ids=None):
        if self.mean_net_pooling is None:
            segment_score = self.mean_net_linear(features).squeeze(-1).mean(dim=-1)
        else:
            segment_score = self.mean_net_linear(self.mean_net_pooling(features))
        if self.mean_net_clipping:
            # Squash into the MOS range [1, 5].
            segment_score = torch.tanh(segment_score) * 2 + 3
        if judge_ids is None:
            return segment_score.squeeze(-1)
        time = features.shape[1]
        judge_features = self.judge_embbeding(judge_ids)
        judge_features = torch.stack([judge_features for _ in range(time)], dim=1)
        bias_features = features + judge_features
        if self.bias_net_pooling is None:
            bias_score = self.bias_net_linear(bias_features).squeeze(-1).mean(dim=-1)
        else:
            bias_score = self.bias_net_linear(self.bias_net_pooling(bias_features))
        bias_score = bias_score + segment_score
        return segment_score.squeeze(-1), bias_score.squeeze(-1)
|
class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        # Scores each frame with a single learned projection.
        self.W = nn.Linear(input_dim, 1)

    def forward(self, batch_rep):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension

        attention_weight:
            att_w : size (N, T, 1)

        return:
            utter_rep: size (N, H)
        """
        # dim=-1 made explicit: the implicit-dim softmax is deprecated and
        # warns; for this (N, T) input the result is identical.
        att_w = nn.functional.softmax(self.W(batch_rep).squeeze(-1), dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep
|
def unfold_segments(tensor, tgt_duration, sample_rate=16000):
    """Split a 1-D waveform into half-overlapping windows of *tgt_duration*
    seconds.

    The waveform is right-padded with zeros so that every window is
    full-length; the hop between windows is half a window.

    Returns:
        tuple of 1-D tensors, one per window.
    """
    seg_len = int(tgt_duration * sample_rate)
    hop = seg_len // 2
    src_len = len(tensor)
    if src_len <= seg_len:
        padded_len = seg_len
    else:
        padded_len = (src_len // hop + 1) * hop
    pad = torch.zeros(padded_len - src_len).to(tensor.device)
    padded = torch.cat([tensor, pad])
    return padded.unfold(0, seg_len, hop).unbind(0)
|
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str=None):
    """Convert a fairseq checkpoint into the self-contained s3prl format.

    The converted dict bundles task/model configs, model weights, and the
    dictionaries' symbol lists so fairseq is not needed at load time. If
    *output_path* is given, the dict is saved there (parents are created).
    """
    from fairseq.data.dictionary import Dictionary
    (state, cfg) = load_fairseq_ckpt(fairseq_source)
    dicts: List[Dictionary] = state['task_state']['dictionaries']
    output_state = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
        'dictionaries_symbols': [dictionary.symbols for dictionary in dicts],
    }
    if output_path is not None:
        Path(output_path).parent.mkdir(exist_ok=True, parents=True)
        torch.save(output_state, output_path)
|
def load_converted_model(ckpt: str):
    """Load a converted Multires-HuBERT checkpoint.

    Validates that the checkpoint carries all required keys, rebuilds the
    configs and model, and loads the weights.

    Returns:
        (model, task_cfg) tuple.

    Raises:
        ValueError: if a required key is missing from the checkpoint.
    """
    ckpt_state = torch.load(ckpt, map_location='cpu')
    for key in ('task_cfg', 'model_cfg', 'model_weight', 'dictionaries_symbols'):
        if key not in ckpt_state:
            raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {key} is missing')
    task_cfg = merge_with_parent(MultiresHubertPretrainingConfig, ckpt_state['task_cfg'])
    model_cfg = merge_with_parent(MultiresHubertConfig, ckpt_state['model_cfg'])
    model = MultiresHubertModel(model_cfg, task_cfg, ckpt_state['dictionaries_symbols'])
    model.load_state_dict(ckpt_state['model_weight'])
    return (model, task_cfg)
|
def multires_hubert_custom(ckpt: str, refresh: bool=False, **kwargs):
    """Load a Multires-HuBERT expert from a local path or a URL.

    URLs are first downloaded to a local file (re-downloaded when *refresh*).
    """
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt=ckpt, **kwargs)
|
def multires_hubert_local(*args, **kwargs):
    """Alias of multires_hubert_custom for local checkpoint paths."""
    return multires_hubert_custom(*args, **kwargs)
|
def multires_hubert_base(refresh=False, **kwargs):
    """
    The monolingual base model
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs = {**kwargs, 'ckpt': 'https://huggingface.co/s3prl/mr_hubert/resolve/main/mrhubert_mono_base.pt'}
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_large(refresh=False, **kwargs):
    """
    The monolingual large model
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/mr_hubert/resolve/main/mrhubert_mono_large.pt'
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_multilingual_base(refresh=False, **kwargs):
    """
    The multilingual base model
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs = {**kwargs, 'ckpt': 'https://huggingface.co/s3prl/mr_hubert/resolve/main/multi_base.pt'}
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_multilingual_large400k(refresh=False, **kwargs):
    """
    The multilingual large model (400k steps)
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs = {**kwargs, 'ckpt': 'https://huggingface.co/s3prl/mr_hubert/resolve/main/multi_large_400k.pt'}
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
def multires_hubert_multilingual_large600k(refresh=False, **kwargs):
    """
    The multilingual large model (600k steps)
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs = {**kwargs, 'ckpt': 'https://huggingface.co/s3prl/mr_hubert/resolve/main/multi_large_600k.pt'}
    return multires_hubert_custom(refresh=refresh, **kwargs)
|
class CMVN(nn.Module):
    """Global cepstral mean and variance normalization along one dimension."""

    __constants__ = ['mode', 'dim', 'eps']

    def __init__(self, mode='global', dim=2, eps=1e-10):
        super(CMVN, self).__init__()
        if mode != 'global':
            raise NotImplementedError('Only support global mean variance normalization.')
        self.mode = mode
        self.dim = dim
        # eps keeps the division stable when std is ~0.
        self.eps = eps

    def forward(self, x):
        if self.mode == 'global':
            mean = x.mean(self.dim, keepdim=True)
            std = x.std(self.dim, keepdim=True)
            return (x - mean) / (self.eps + std)

    def extra_repr(self):
        return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
|
class FeatureExtractor(nn.Module):
    'Feature extractor, transforming file path to Mel spectrogram'

    def __init__(self, mode='fbank', num_mel_bins=80, decode_wav=False, apply_cmvn=True, **kwargs):
        super(FeatureExtractor, self).__init__()
        assert (mode == 'fbank'), 'Only Mel-spectrogram implemented'
        self.mode = mode
        self.extract_fn = kaldi.fbank  # Kaldi-compatible fbank extractor
        self.apply_cmvn = apply_cmvn
        if self.apply_cmvn:
            self.cmvn = CMVN()
        self.num_mel_bins = num_mel_bins
        # Extra keyword args are forwarded verbatim to the extractor.
        self.kwargs = kwargs
        self.decode_wav = decode_wav
        if self.decode_wav:
            torchaudio.set_audio_backend('soundfile')

    def _load_file(self, filepath):
        """Load audio from *filepath*; returns (waveform, sample_rate)."""
        loader = torchaudio.load_wav if self.decode_wav else torchaudio.load
        return loader(filepath)

    def forward(self, waveform):
        feat = self.extract_fn(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=SAMPLE_RATE, window_type=WINDOW_TYPE, **self.kwargs)
        if self.apply_cmvn:
            # CMVN normalizes along dim=2, so lift to (1, mel, time) and back.
            feat = self.cmvn(feat.transpose(0, 1).unsqueeze(0)).squeeze(0).transpose(0, 1)
        return feat

    def extra_repr(self):
        return 'mode={}, num_mel_bins={}'.format(self.mode, self.num_mel_bins)

    def create_msg(self):
        'List msg for verbose function'
        msg = 'Audio spec.| Audio feat. = {}\t\t| feat. dim = {}\t| CMVN = {}'.format(self.mode, self.num_mel_bins, self.apply_cmvn)
        return [msg]
|
def create_transform(audio_config):
    """Build a FeatureExtractor from an audio-config dict.

    Consumes the 'feat_type', 'feat_dim', 'decode_wav' and 'cmvn' keys of
    *audio_config* (mutates the dict); remaining keys are forwarded to the
    extractor.

    Returns:
        (transform, feat_dim) tuple.
    """
    feat_type = audio_config.pop('feat_type')
    feat_dim = audio_config.pop('feat_dim')
    decode_wav = audio_config.pop('decode_wav', False)
    apply_cmvn = audio_config.pop('cmvn', True)
    extractor = FeatureExtractor(feat_type, feat_dim, decode_wav, apply_cmvn, **audio_config)
    return (extractor, feat_dim)
|
class UpstreamExpert(UpstreamBase):
    """NPC upstream wrapper: loads a checkpointed NPC model together with the
    fbank/CMVN preprocessor described in the checkpoint's config."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        ckpt = torch.load(ckpt, map_location='cpu')
        config = ckpt['config']
        # Build the acoustic front end from the checkpoint's audio config.
        (self.preprocessor, feat_dim) = create_transform(config['data']['audio'])
        self.model = NPC(feat_dim, **config['model']['paras'])
        self.model.load_state_dict(ckpt['model'])
        if (len(self.hooks) == 0):
            # Register hooks exposing each conv block (transposed to (B, T, C)),
            # each masked conv, and the model's second output as hidden states.
            for (block_id, _) in enumerate(self.model.blocks):
                self.add_hook(f'self.model.blocks[{block_id}]', (lambda input, output: output.transpose(1, 2)))
            for (masked_conv_id, _) in enumerate(self.model.masked_convs):
                self.add_hook(f'self.model.masked_convs[{masked_conv_id}]', (lambda input, output: output))
            self.add_hook('self.model', (lambda input, output: output[1]))

    def get_downsample_rates(self, key: str) -> int:
        # One output frame per 160 input samples.
        return 160

    def forward(self, wavs):
        features = [self.preprocessor(wav.unsqueeze(0)) for wav in wavs]
        features = pad_sequence(features, batch_first=True)
        (predicted_BxLxM, features) = self.model(features, testing=(not self.training))
        # NOTE(review): no explicit return — hidden states are presumably
        # collected by the hooks registered in __init__ (UpstreamBase);
        # confirm callers do not expect a return value here.
|
def npc_local(ckpt, *args, **kwargs):
    """
    The model from a local checkpoint.
        ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def npc_url(ckpt, refresh=False, *args, **kwargs):
    """
    The model from a URL (downloaded to a local cache first).
        ckpt (str): URL
        refresh (bool): whether to download ckpt/config again if existed
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return npc_local(local_path, *args, **kwargs)
|
def npc(refresh=False, *args, **kwargs):
    """
    The default NPC model (the 360hr variant).
        refresh (bool): whether to download ckpt/config again if existed
    """
    return npc_360hr(*args, refresh=refresh, **kwargs)
|
def npc_360hr(refresh=False, *args, **kwargs):
    """
    The npc standard model trained on 360hr
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs = {**kwargs, 'ckpt': 'https://huggingface.co/leo19941227/apc_series/resolve/main/npc_360hr.ckpt'}
    return npc_url(*args, refresh=refresh, **kwargs)
|
def npc_960hr(refresh=False, *args, **kwargs):
    """
    The npc standard model trained on 960hr
        refresh (bool): whether to download ckpt/config again if existed
    """
    kwargs = {**kwargs, 'ckpt': 'https://huggingface.co/leo19941227/apc_series/resolve/main/npc_960hr.ckpt'}
    return npc_url(*args, refresh=refresh, **kwargs)
|
class VQLayer(nn.Module):
    """Vector-quantization layer that follows an RNN layer.

    input_size: pre-quantized input feature size, usually the RNN hidden size.
    codebook_size: number of codes.
    code_dim: size of each code; if not the last layer, must equal the RNN
        hidden size.
    gumbel_temperature: temperature for gumbel-softmax during training.
    """

    def __init__(self, input_size, codebook_size, code_dim, gumbel_temperature):
        super(VQLayer, self).__init__()
        self.codebook_size = codebook_size
        self.vq_logits = nn.Linear(input_size, codebook_size)
        self.gumbel_temperature = gumbel_temperature
        # Codebook stored as a bias-free linear map: one-hot -> code vector.
        self.codebook_CxE = nn.Linear(codebook_size, code_dim, bias=False)
        # Accumulated code-usage counts, reset by report_usg().
        self.token_usg = np.zeros(codebook_size)

    def forward(self, inputs_BxLxI, testing, lens=None):
        logits_BxLxC = self.vq_logits(inputs_BxLxI)
        if testing:
            # Hard argmax selection at test time.
            ind = logits_BxLxC.argmax(dim=-1, keepdim=True)
            onehot_BxLxC = torch.zeros_like(logits_BxLxC).scatter_(-1, ind, 1)
        else:
            # Straight-through gumbel-softmax sampling during training.
            onehot_BxLxC = gumbel_softmax(logits_BxLxC, tau=self.gumbel_temperature, hard=True, eps=EPS, dim=(-1))
        self.token_usg += onehot_BxLxC.detach().cpu().reshape(-1, self.codebook_size).sum(dim=0).numpy()
        codes_BxLxE = self.codebook_CxE(onehot_BxLxC)
        return (logits_BxLxC, codes_BxLxE)

    def report_ppx(self):
        'Computes perplexity of distribution over codebook'
        acc_usg = self.token_usg / sum(self.token_usg)
        return 2 ** sum(-acc_usg * np.log2(acc_usg + EPS))

    def report_usg(self):
        'Computes usage each entry in codebook'
        acc_usg = self.token_usg / sum(self.token_usg)
        # Reset the accumulator for the next reporting window.
        self.token_usg = np.zeros(self.codebook_size)
        return acc_usg
|
class UpstreamExpert(UpstreamBase):
    """PASE+ upstream wrapper: loads a pretrained PASE waveform frontend.

    Requires the optional `pase` package; see the linked README for setup.
    """

    def __init__(self, ckpt, model_config, **kwargs):
        super().__init__(**kwargs)
        try:
            from pase.models.frontend import wf_builder
        except ModuleNotFoundError:
            logger.error('Please check https://github.com/s3prl/s3prl/blob/master/s3prl/upstream/pase/README.md for how to install the dependencies of PASE+.')
            raise

        def build_pase(ckpt, model_config):
            # Build the frontend from its config and load pretrained weights.
            pase = wf_builder(model_config)
            pase.load_pretrained(ckpt, load_last=True, verbose=False)
            return pase
        self.model = build_pase(ckpt, model_config)

    def get_downsample_rates(self, key: str) -> int:
        # One output frame per 160 input samples (10 ms at 16 kHz —
        # TODO confirm the expected input sample rate).
        return 160

    def forward(self, wavs):
        # (B, T) padded batch -> (B, 1, T): PASE consumes a channel dim.
        wavs = pad_sequence(wavs, batch_first=True)
        wavs = wavs.unsqueeze(1)
        features = self.model(wavs)
        features = features.transpose(1, 2).contiguous()
        # NOTE(review): no explicit return — presumably hidden states are
        # collected by UpstreamBase hooks; confirm callers do not expect a
        # return value here (it is None).
|
class UpstreamExpert(nn.Module):
    """hear21passt upstream: resamples 16 kHz input to 32 kHz and returns
    timestamp embeddings from the named PaSST variant."""

    def __init__(self, name: str, refresh=False, window_secs: float=0.16, stride_secs: float=0.05):
        super().__init__()
        # The PaSST models consume 32 kHz audio.
        self.resampler = torchaudio.transforms.Resample(16000, 32000)
        self.module = importlib.import_module(f'.hear21passt.{name}', __package__)
        # hear21passt expects window/hop in milliseconds.
        self.model = self.module.load_model(timestamp_window=window_secs * 1000, timestamp_hop=stride_secs * 1000)
        self.stride_secs = stride_secs

    def get_downsample_rates(self, key=None):
        return int(self.stride_secs * SAMPLE_RATE)

    def forward(self, wavs: List[torch.Tensor]):
        batch = self.resampler(pad_sequence(wavs, batch_first=True))
        embs, _ = self.module.get_timestamp_embeddings(batch, self.model)
        return {'hidden_states': [embs]}
|
def embeding_size(hop=50, embeding_size=1000):
    """Estimate the storage (GiB) of float32 embeddings for 20 minutes of audio.

    hop: frame hop in milliseconds, i.e. 1000 / hop frames per second.
    embeding_size: embedding dimensionality per frame.
    """
    frames = 20 * 60 * (1000 / hop)  # frames in 20 minutes
    total_bytes = frames * embeding_size * 4  # float32 = 4 bytes
    return total_bytes / (1024 * 1024 * 1024)
|
def load_model(model_path=''):
    """Build the 2-level+mel concat PaSST model; *model_path* is ignored.

    Moves the model to CUDA when available.
    """
    model = get_concat_2levelmel_model()
    return model.cuda() if torch.cuda.is_available() else model
|
def get_scene_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1]. All sounds in a batch will be padded/trimmed to the same length.
    model: Loaded Model.
    Returns:
        embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
    """
    model.eval()
    with torch.no_grad():
        embeddings = model.get_scene_embeddings(audio)
    return embeddings
|
def get_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    return get_2lvlmel_timestamp_embeddings(audio, model)
|
def get_basic_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    model.eval()
    with torch.no_grad():
        result = model.get_timestamp_embeddings(audio)
    return result
|
def get_basic_model():
    """Basic PaSST wrapper: AugmentMelSTFT front end + passt_s_swa_p16_128_ap476 net."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net)
|
def get_concat_2level_model():
    """PaSST wrapper whose timestamp embeddings concatenate two window sizes
    (hence timestamp_embedding_size = 1295 * 2)."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, timestamp_embedding_size=(1295 * 2))
|
def get_2lvl_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    model.eval()
    with torch.no_grad():
        embed_fine, ts = model.get_timestamp_embeddings(audio)
        embed_coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    # Concatenate the two resolutions on the feature axis; keep the
    # fine-resolution timestamps.
    return torch.cat((embed_fine, embed_coarse), dim=-1), ts
|
def get_concat_2levelmel_model():
    """PaSST wrapper whose timestamp embeddings concatenate two window sizes
    plus mel features (timestamp_embedding_size = 768 + 1295 * 2)."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, timestamp_embedding_size=(768 + (1295 * 2)))
|
def get_2lvlmel_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    model.eval()
    with torch.no_grad():
        embed_mel, _ = model.get_timestamp_mels(audio, window_size=1920)
        embed_fine, ts = model.get_timestamp_embeddings(audio)
        embed_coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 4)
    # Concatenate both PaSST resolutions and the mel features; keep the
    # fine-resolution timestamps.
    return torch.cat((embed_fine, embed_coarse, embed_mel), dim=-1), ts
|
def load_model(model_path='', mode='all', **kwds):
    """Build the basic PaSST model; *model_path* is ignored."""
    return get_basic_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
|
def get_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    return get_basic_timestamp_embeddings(audio, model)
|
def get_basic_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    result = model.get_timestamp_embeddings(audio)
    return result
|
def get_basic_model(**kwargs):
    """Basic PaSST wrapper; extra kwargs are forwarded to PasstBasicWrapper."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, **kwargs)
|
def load_model(model_path='', mode='all', scene_hop=5000, **kwds):
    """
    scene_hop: hop size for the overlapping windows used when the scene
        audio length exceeds 20 seconds.
    Returns:
        model: wrapped PaSST model that can take up to 20 seconds of audio
        without averaging the embeddings. *model_path* is ignored.
    """
    return get_basic_model(mode=mode, scene_hop=scene_hop, **kwds)
|
def get_scene_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
|
def get_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    return get_basic_timestamp_embeddings(audio, model)
|
def get_basic_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    result = model.get_timestamp_embeddings(audio)
    return result
|
def get_basic_model(**kwargs):
    """PaSST wrapper for the 20-second variant (passt_20sec, input_tdim=2000,
    max_model_window=20000 ms); extra kwargs forwarded to PasstBasicWrapper."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_20sec', input_tdim=2000)
    return PasstBasicWrapper(mel=mel, net=net, max_model_window=20000, **kwargs)
|
def load_model(model_path='', mode='all', **kwds):
    """Build the 2-level concat PaSST model; *model_path* is ignored."""
    return get_concat_2level_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
|
def get_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    return get_2lvl_timestamp_embeddings(audio, model)
|
def get_concat_2level_model(**kwargs):
    """2-level concat PaSST wrapper (timestamp_embedding_size = 1295 * 2);
    extra kwargs are forwarded to PasstBasicWrapper."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, timestamp_embedding_size=(1295 * 2), **kwargs)
|
def get_2lvl_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    embed_fine, ts = model.get_timestamp_embeddings(audio)
    embed_coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 5)
    # Concatenate the two resolutions; keep the fine-resolution timestamps.
    return torch.cat((embed_fine, embed_coarse), dim=-1), ts
|
def load_model(model_path='', mode='all', **kwds):
    """Build the 2-level+mel concat PaSST model; *model_path* is ignored."""
    return get_concat_2levelmel_model(mode=mode, **kwds)
|
def get_scene_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
|
def get_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    return get_2lvlmel_timestamp_embeddings(audio, model)
|
def get_concat_2levelmel_model(**kwargs):
    """2-level+mel concat PaSST wrapper (timestamp_embedding_size =
    768 + 1295 * 2); extra kwargs are forwarded to PasstBasicWrapper."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
    return PasstBasicWrapper(mel=mel, net=net, timestamp_embedding_size=(768 + (1295 * 2)), **kwargs)
|
def get_2lvlmel_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    embed_mel, _ = model.get_timestamp_mels(audio, window_size=1920)
    embed_fine, ts = model.get_timestamp_embeddings(audio)
    embed_coarse, _ = model.get_timestamp_embeddings(audio, window_size=model.timestamp_window * 4)
    # Concatenate both resolutions and the mel features; keep the
    # fine-resolution timestamps.
    return torch.cat((embed_fine, embed_coarse, embed_mel), dim=-1), ts
|
def load_model(model_path='', mode='all', scene_hop=10000, **kwds):
    """
    scene_hop: hop size for the overlapping windows used when the scene
        audio length exceeds 30 seconds.
    Returns:
        model: wrapped PaSST model that can take up to 30 seconds of audio
        without averaging the embeddings. *model_path* is ignored.
    """
    return get_basic_model(mode=mode, scene_hop=scene_hop, **kwds)
|
def get_scene_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        embedding: A float32 Tensor with shape (n_sounds, model.scene_embedding_size).
    """
    embeddings = model.get_scene_embeddings(audio)
    return embeddings
|
def get_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    return get_basic_timestamp_embeddings(audio, model)
|
def get_basic_timestamp_embeddings(audio, model):
    """
    audio: n_sounds x n_samples of mono audio in the range [-1, 1].
    model: Loaded Model.
    Returns:
        (embedding, timestamps): embedding is (n_sounds, n_timestamps, model.timestamp_embedding_size); timestamps are centered, in milliseconds.
    """
    result = model.get_timestamp_embeddings(audio)
    return result
|
def get_basic_model(**kwargs):
    """PaSST wrapper for the 30-second variant (passt_30sec, input_tdim=3000,
    max_model_window=30000 ms); extra kwargs forwarded to PasstBasicWrapper."""
    mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
    net = get_model_passt(arch='passt_30sec', input_tdim=3000)
    return PasstBasicWrapper(mel=mel, net=net, max_model_window=30000, **kwargs)
|
# NOTE(review): dataset-viewer footer text accidentally captured in this file;
# commented out so the module remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.