# NOTE: this file concatenates several s3prl upstream modules
# (decoar, distiller, espnet_hubert, example, huggingface wrappers,
# hubert, interfaces, lighthubert).
def create_transform():
    """Build the default feature-extraction transform for this upstream."""
    extractor = FeatureExtractor()
    return extractor
|
def decoar_layers_custom(ckpt: str, refresh=False, *args, **kwargs):
    """Build a DECOAR-layers upstream expert.

    ckpt (str): local checkpoint path, or an http(s) URL downloaded first.
    refresh (bool): re-download the checkpoint even if a cached copy exists.
    """
    is_remote = ckpt.startswith('http')
    if is_remote:
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def decoar_layers_local(*args, **kwargs):
    """Alias of decoar_layers_custom for local checkpoint paths."""
    return decoar_layers_custom(*args, **kwargs)
|
def decoar_layers_url(*args, **kwargs):
    """Alias of decoar_layers_custom for checkpoint URLs."""
    return decoar_layers_custom(*args, **kwargs)
|
def decoar_layers(*args, refresh=False, **kwargs):
    """
    The default DECOAR model (layer-wise features).

    refresh (bool): whether to download ckpt/config again if existed
    """
    # NOTE(review): the original docstring said "The apc standard model on
    # 360hr", which contradicts the DECOAR checkpoint loaded below — fixed.
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/checkpoint_decoar.pt'
    return decoar_layers_url(*args, refresh=refresh, **kwargs)
|
class UpstreamExpert(UpstreamBase):
    """The Distiller (e.g. DistilHuBERT) wrapper.

    Loads a PretrainedDistiller either from an explicit YAML config file or
    from built-in default options, and exposes its hidden states through the
    s3prl upstream interface.
    """
    def __init__(self, ckpt, model_config=None, **kwargs):
        # ckpt: path to the pretrained distiller checkpoint.
        # model_config: optional YAML file overriding the default options.
        super().__init__(**kwargs)
        if (model_config is not None):
            print('[UpstreamExpert] - Using upstream expert config file from:', model_config)
            with open(model_config, 'r') as file:
                options = yaml.load(file, Loader=yaml.FullLoader)
        else:
            print('[UpstreamExpert] - Using the default upstream expert config')
            # Options are strings ('True'/'False') — presumably parsed by
            # PretrainedDistiller itself; TODO confirm.
            options = {'load_pretrain': 'True', 'no_grad': 'False', 'permute_input': 'False'}
        options['ckpt_file'] = ckpt
        self.model = PretrainedDistiller(options)
    def get_downsample_rates(self, key: str) -> int:
        # Hop of 320 input samples per output frame (20 ms assuming 16 kHz
        # audio — confirm against the training setup).
        return 320
    def forward(self, wavs, no_pred=False):
        # layer_hidden: per-layer encoder hidden states; pred: prediction-head
        # outputs (skipped when no_pred is True).
        (_, feat_final, pred, pad_mask, layer_hidden) = self.model(wavs, get_hidden=True, no_pred=no_pred)
        if (not no_pred):
            # Split pred along its (presumed) layer axis into a list of
            # per-layer tensors — TODO confirm pred layout.
            hidden_feats = pred.transpose(0, 1).split(1, 0)
            hidden_feats = [hid.squeeze(0) for hid in hidden_feats]
        else:
            hidden_feats = []
        # Ordering: final feature, then encoder layers, then prediction heads.
        hidden_feats = (([feat_final] + layer_hidden) + hidden_feats)
        states = {'last_hidden_state': (None if no_pred else hidden_feats[(- 1)]), 'hidden_states': hidden_feats, 'pad_mask': pad_mask, 'paper': layer_hidden[(- 1)]}
        return states
|
def distiller_local(ckpt, *args, **kwargs):
    """Build a Distiller upstream from a local checkpoint file.

    ckpt (str): path to an existing checkpoint file.
    """
    assert os.path.isfile(ckpt)
    expert = _UpstreamExpert(ckpt, *args, **kwargs)
    return expert
|
def distiller_url(ckpt, refresh=False, *args, **kwargs):
    """Build a Distiller upstream from a checkpoint URL.

    ckpt (str): URL
    refresh (bool): whether to download ckpt/config again if existed
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return distiller_local(local_path, *args, **kwargs)
|
def distilhubert(refresh=False, *args, **kwargs):
    """DistilHuBERT — alias of distilhubert_base."""
    return distilhubert_base(*args, refresh=refresh, **kwargs)
|
def distilhubert_base(refresh=False, *args, **kwargs):
    """DistilHuBERT Base — the default model in https://arxiv.org/abs/2110.01900."""
    ckpt_url = 'https://huggingface.co/leo19941227/distilhubert/resolve/main/distilhubert_ls960_4-8-12.ckpt'
    kwargs['ckpt'] = ckpt_url
    return distiller_url(*args, refresh=refresh, **kwargs)
|
def init_bert_params(module):
    """Initialize weights BERT-style: normal(mean=0, std=0.02) for Linear,
    Embedding and MultiheadAttention q/k/v projections; zero Linear biases
    and the Embedding padding row.
    """
    def _bert_normal(data):
        # Draw on CPU then copy back — presumably so the draws are identical
        # regardless of the tensor's device RNG; confirm if it matters.
        data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
    if isinstance(module, nn.Linear):
        _bert_normal(module.weight.data)
        if module.bias is not None:
            module.bias.data.zero_()
    if isinstance(module, nn.Embedding):
        _bert_normal(module.weight.data)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    if isinstance(module, MultiheadAttention):
        for proj in (module.q_proj, module.k_proj, module.v_proj):
            _bert_normal(proj.weight.data)
|
class SplitLinear(nn.Module):
'Split Linear Layer'
def __init__(self, in_dim, in_split, out_dim):
super().__init__()
self.in_dim = in_dim
self.in_split = in_split
self.out_dim = out_dim
if (in_split > 1):
weight = torch.zeros((self.in_split, self.in_dim, self.out_dim))
self.weight = nn.Parameter(weight, requires_grad=True)
nn.init.uniform_(self.weight, (- (self.in_dim ** (- 0.5))), (self.in_dim ** (- 0.5)))
bias = torch.zeros((1, 1, self.in_split, self.out_dim))
self.bias = nn.Parameter(bias, requires_grad=True)
nn.init.uniform_(self.bias, (- (self.in_dim ** (- 0.5))), (self.in_dim ** (- 0.5)))
else:
self.layer = nn.Linear(self.in_dim, self.out_dim)
def forward(self, x: torch.Tensor):
if (self.in_split == 1):
return self.layer(x)
else:
x = x.reshape(x.shape[0], x.shape[1], self.in_split, 1, self.in_dim)
out = torch.einsum('...klm,kmn->...kln', x, self.weight).squeeze(3)
out = (out + self.bias)
return out.reshape(x.shape[0], x.shape[1], (- 1))
|
class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.

    Supports three self-attention variants ('original', 'sparse', 'dynamic')
    and both pre-LayerNorm (layer_norm_first=True) and post-LayerNorm
    residual arrangements.
    """
    def __init__(self, embedding_dim: float=768, ffn_embedding_dim: float=3072, num_attention_heads: float=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', layer_norm_first: bool=False, attention_type: str='original') -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        # Resolve the activation name (e.g. relu/gelu) to a callable once.
        self.activation_fn = get_activation_fn(activation_fn)
        self.attention_type = attention_type
        if (attention_type == 'original'):
            self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True)
        elif (attention_type == 'sparse'):
            # Imported lazily: these fairseq modules are optional dependencies.
            from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
            self.self_attn = SparseMultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True, stride=32, expressivity=16)
        elif (attention_type == 'dynamic'):
            from fairseq.modules import DynamicConv
            self.self_attn = DynamicConv(self.embedding_dim, kernel_size=31, padding_l=15, num_heads=num_attention_heads, weight_dropout=0.0, weight_softmax=True, bias=True)
        else:
            raise NotImplementedError(f'Unknown attention type {attention_type}')
        self.dropout1 = nn.Dropout(dropout)  # after self-attention
        self.dropout2 = nn.Dropout(self.activation_dropout)  # after FFN activation
        self.dropout3 = nn.Dropout(dropout)  # after FFN output
        self.layer_norm_first = layer_norm_first
        self.self_attn_layer_norm = nn.LayerNorm(self.embedding_dim)
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
        self.final_layer_norm = nn.LayerNorm(self.embedding_dim)
    def forward_self_attn(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None, need_weights: bool=False):
        """Dispatch to the configured self-attention module.

        Dynamic conv takes no masks and returns no attention weights (attn
        is None in that case).
        """
        if (self.attention_type in ['original', 'sparse']):
            (x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=need_weights, attn_mask=self_attn_mask)
        elif (self.attention_type == 'dynamic'):
            x = self.self_attn(x)
            attn = None
        return (x, attn)
    def forward(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None, need_weights: bool=False, att_args=None):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.

        Returns (x, attn). NOTE: in the pre-LN branch need_weights is forced
        to False, so attn is whatever the attention module returns for that
        case. att_args is accepted but unused.
        """
        residual = x
        if self.layer_norm_first:
            # Pre-LN: norm -> attn -> dropout -> residual add,
            # then norm -> FFN -> dropout -> residual add.
            x = self.self_attn_layer_norm(x)
            (x, attn) = self.forward_self_attn(x, self_attn_mask=self_attn_mask, need_weights=False, self_attn_padding_mask=self_attn_padding_mask)
            x = self.dropout1(x)
            x = (residual + x)
            residual = x
            x = self.final_layer_norm(x)
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = (residual + x)
        else:
            # Post-LN: attn -> dropout -> residual add -> norm,
            # then FFN -> dropout -> residual add -> norm.
            (x, attn) = self.forward_self_attn(x, self_attn_mask=self_attn_mask, need_weights=need_weights, self_attn_padding_mask=self_attn_padding_mask)
            x = self.dropout1(x)
            x = (residual + x)
            x = self.self_attn_layer_norm(x)
            residual = x
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = (residual + x)
            x = self.final_layer_norm(x)
        return (x, attn)
|
class TransformerEncoder(nn.Module):
    """Stack of TransformerSentenceEncoderLayer with a convolutional
    positional embedding (wav2vec 2.0 style) and layerdrop.
    """
    def __init__(self, args):
        super().__init__()
        self.dropout = args.dropout
        self.embedding_dim = args.encoder_embed_dim
        # Grouped 1-D convolution over time acts as the positional embedding.
        self.pos_conv = nn.Conv1d(self.embedding_dim, self.embedding_dim, kernel_size=args.conv_pos, padding=(args.conv_pos // 2), groups=args.conv_pos_groups)
        dropout = 0
        std = math.sqrt(((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim)))
        nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
        nn.init.constant_(self.pos_conv.bias, 0)
        self.pos_conv = nn.utils.weight_norm(self.pos_conv, name='weight', dim=2)
        # SamePad presumably removes the extra frame produced by the even/odd
        # kernel padding — confirm against its definition.
        self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
        print(f'[TransformerEncoder] - Attention type = {args.attention_type}')
        self.layers = nn.ModuleList([TransformerSentenceEncoderLayer(embedding_dim=self.embedding_dim, ffn_embedding_dim=args.encoder_ffn_embed_dim, num_attention_heads=args.encoder_attention_heads, dropout=self.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_fn=args.activation_fn, layer_norm_first=args.layer_norm_first, attention_type=args.attention_type) for _ in range(args.encoder_layers)])
        self.layer_norm_first = args.layer_norm_first
        self.layer_norm = nn.LayerNorm(self.embedding_dim)
        self.layerdrop = args.encoder_layerdrop
        self.apply(init_bert_params)
    def forward(self, x, padding_mask=None, attn_mask=None, get_hidden=False):
        """Run the encoder; returns (features, layer_results)."""
        (x, layer_results) = self.extract_features(x, padding_mask, attn_mask, get_hidden=get_hidden)
        if self.layer_norm_first:
            # Pre-LN stacks need one final LayerNorm on the output.
            x = self.layer_norm(x)
        return (x, layer_results)
    def extract_features(self, x, padding_mask=None, attn_mask=None, get_hidden=False):
        if (padding_mask is not None):
            # Zero padded frames in-place (mutates the caller's tensor)
            # before computing the positional convolution.
            x[padding_mask] = 0
        x_conv = self.pos_conv(x.transpose(1, 2))
        x_conv = x_conv.transpose(1, 2)
        x = (x + x_conv)
        if (not self.layer_norm_first):
            x = self.layer_norm(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C (the attention modules expect time-first).
        x = x.transpose(0, 1)
        layer_results = []
        for (i, layer) in enumerate(self.layers):
            # Layerdrop: randomly skip whole layers during training only.
            dropout_probability = np.random.random()
            if ((not self.training) or (dropout_probability > self.layerdrop)):
                (x, z) = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, self_attn_mask=attn_mask)
                if get_hidden:
                    # Store hidden states back in batch-first layout.
                    layer_results.append(x.transpose(0, 1))
        x = x.transpose(0, 1)
        return (x, layer_results)
|
class UpstreamExpert(torch.nn.Module):
    """Wrapper exposing an ESPnet HuBERT pretraining model as an s3prl upstream."""

    def __init__(self, ckpt, config=None, **kwargs):
        super().__init__()
        device = ('cuda' if torch.cuda.is_available() else 'cpu')
        assert (HubertTask is not None), 'ESPnet is not installed, run `external_tools/install_espnet.sh` to install'
        hubert_model, hubert_train_args = HubertTask.build_model_from_file(config, ckpt, device)
        # Record the device the parameters actually landed on.
        self.device = next(hubert_model.parameters()).device
        self.model = hubert_model.encoder.hubert_pretrain_model

    def get_downsample_rates(self, key: str = None) -> int:
        """Hop size in audio samples between consecutive output frames."""
        return 320

    def forward(self, wavs):
        lengths = torch.LongTensor([len(wav) for wav in wavs]).to(self.device)
        padded = pad_sequence(wavs, batch_first=True).to(self.device)
        features = self.model.wav2vec2.extract_features(padded, lengths)[0]
        return {'hidden_states': features}
|
def espnet_hubert_custom(ckpt, *args, config=None, **kwargs):
    """Build an ESPnet HuBERT upstream.

    ckpt (str): checkpoint path.
    config (str): optional ESPnet config YAML path.

    Bug fix: the keyword-only `config` used to be silently dropped; it is
    now forwarded to the expert. Positional callers (ckpt, config) are
    unaffected since their config travels through *args as before.
    """
    if config is not None:
        kwargs['config'] = config
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def espnet_hubert_local(*args, **kwargs):
    """Alias of espnet_hubert_custom for local checkpoints."""
    return espnet_hubert_custom(*args, **kwargs)
|
def cvhubert(*args, refresh=False, **kwargs):
    """CV-HuBERT trained with ESPnet; downloads checkpoint and config."""
    ckpt_url = 'https://huggingface.co/espnet/espnet_cvhubert/resolve/main/exp/hubert_iter2_train_ssl_torchaudiohubert_base_960h_pretrain_it2_raw/latest.pth'
    cfg_url = 'https://huggingface.co/espnet/espnet_cvhubert/raw/main/exp/hubert_iter2_train_ssl_torchaudiohubert_base_960h_pretrain_it2_raw/config.yaml'
    ckpt, config = _urls_to_filepaths(ckpt_url, cfg_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
|
def wavlablm_ek_40k(*args, refresh=False, **kwargs):
    """WavLabLM-EK 40k (ESPnet); downloads checkpoint and config."""
    ckpt_url = 'https://huggingface.co/espnet/WavLabLM-EK-40k/resolve/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_cont_raw_layer_9/5epoch.pth'
    cfg_url = 'https://huggingface.co/espnet/WavLabLM-EK-40k/raw/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_cont_raw_layer_9/config.yaml'
    ckpt, config = _urls_to_filepaths(ckpt_url, cfg_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
|
def wavlablm_ms_40k(*args, refresh=False, **kwargs):
    """WavLabLM-MS 40k (ESPnet); downloads checkpoint and config."""
    ckpt_url = 'https://huggingface.co/espnet/WavLabLM-MS-40k/resolve/main/exp_babel/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_babel_light_raw_layer_9/5epoch.pth'
    cfg_url = 'https://huggingface.co/espnet/WavLabLM-MS-40k/raw/main/exp_babel/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_babel_light_raw_layer_9/config.yaml'
    ckpt, config = _urls_to_filepaths(ckpt_url, cfg_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
|
def wavlablm_mk_40k(*args, refresh=False, **kwargs):
    """WavLabLM-MK 40k (ESPnet); downloads checkpoint and config."""
    ckpt_url = 'https://huggingface.co/espnet/WavLabLM-MK-40k/resolve/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_raw_layer_9/valid.acc_m.ave_10best.pth'
    cfg_url = 'https://huggingface.co/espnet/WavLabLM-MK-40k/raw/main/exp_li/hubert_iter2_train_ssl_torchaudiohubert_large_960h_pretrain_it2_wavlm_raw_layer_9/config.yaml'
    ckpt, config = _urls_to_filepaths(ckpt_url, cfg_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
|
def espnet_hubert_base_iter1(*args, refresh=False, **kwargs):
    """ESPnet HuBERT base, iteration 1; downloads checkpoint and config."""
    ckpt_url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/resolve/main/exp/hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/valid.loss.ave.pth'
    cfg_url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/raw/main/exp/hubert_iter1_train_ssl_torchaudiohubert_base_960h_pretrain_it1_raw/config.yaml'
    ckpt, config = _urls_to_filepaths(ckpt_url, cfg_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
|
def espnet_hubert_base_iter0(*args, refresh=False, **kwargs):
    """ESPnet HuBERT base, iteration 0; downloads checkpoint and config."""
    ckpt_url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/resolve/main/exp/hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/valid.loss.ave.pth'
    cfg_url = 'https://huggingface.co/espnet/simpleoier_librispeech_hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/raw/main/exp/hubert_iter0_train_ssl_torchaudiohubert_base_960h_pretrain_it0_raw/config.yaml'
    ckpt, config = _urls_to_filepaths(ckpt_url, cfg_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
|
def espnet_hubert_large_gs_ll60k(*args, refresh=False, **kwargs):
    """ESPnet HuBERT large (GS-16, LibriLight 60k); downloads ckpt and config.

    Bug fix: the config URL used the huggingface `/blob/main/` web-UI path,
    which serves an HTML page instead of the raw YAML. Changed to
    `/raw/main/`, matching every sibling loader in this module.
    """
    url = 'https://huggingface.co/espnet/hubert_large_gs_16_librilight60k/resolve/main/mnt/datastore/exp/hubert_iter1_train_ssl_torchaudiohubert_large_960h_pretrain_it2_bins_raw/valid.loss.ave_10best.pth'
    config_url = 'https://huggingface.co/espnet/hubert_large_gs_16_librilight60k/raw/main/mnt/datastore/exp/hubert_iter1_train_ssl_torchaudiohubert_large_960h_pretrain_it2_bins_raw/config.yaml'
    (ckpt, config) = _urls_to_filepaths(url, config_url, refresh=refresh)
    return espnet_hubert_custom(ckpt, config)
|
class UpstreamExpert(nn.Module):
    """Example upstream demonstrating the s3prl upstream interface."""

    def __init__(self, ckpt: str = None, model_config: str = None, **kwargs):
        """
        Args:
            ckpt:
                The checkpoint path for loading your pretrained weights.
                Can be assigned by the -k option in run_downstream.py
            model_config:
                The config path for constructing your model.
                Might not needed if you also save that in your checkpoint file.
                Can be assigned by the -g option in run_downstream.py
        """
        super().__init__()
        self.name = '[Example UpstreamExpert]'
        print(f'{self.name} - You can use model_config to construct your customized model: {model_config}')
        print(f'{self.name} - You can use ckpt to load your pretrained weights: {ckpt}')
        print(f"{self.name} - If you store the pretrained weights and model config in a single file, you can just choose one argument (ckpt or model_config) to pass. It's up to you!")
        self.model1 = nn.Linear(1, HIDDEN_DIM)
        self.model2 = nn.Linear(HIDDEN_DIM, HIDDEN_DIM)

    def get_downsample_rates(self, key: str) -> int:
        """No downsampling happens in this example, so every key maps to 1."""
        return 1

    def forward(self, wavs: List[Tensor]) -> Dict[str, Union[Tensor, List[Tensor]]]:
        """
        When the returning Dict contains the List with more than one Tensor,
        those Tensors should be in the same shape to train a weighted-sum on them.
        """
        padded = pad_sequence(wavs, batch_first=True).unsqueeze(-1)
        hidden = self.model1(padded)
        feature = self.model2(hidden)
        task_keys = ['hidden_states', 'PR', 'ASR', 'QbE', 'SID', 'ASV', 'SD', 'ER', 'SF', 'SE', 'SS', 'secret']
        # A fresh [hidden, feature] list is built per key, mirroring the
        # original literal dict.
        return {key: [hidden, feature] for key in task_keys}
|
def customized_upstream(*args, **kwargs):
    """Registration entry for a user's customized pretrained model.

    Implement upstream/example/expert.py and leave this function as is; it
    only registers the UpstreamExpert defined there. s3prl/hub.py collects
    every callable in this file without an underscore prefix into a
    centralized upstream factory, so the model can be built either via

        from s3prl.hub import customized_upstream
        model = customized_upstream(ckpt, model_config)

    or via torch.hub.load('your_s3prl_path', 'customized_upstream', ckpt,
    model_config, source='local'). run_downstream.py and
    downstream/runner.py follow the first usage.
    """
    return _UpstreamExpert(*args, **kwargs)
|
class UpstreamExpert(torch.nn.Module):
    """Huggingface HuBERT wrapper exposing all hidden states.

    Falls back to a default facebook feature extractor when the requested
    model repo ships no extractor config.
    """

    def __init__(self, ckpt, **kwds):
        super().__init__()
        try:
            self.extracter = Wav2Vec2FeatureExtractor.from_pretrained(ckpt)
        # Bug fix: was a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; narrowed to Exception.
        except Exception:
            if ('base' in ckpt):
                alter_extractor = 'facebook/hubert-base-ls960'
            else:
                alter_extractor = 'facebook/hubert-large-ll60k'
            # Typo fix in the log message ("correspoinding" -> "corresponding").
            logger.info(f"The model {ckpt} on huggingface does not have a corresponding feature extractor. Using {alter_extractor}'s feature extractor as the alternative.")
            self.extracter = Wav2Vec2FeatureExtractor.from_pretrained(alter_extractor)
        self.model = HubertModel.from_pretrained(ckpt)

    def get_downsample_rates(self, key: str = None) -> int:
        """Hop size in audio samples between consecutive output frames."""
        return 320

    def forward(self, wavs):
        device = wavs[0].device
        wavs = [wav.detach().cpu().numpy() for wav in wavs]
        input_values = self.extracter(wavs, return_tensors='pt', padding=True, return_attention_mask=True, sampling_rate=SAMPLE_RATE).to(device)
        output_values = self.model(**input_values, output_hidden_states=True)
        return {'hidden_states': output_values.hidden_states}
|
def hf_hubert_custom(ckpt, *args, **kwargs):
    """Build a Huggingface HuBERT upstream from a model id or path."""
    expert = _UpstreamExpert(ckpt, *args, **kwargs)
    return expert
|
class UpstreamExpert(torch.nn.Module):
    """Huggingface wav2vec 2.0 wrapper exposing all hidden states."""

    def __init__(self, ckpt, **kwds):
        super().__init__()
        self.extracter = Wav2Vec2FeatureExtractor.from_pretrained(ckpt)
        self.model = Wav2Vec2Model.from_pretrained(ckpt)

    def get_downsample_rates(self, key: str = None) -> int:
        """Hop size in audio samples between consecutive output frames."""
        return 320

    def forward(self, wavs):
        target_device = wavs[0].device
        numpy_wavs = [wav.detach().cpu().numpy() for wav in wavs]
        batch = self.extracter(numpy_wavs, return_tensors='pt', padding=True, return_attention_mask=True, sampling_rate=SAMPLE_RATE).to(target_device)
        outputs = self.model(**batch, output_hidden_states=True)
        return {'hidden_states': outputs.hidden_states}
|
def hf_wav2vec2_custom(ckpt, *args, **kwargs):
    """Build a Huggingface wav2vec 2.0 upstream from a model id or path."""
    expert = _UpstreamExpert(ckpt, *args, **kwargs)
    return expert
|
def load_and_convert_fairseq_ckpt(fairseq_source: str, output_path: str=None):
    """Convert a fairseq HuBERT checkpoint into a fairseq-independent format.

    fairseq_source (str): path to the original fairseq checkpoint.
    output_path (str): when given, the converted state is saved there
        (parent directories are created as needed).
    """
    from fairseq.data.dictionary import Dictionary
    state, cfg = load_fairseq_ckpt(fairseq_source)
    dicts: List[Dictionary] = state['task_state']['dictionaries']
    # Only the symbol tables are kept; the Dictionary objects themselves
    # are fairseq-specific.
    symbols = [dictionary.symbols for dictionary in dicts]
    output_state = {
        'task_cfg': cfg['task'],
        'model_cfg': cfg['model'],
        'model_weight': state['model'],
        'dictionaries_symbols': symbols,
    }
    if output_path is not None:
        Path(output_path).parent.mkdir(exist_ok=True, parents=True)
        torch.save(output_state, output_path)
|
def load_converted_model(ckpt: str):
    """Load a converted (fairseq-independent) HuBERT checkpoint.

    Returns (model, task_cfg). Raises ValueError when a required key is
    missing from the checkpoint.
    """
    ckpt_state = torch.load(ckpt, map_location='cpu')
    required = ('task_cfg', 'model_cfg', 'model_weight', 'dictionaries_symbols')
    for required_key in required:
        if required_key not in ckpt_state:
            raise ValueError(f'{ckpt} is not a valid checkpoint since the required key: {required_key} is missing')
    task_cfg = merge_with_parent(HubertPretrainingConfig, ckpt_state['task_cfg'])
    model_cfg = merge_with_parent(HubertConfig, ckpt_state['model_cfg'])
    model = HubertModel(model_cfg, task_cfg, ckpt_state['dictionaries_symbols'])
    model.load_state_dict(ckpt_state['model_weight'])
    return (model, task_cfg)
|
def hubert_custom(ckpt: str, legacy: bool=False, fairseq: bool=False, refresh: bool=False, **kwargs):
    """Build a HuBERT upstream expert from a local path or URL.

    Args:
        ckpt: checkpoint path, or an http(s) URL downloaded to the cache first.
        legacy: load the raw fairseq checkpoint directly.
        fairseq: first convert the fairseq checkpoint into a
            fairseq-independent file next to it, then load that.
        refresh: force re-download (and possibly re-conversion) of caches.
    """
    assert (not (legacy and fairseq)), "The option 'legacy' will directly load a fairseq checkpoint, while the option 'fairseq' will first convert the fairseq checkpoint to be fairseq indenpendent and then load the checkpoint. These two options cannot be used jointly."
    if ckpt.startswith('http'):
        ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    if fairseq:
        ckpt: Path = Path(ckpt)
        converted_ckpt = (ckpt.parent / f'{ckpt.stem}.converted.pt')
        # File lock so concurrent processes don't convert the same file at once.
        lock_file = Path((str(converted_ckpt) + '.lock'))
        logger.info(f'Converting a fairseq checkpoint: {ckpt}')
        logger.info(f'To: {converted_ckpt}')
        with FileLock(str(lock_file)):
            # Convert when missing, or when refreshing and the source ckpt is
            # older than NEW_ENOUGH_SECS. NOTE(review): the mtime check is on
            # the *source* checkpoint — confirm this matches the intent.
            if ((not converted_ckpt.is_file()) or (refresh and ((time.time() - os.path.getmtime(ckpt)) > NEW_ENOUGH_SECS))):
                load_and_convert_fairseq_ckpt(ckpt, converted_ckpt)
        ckpt = converted_ckpt
    assert os.path.isfile(ckpt)
    if legacy:
        return _LegacyUpstreamExpert(ckpt, **kwargs)
    else:
        return _UpstreamExpert(ckpt, **kwargs)
|
def hubert_local(*args, **kwargs):
    """Alias of hubert_custom for local checkpoint paths."""
    return hubert_custom(*args, **kwargs)
|
def hubert_url(*args, **kwargs):
    """Alias of hubert_custom for checkpoint URLs."""
    return hubert_custom(*args, **kwargs)
|
def hubert(refresh=False, *args, **kwargs):
    """
    The default model - Base
        refresh (bool): whether to download ckpt/config again if existed
    """
    return hubert_base(*args, refresh=refresh, **kwargs)
|
def hubert_base(refresh=False, legacy=False, **kwargs):
    """
    The Base model
        refresh (bool): whether to download ckpt/config again if existed
        legacy (bool): load the original fairseq checkpoint instead of the
            converted one.
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/hubert_base_ls960.pt'
    return hubert_custom(refresh=refresh, legacy=legacy, **kwargs)
|
def hubert_large_ll60k(refresh=False, legacy=False, **kwargs):
    """
    The Large model
        refresh (bool): whether to download ckpt/config again if existed
        legacy (bool): load the original fairseq checkpoint instead of the
            converted one.
    """
    if legacy:
        kwargs['ckpt'] = 'https://dl.fbaipublicfiles.com/hubert/hubert_large_ll60k.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/hubert_large_ll60k.pt'
    return hubert_custom(refresh=refresh, legacy=legacy, **kwargs)
|
def hubert_base_robust_mgr(refresh=False, legacy=False, **kwargs):
    """
    The Base model, continually trained with Libri 960 hr with Musan noise,
    Gaussian noise and Reverberation.
        refresh (bool): whether to download ckpt/config again if existed
        legacy (bool): load the original fairseq checkpoint instead of the
            converted one.
    """
    if legacy:
        kwargs['ckpt'] = 'https://huggingface.co/kphuang68/HuBERT_base_robust_mgr/resolve/main/HuBERT_base_robust_mgr_best_loss_2.7821.pt'
    else:
        kwargs['ckpt'] = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/HuBERT_base_robust_mgr_best_loss_2.7821.pt'
    return hubert_custom(refresh=refresh, legacy=legacy, **kwargs)
|
def mhubert_base_vp_en_es_fr_it3(refresh=False, **kwds):
    """Multilingual HuBERT base (VoxPopuli en/es/fr, iteration 3)."""
    url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/mhubert_base_vp_en_es_fr_it3.pt'
    kwds['ckpt'] = url
    return hubert_custom(refresh=refresh, **kwds)
|
def contentvec(refresh=False, **kwds):
    """ContentVec (km100 checkpoint) — same default as contentvec_km100."""
    url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/contentvec_km100.pt'
    kwds['ckpt'] = url
    return hubert_custom(refresh=refresh, **kwds)
|
def contentvec_km100(refresh=False, **kwds):
    """ContentVec trained with 100 k-means clusters."""
    url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/contentvec_km100.pt'
    kwds['ckpt'] = url
    return hubert_custom(refresh=refresh, **kwds)
|
def contentvec_km500(refresh=False, **kwds):
    """ContentVec trained with 500 k-means clusters."""
    url = 'https://huggingface.co/s3prl/converted_ckpts/resolve/main/contentvec_km500.pt'
    kwds['ckpt'] = url
    return hubert_custom(refresh=refresh, **kwds)
|
class Hook():
    """Forward-hook specification.

    module_path: dotted path to the module (evaluated by the upstream).
    transform: callable applied to the hooked (input, output) pair.
    unique_identifier: identifier for this hook; defaults to module_path.
    handler: the registered torch hook handle, filled in later.
    """

    def __init__(self, module_path, transform, unique_identifier=None):
        resolved_id = (unique_identifier or module_path)
        self.module_path = module_path
        self.transform = transform
        self.unique_identifier = resolved_id
        self.handler = None
        assert isinstance(self.module_path, str)
        assert callable(self.transform)
        assert isinstance(self.unique_identifier, str)
|
class initHook(type):
    """Metaclass: after an instance is constructed, register every hook that
    does not yet own a handler."""

    def __call__(cls, *args, **kwargs):
        obj = super().__call__(*args, **kwargs)
        for hook in obj.hooks:
            if hook.handler is None:
                obj._register_hook_handler(hook)
        return obj
|
class UpstreamBase(nn.Module, metaclass=initHook):
    """Base class for upstreams that collect hidden states via forward hooks.

    Subclasses register Hook specs; after each __call__ the hooked values
    are (optionally post-processed and) exposed as '_hidden_states_info',
    'hidden_states', 'last_hidden_state' and 'hidden_state_{i}' entries of
    the returned dict.
    """
    def __init__(self, hooks: List[Tuple]=None, hook_postprocess: Callable[([List[Tuple[(str, Tensor)]]], List[Tuple[(str, Tensor)]])]=None, **kwargs):
        """
        Args:
            hooks: each Tuple is an argument list for the Hook initializer
            hook_postprocess: optional callable mapping the collected
                (identifier, tensor) pairs to new pairs before exposure.
        """
        super().__init__()
        self.hooks: List[Hook] = ([Hook(*hook) for hook in hooks] if hooks else [])
        self.hook_postprocess = hook_postprocess
        # Filled by the registered forward hooks; cleared on every __call__.
        self._hook_hiddens: List[Tuple[str, Tensor]] = []
    def remove_all_hooks(self):
        # Detach every registered handler and drop all hook specs.
        for hook in self.hooks:
            hook.handler.remove()
        self.hooks.clear()
    def remove_hook(self, unique_identifier: str):
        # Remove only the hooks whose identifier matches.
        updated_hooks = []
        for hook in self.hooks:
            if (hook.unique_identifier == unique_identifier):
                hook.handler.remove()
            else:
                updated_hooks.append(hook)
        self.hooks = updated_hooks
    def add_hook(self, *args, **kwargs):
        # Create, register, and remember a new Hook (same args as Hook()).
        hook = Hook(*args, **kwargs)
        self._register_hook_handler(hook)
        self.hooks.append(hook)
    def _register_hook_handler(self, hook: Hook):
        # NOTE: module_path is eval()'d (e.g. "self.model.encoder") — only
        # trusted, programmer-supplied paths belong here.
        module = eval(hook.module_path)
        if (not isinstance(module, nn.Module)):
            show(f'[UpstreamBase] - {hook.module_path} is not a valid nn.Module. Skip.', file=sys.stderr)
            return
        if callable(hook.handler):
            show(f'[UpstreamBase] - Existing hook handler for {hook.unique_identifier} is found. Remove the existing one.', file=sys.stderr)
            hook.handler.remove()
        def generate_hook_handler(hiddens: List, hook: Hook):
            # Close over the shared hiddens list so every hook appends its
            # (identifier, transformed output) pair there.
            def hook_handler(self, input, output):
                hiddens.append((hook.unique_identifier, hook.transform(input, output)))
            return hook_handler
        hook.handler = module.register_forward_hook(generate_hook_handler(self._hook_hiddens, hook))
    def __call__(self, wavs: List[Tensor], *args, **kwargs):
        self._hook_hiddens.clear()
        result = (super().__call__(wavs, *args, **kwargs) or {})
        assert isinstance(result, dict)
        if (len(self._hook_hiddens) > 0):
            # These keys are reserved for hook-collected states; the child
            # class's forward must not produce them itself.
            if ((result.get('_hidden_states_info') is not None) or (result.get('hidden_states') is not None) or (result.get('last_hidden_state') is not None)):
                show("[UpstreamBase] - If there are registered hooks, '_hidden_states_info', 'hidden_states', and 'last_hidden_state' are reserved and should not be included in child class's return dict.", file=sys.stderr)
                raise ValueError
            hook_hiddens = self._hook_hiddens.copy()
            self._hook_hiddens.clear()
            if callable(self.hook_postprocess):
                hook_hiddens = self.hook_postprocess(hook_hiddens)
            (result['_hidden_states_info'], result['hidden_states']) = zip(*hook_hiddens)
            result['last_hidden_state'] = result['hidden_states'][(- 1)]
            for (layer_id, hidden_state) in enumerate(result['hidden_states']):
                result[f'hidden_state_{layer_id}'] = hidden_state
        return result
|
class Featurizer(nn.Module):
def __init__(self, upstream: UpstreamBase, feature_selection: str='hidden_states', upstream_device: str='cuda', layer_selection: int=None, normalize: bool=False, **kwargs):
super().__init__()
self.name = 'Featurizer'
upstream.eval()
paired_wavs = [torch.randn(SAMPLE_RATE).to(upstream_device)]
with torch.no_grad():
paired_features = upstream(paired_wavs)
if (feature_selection not in paired_features):
if ('hidden_states' in paired_features):
show(f'[{self.name}] - Warning: {feature_selection} is not a supported args.upstream_feature_selection. Using "hidden_states" as the default key.', file=sys.stderr)
feature_selection = 'hidden_states'
else:
show(f'[{self.name}] - Error: {feature_selection} is not a supported args.upstream_feature_selection. The default key "hidden_states" is also not supported. Please specify -s with the following options: {list(paired_wavs.keys())}', file=sys.stderr)
raise ValueError
self.feature_selection = feature_selection
self.layer_selection = layer_selection
self.normalize = normalize
feature = self._select_feature(paired_features)
if isinstance(feature, (list, tuple)):
self.layer_num = len(feature)
show(f'[{self.name}] - Take a list of {self.layer_num} features and weighted sum them.', file=sys.stderr)
self.weights = nn.Parameter(torch.zeros(self.layer_num))
feature = self._weighted_sum([f.cpu() for f in feature])
else:
feature = feature.cpu()
self.output_dim = feature.size((- 1))
if hasattr(upstream, 'get_downsample_rates'):
self.downsample_rate = upstream.get_downsample_rates(feature_selection)
show(f"[{self.name}] - The selected feature {feature_selection}'s downsample rate is {self.downsample_rate}", file=sys.stderr)
else:
self.downsample_rate = round((max((len(wav) for wav in paired_wavs)) / feature.size(1)))
show(f'[{self.name}] - Warning: The provided upstream does not give statis downsample rate by the "get_downsample_rates" interface (see upstream/example/expert.py). The downsample rate is calculated dynamically basing on the shape of the input waveforms v.s. the output features: {self.downsample_rate}', file=sys.stderr)
def _select_feature(self, features):
feature = features.get(self.feature_selection)
if isinstance(feature, dict):
feature = list(feature.values())
if (isinstance(feature, (list, tuple)) and (len(feature) == 1)):
feature = feature[0]
if (isinstance(feature, (list, tuple)) and isinstance(self.layer_selection, int)):
feature = feature[self.layer_selection]
return feature
def _weighted_sum(self, feature):
assert (self.layer_num == len(feature)), "If you run into this error, there is a great chance you are finetuning the upstream with wav2vec2's transformer blocks in weighted-sum mode (default), including wav2vec2, hubert, and decoar2. These models use the layerdrop technique which causes the different number of layer forwards between different model forwards, resulting in different number of hidden states for different model forwards. Hence, finetuning these upstreams is essentially incompatible with weight-sum mode unless you turn off the layerdrop option in fairseq. See: https://github.com/pytorch/fairseq/blob/f6abcc2a67328bee8b15c596bb626ce2d720aae6/fairseq/models/wav2vec/wav2vec2.py#L857 However, since finetuning upstreams will backward the gradient through all layers which serves the same functionality as weighted-sum: all layers can be used for different downstream tasks. Hence instead of finetuning upstream with weighted-sum, we suggest to follow the more common setting: finetuning upstream with the last layer. Please use the following options: --upstream_trainable --upstream_feature_selection last_hidden_state. Or: -f -s last_hidden_state"
stacked_feature = torch.stack(feature, dim=0)
if self.normalize:
stacked_feature = F.layer_norm(stacked_feature, (stacked_feature.shape[(- 1)],))
(_, *origin_shape) = stacked_feature.shape
stacked_feature = stacked_feature.view(self.layer_num, (- 1))
norm_weights = F.softmax(self.weights, dim=(- 1))
weighted_feature = (norm_weights.unsqueeze((- 1)) * stacked_feature).sum(dim=0)
weighted_feature = weighted_feature.view(*origin_shape)
return weighted_feature
def tolist(self, paired_wavs: List[Tensor], paired_feature: Tensor):
    """Strip batch padding: slice each utterance's feature rows down to its
    true frame count (wav length / downsample rate).

    Returns a list of (seq_len_i, feat_dim) tensors, one per input wav.
    """
    assert (paired_feature.dim() == 3), '(batch_size, max_seq_len, feat_dim)'
    feature_len = [round(len(wav) / self.downsample_rate) for wav in paired_wavs]
    longest = max(len(wav) for wav in paired_wavs)
    length_diff = abs(paired_feature.size(1) - round(longest / self.downsample_rate))
    # Guard against an upstream producing a wildly different frame count.
    assert (length_diff < TOLERABLE_SEQLEN_DIFF), f'{length_diff} >= {TOLERABLE_SEQLEN_DIFF}'
    return [feat[:flen] for feat, flen in zip(paired_feature, feature_len)]
def forward(self, paired_wavs: List[Tensor], paired_features: Dict[str, Union[Tensor, List[Tensor], Dict[str, Tensor]]]):
    """Select the configured feature from the upstream outputs, weighted-sum
    multi-layer features, and trim the padding per utterance."""
    selected = self._select_feature(paired_features)
    if isinstance(selected, (list, tuple)):
        selected = self._weighted_sum(selected)
    return self.tolist(paired_wavs, selected)
|
class UpstreamExpert(torch.nn.Module):
    """S3PRL upstream wrapper for LightHuBERT checkpoints.

    Supports checkpoints trained either as a supernet pruner
    (``hubert_pruner``) or as a distilled student (``student_hubert``).
    """
    def __init__(self, ckpt, **kwds):
        super().__init__()
        # NOTE(review): torch.load without map_location assumes the device the
        # checkpoint was saved on is available — confirm for CPU-only hosts.
        checkpoint = torch.load(ckpt)
        assert (checkpoint['cfg']['model']['_name'] in ['hubert_pruner', 'student_hubert'])
        self.cfg = LightHuBERTConfig(checkpoint['cfg']['model'])
        if (checkpoint['cfg']['model']['_name'] == 'hubert_pruner'):
            # Infer the supernet size ("small" / "base") from the yaml filename.
            if checkpoint['cfg']['model']['pruner_supernet'].lower().endswith('small.yaml'):
                self.cfg.supernet_type = 'small'
            elif checkpoint['cfg']['model']['pruner_supernet'].lower().endswith('base.yaml'):
                self.cfg.supernet_type = 'base'
        self.model = LightHuBERT(self.cfg)
        # strict=False: checkpoints may carry extra or missing keys.
        self.model.load_state_dict(checkpoint['model'], strict=False)
        if (checkpoint['cfg']['model']['_name'] == 'student_hubert'):
            # Distilled students run the largest subnet of the supernet.
            subnet = self.model.supernet.max_subnet
        else:
            # Pruned models run the subnet selected during pruning.
            subnet = self.model.supernet.subnet
        self.model.set_sample_config(subnet)
        # Disable layerdrop so every forward runs the same set of layers
        # (required for a stable number of hidden states downstream).
        self.model.encoder.layerdrop = 0.0
        params = self.model.calc_sampled_param_num()
        logger.info(f'LightHubert subnet (Params {(params / 1000000.0):.0f}M) | {subnet}')
    def get_downsample_rates(self, key: str) -> int:
        # One output frame per 320 input samples.
        return 320
    def forward(self, wavs):
        # Per-utterance waveform normalization (zero mean, unit variance).
        wavs = [F.layer_norm(wav, wav.shape) for wav in wavs]
        device = wavs[0].device
        wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device)
        # True at padded positions: position index >= utterance length.
        wav_padding_mask = (~ torch.lt(torch.arange(max(wav_lengths)).unsqueeze(0).to(device), wav_lengths.unsqueeze(1)))
        hs = self.model.extract_features(pad_sequence(wavs, batch_first=True), padding_mask=wav_padding_mask, ret_hs=True)[0]
        return {'hidden_states': hs}
|
def lighthubert_local(ckpt, *args, **kwargs):
    """Build the upstream from a checkpoint on disk.

    ckpt (str): path to a local checkpoint file
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def lighthubert_url(ckpt, refresh=False, *args, **kwargs):
    """Build the upstream from a downloadable checkpoint.

    ckpt (str): URL of the checkpoint
    refresh (bool): re-download the ckpt/config even if cached
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return lighthubert_local(local_path, *args, **kwargs)
|
def lighthubert(refresh=False, *args, **kargs):
    """Default LightHuBERT entry point — the Small model.

    refresh (bool): re-download the ckpt/config even if cached
    """
    return lighthubert_small(*args, refresh=refresh, **kargs)
|
def lighthubert_small(refresh=False, *args, **kwargs):
    """The LightHuBERT Small model.

    refresh (bool): re-download the ckpt/config even if cached
    """
    kwargs['ckpt'] = 'https://huggingface.co/mechanicalsea/lighthubert/resolve/main/lighthubert_small.pt'
    return lighthubert_url(*args, refresh=refresh, **kwargs)
|
def lighthubert_base(refresh=False, *args, **kwargs):
    """The LightHuBERT Base model.

    refresh (bool): re-download the ckpt/config even if cached
    """
    kwargs['ckpt'] = 'https://huggingface.co/mechanicalsea/lighthubert/resolve/main/lighthubert_base.pt'
    return lighthubert_url(*args, refresh=refresh, **kwargs)
|
def lighthubert_stage1(refresh=False, *args, **kwargs):
    """The LightHuBERT Stage-1 model.

    refresh (bool): re-download the ckpt/config even if cached
    """
    kwargs['ckpt'] = 'https://huggingface.co/mechanicalsea/lighthubert/resolve/main/lighthubert_stage1.pt'
    return lighthubert_url(*args, refresh=refresh, **kwargs)
|
def is_xla_tensor(tensor):
    """Return True iff *tensor* is a torch tensor on an XLA (TPU) device."""
    if not torch.is_tensor(tensor):
        return False
    return tensor.device.type == 'xla'
|
def index_put(tensor, indices, value):
    """Write *value* into *tensor* at the positions selected by the boolean
    *indices* mask; on XLA devices, avoid in-place advanced indexing by
    blending with the broadcast mask instead."""
    if not is_xla_tensor(tensor):
        tensor[indices] = value
        return tensor
    # XLA path: expand the mask to the tensor's rank and width, then blend.
    while indices.dim() < tensor.dim():
        indices = indices.unsqueeze(-1)
    if indices.size(-1) < tensor.size(-1):
        indices = indices.expand_as(tensor)
    return torch.mul(tensor, ~indices) + torch.mul(value, indices)
|
def pad_to_multiple(x, multiple, dim=(-1), value=0):
    """Right-pad *x* along *dim* so that its size becomes a multiple of
    *multiple*.

    Returns (padded_tensor, n_padded); (None, 0) when x is None.
    """
    if x is None:
        return (None, 0)
    size = x.size(dim)
    ratio = size / multiple
    if ratio.is_integer():
        return (x, 0)
    remainder = math.ceil(ratio) * multiple - size
    # F.pad lists pad pairs from the last dim backwards, so insert a zero
    # pair for every dimension after `dim`.
    trailing_pairs = (0, 0) * (-1 - dim)
    return (F.pad(x, (*trailing_pairs, 0, remainder), value=value), remainder)
|
def gelu_accurate(x):
    """Tanh-approximated GELU (Hendrycks & Gimpel)."""
    if not hasattr(gelu_accurate, '_a'):
        # Cache sqrt(2/pi) on the function object.
        gelu_accurate._a = math.sqrt(2 / math.pi)
    inner = gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
|
def gelu(x: torch.Tensor) -> torch.Tensor:
    """Exact (erf-based) GELU, evaluated in fp32 then cast back to x's dtype."""
    out = torch.nn.functional.gelu(x.float())
    return out.type_as(x)
|
def relu_squared(x: torch.Tensor):
    """Element-wise squared ReLU: max(x, 0) ** 2."""
    return F.relu(x) ** 2
|
def deprecation_warning(message, stacklevel=3):
    """Emit *message* as a warning, attributed *stacklevel* frames up the stack."""
    warnings.warn(message, stacklevel=stacklevel)
|
def get_activation_fn(activation: str) -> Callable:
    """Returns the activation function corresponding to `activation`.

    Raises RuntimeError for an unrecognized name.
    """
    def gelu_accurate(x):
        # Tanh approximation of GELU; sqrt(2/pi) cached on the function object.
        if (not hasattr(gelu_accurate, '_a')):
            gelu_accurate._a = math.sqrt((2 / math.pi))
        return ((0.5 * x) * (1 + torch.tanh((gelu_accurate._a * (x + (0.044715 * torch.pow(x, 3)))))))
    def gelu(x: torch.Tensor) -> torch.Tensor:
        # Exact GELU evaluated in fp32, cast back to the input dtype.
        return torch.nn.functional.gelu(x.float()).type_as(x)
    if (activation == 'relu'):
        return F.relu
    elif (activation == 'relu_squared'):
        return relu_squared
    elif (activation == 'gelu'):
        return gelu
    elif (activation == 'gelu_fast'):
        deprecation_warning('--activation-fn=gelu_fast has been renamed to gelu_accurate')
        return gelu_accurate
    elif (activation == 'gelu_accurate'):
        return gelu_accurate
    elif (activation == 'tanh'):
        return torch.tanh
    elif (activation == 'linear'):
        return (lambda x: x)
    elif (activation == 'swish'):
        # BUG FIX: the original returned the class `torch.nn.SiLU` itself;
        # calling that with a tensor constructs an nn.Module (inplace=tensor)
        # instead of applying the activation. Return the functional form.
        return torch.nn.functional.silu
    else:
        raise RuntimeError('--activation-fn {} not supported'.format(activation))
|
class SLayerNorm(nn.LayerNorm):
    """Slimmable LayerNorm over a variable 1-D size.

    A sub-dimension (``sample_normalized_shape``) of the full
    ``normalized_shape`` can be selected at runtime via
    :meth:`set_sample_config`; :meth:`forward` then normalizes only the
    first ``sample_normalized_shape`` features, using prefix slices of the
    weight/bias parameters.
    __base__: torch.nn.LayerNorm
    """
    def __init__(self, normalized_shape: int, eps: float=1e-05, elementwise_affine: bool=True) -> None:
        super(SLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)
        self.staticize()
    def staticize(self):
        """Reset the sampled size to the full size."""
        self.sample_normalized_shape = self.normalized_shape[0]
        # `samples` initially aliases the real Parameters; _sample_parameters
        # later replaces the entries with prefix slices (views) of them.
        self.samples = {'weight': self.weight, 'bias': self.bias}
    def set_sample_config(self, sample_normalized_shape: int):
        """Select the active sub-dimension and refresh the sliced params."""
        self.sample_normalized_shape = sample_normalized_shape
        self._sample_parameters()
    def _sample_parameters(self):
        # Slice weight/bias down to the active sub-dimension; None when the
        # layer has no affine parameters.
        if self.elementwise_affine:
            self.samples['weight'] = self.weight[:self.sample_normalized_shape]
            self.samples['bias'] = self.bias[:self.sample_normalized_shape]
        else:
            self.samples['weight'] = None
            self.samples['bias'] = None
        return self.samples
    def calc_sampled_param_num(self):
        """Parameter count of the currently sampled slice."""
        return (self.samples['weight'].numel() + self.samples['bias'].numel())
    def get_complexity(self, sequence_length):
        """Complexity proxy: one op per feature per time step."""
        return (sequence_length * self.sample_normalized_shape)
    @property
    def weights(self):
        # Sliced weight, or None without affine parameters.
        return (self.samples['weight'] if self.elementwise_affine else None)
    @property
    def biases(self):
        # Sliced bias, or None without affine parameters.
        return (self.samples['bias'] if self.elementwise_affine else None)
    @property
    def normalized_shapes(self):
        # The active shape as a tuple, whether set_sample_config received an
        # integer or an iterable.
        if isinstance(self.sample_normalized_shape, numbers.Integral):
            sample_normalized_shape = (self.sample_normalized_shape,)
        else:
            sample_normalized_shape = self.sample_normalized_shape
        return tuple(sample_normalized_shape)
    def forward(self, input: Tensor) -> Tensor:
        # Re-slice on every call so in-training parameter updates are seen.
        self._sample_parameters()
        return F.layer_norm(input, self.normalized_shapes, self.weights, self.biases, self.eps)
    def extra_repr(self) -> str:
        return f'{self.normalized_shape}, eps={self.eps}, elementwise_affine={self.elementwise_affine}'
    def clone_model(self, normalized_shape: int):
        """Materialize the sampled slice as a plain, eval-mode nn.LayerNorm."""
        self.set_sample_config(normalized_shape)
        m = nn.LayerNorm(normalized_shape, self.eps, self.elementwise_affine)
        if m.elementwise_affine:
            m = m.to(self.weight.device)
            m = m.to(self.weight.dtype)
            m.weight.data.copy_(self.weights)
            m.bias.data.copy_(self.biases)
        return m.eval()
    @classmethod
    def build_from(cls, m: nn.LayerNorm):
        """Construct an SLayerNorm initialized from a plain nn.LayerNorm."""
        normalized_shape = m.normalized_shape
        eps = m.eps
        elementwise_affine = m.elementwise_affine
        _m = cls(normalized_shape, eps, elementwise_affine)
        if _m.elementwise_affine:
            _m = _m.to(m.weight.device)
            _m = _m.to(m.weight.dtype)
            _m.weight.data.copy_(m.weight)
            _m.bias.data.copy_(m.bias)
        return _m
|
class ConvFeatureExtractionModel(nn.Module):
    """wav2vec2-style stack of 1-D convolutions over the raw waveform.

    conv_layers: list of (dim, kernel, stride) triples.
    mode: 'default' applies GroupNorm after the first conv only;
          'layer_norm' applies (transposed) LayerNorm after every conv.
    """
    def __init__(self, conv_layers: List[Tuple[(int, int, int)]], dropout: float=0.0, mode: str='default', conv_bias: bool=False):
        super().__init__()
        assert (mode in {'default', 'layer_norm'})
        def block(n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False):
            def make_conv():
                conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
                nn.init.kaiming_normal_(conv.weight)
                return conv
            assert ((is_layer_norm and is_group_norm) == False), 'layer norm and group norm are exclusive'
            # NOTE(review): the norm layers below read `dim` from the
            # enclosing loop via closure, not the `n_out` parameter; the two
            # are equal at every call site, so behavior is as intended.
            if is_layer_norm:
                # LayerNorm expects channels last, hence the Transpose pair.
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.Sequential(TransposeLast(), Fp32LayerNorm(dim, elementwise_affine=True), TransposeLast()), nn.GELU())
            elif is_group_norm:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), Fp32GroupNorm(dim, dim, affine=True), nn.GELU())
            else:
                return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
        in_d = 1
        self.conv_layers = nn.ModuleList()
        for (i, cl) in enumerate(conv_layers):
            assert (len(cl) == 3), ('invalid conv definition: ' + str(cl))
            (dim, k, stride) = cl
            self.conv_layers.append(block(in_d, dim, k, stride, is_layer_norm=(mode == 'layer_norm'), is_group_norm=((mode == 'default') and (i == 0)), conv_bias=conv_bias))
            in_d = dim
    def forward(self, x):
        # x: (batch, samples) -> add the single input channel for Conv1d.
        x = x.unsqueeze(1)
        for conv in self.conv_layers:
            x = conv(x)
        return x
|
class Spectrogram(nn.Module):
    """Magnitude (optionally log) spectrogram extractor.

    All STFT parameters come from ``cfg['spectrogram']``; only the 'hann'
    window type is supported.
    """
    def __init__(self, cfg, **kwargs):
        super(Spectrogram, self).__init__()
        self.eps = 1e-08  # floor before log to avoid log(0)
        self.cfg = cfg
        self.n_fft = cfg['spectrogram']['n_fft']
        self.hop_length = cfg['spectrogram']['hop_length']
        self.win_length = cfg['spectrogram']['win_length']
        if (cfg['spectrogram']['window'] == 'hann'):
            # BUG FIX: the original called `.to(device)` with a module-global
            # `device` name; keep the window device-agnostic here and move it
            # to the input's device at forward time instead.
            self.window = torch.hann_window(cfg['spectrogram']['win_length'])
        else:
            raise ValueError('Window type not defined.')
        self.center = cfg['spectrogram']['center']
        self.log = cfg['spectrogram']['log']
    def get_output_dim(self):
        """Number of frequency bins: n_fft // 2 + 1."""
        return ((self.n_fft // 2) + 1)
    def get_downsample_rate(self):
        """Input samples per output frame (the STFT hop length)."""
        return self.hop_length
    def forward(self, waveform):
        """waveform: 1-D tensor -> (frames, freq_bins) magnitude/log spectrogram."""
        window = self.window.to(waveform.device)
        spec = torch.stft(waveform, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=window, center=self.center, pad_mode='reflect', normalized=False, return_complex=True)
        # stft returns (freq, frames); transpose to time-major.
        x = torch.transpose(torch.abs(spec), 0, 1)
        if self.log:
            x = torch.log(torch.clamp(x, min=self.eps))
        return x
|
class UpstreamExpert(nn.Module):
    """Spectrogram-feature upstream: extracts torch-STFT spectrograms from
    raw waveforms according to a yaml model config."""
    def __init__(self, model_config=None, **kwargs):
        super(UpstreamExpert, self).__init__()
        with open(model_config, 'r') as file:
            self.config = yaml.load(file, Loader=yaml.FullLoader)
        self.extracter = Spectrogram(self.config)
        self.output_dim = self.extracter.get_output_dim()
        self.downsample_rate = self.extracter.get_downsample_rate()
    def get_downsample_rates(self, key: str) -> int:
        """Input samples per output frame."""
        return self.downsample_rate
    def _extractor_forward(self, wavs):
        # One spectrogram per (variable-length) utterance.
        return [self.extracter(wav) for wav in wavs]
    def forward(self, wavs):
        """wavs: list of 1-D tensors -> dict with the padded batch feature."""
        padded = pad_sequence(self._extractor_forward(wavs), batch_first=True)
        return {'last_hidden_state': [padded], 'hidden_states': [padded]}
|
def stft_mag(model_config, *args, **kwargs):
    """Build the STFT-magnitude upstream from a local yaml config file."""
    assert os.path.isfile(model_config)
    return _UpstreamExpert(model_config, *args, **kwargs)
|
def mae_ast_local(ckpt, *args, **kwargs):
    """Build the MAE-AST upstream from a checkpoint on disk.

    ckpt (str): path to a local checkpoint file
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def mae_ast_url(ckpt, refresh=False, *args, **kwargs):
    """Build the MAE-AST upstream from a downloadable checkpoint.

    ckpt (str): URL of the checkpoint
    refresh (bool): re-download the ckpt even if cached
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return mae_ast_local(local_path, *args, **kwargs)
|
def mae_ast_frame(refresh=False, *args, **kwargs):
    """MAE-AST Frame model: 12-layer encoder, random masking.

    refresh (bool): re-download the ckpt even if cached
    """
    kwargs['ckpt'] = 'https://www.cs.utexas.edu/~harwath/model_checkpoints/mae_ast/random_frame_75_12LayerEncoder.pt'
    return mae_ast_url(*args, refresh=refresh, **kwargs)
|
def mae_ast_patch(refresh=False, *args, **kwargs):
    """MAE-AST Patch model: 12-layer encoder, chunked masking.

    refresh (bool): re-download the ckpt even if cached
    """
    kwargs['ckpt'] = 'https://www.cs.utexas.edu/~harwath/model_checkpoints/mae_ast/chunk_patch_75_12LayerEncoder.pt'
    return mae_ast_url(*args, refresh=refresh, **kwargs)
|
@dataclass
class MAE_AST_Pretraining_Config():
    """Options for MAE-AST pretraining: data location, cropping/padding,
    input feature extraction, and the masking strategy.

    Per-field help strings live in each field's ``metadata``.
    """
    data: str = field(default=MISSING, metadata={'help': 'path to data directory'})
    sample_rate: int = field(default=16000, metadata={'help': 'target sample rate. audio files will be up/down sampled to this rate'})
    normalize: bool = field(default=False, metadata={'help': 'if set, normalizes input to have 0 mean and unit variance'})
    enable_padding: bool = field(default=False, metadata={'help': 'pad shorter samples instead of cropping'})
    max_keep_size: Optional[int] = field(default=None, metadata={'help': 'exclude sample longer than this'})
    max_sample_size: Optional[int] = field(default=None, metadata={'help': 'max sample size to crop to for batching'})
    min_sample_size: Optional[int] = field(default=None, metadata={'help': 'min sample size to crop to for batching'})
    random_crop: Optional[bool] = field(default=True, metadata={'help': 'always crop from the beginning if false'})
    pad_audio: Optional[bool] = field(default=False, metadata={'help': 'pad audio to the longest one in the batch if true'})
    feature_type: Optional[str] = field(default='wav', metadata={'help': "choose from ['wav', 'spectrogram', 'fbank', 'mfcc']"})
    feature_rate: Optional[int] = field(default=100, metadata={'help': 'rate of feature input to the transformer, if use wav, this arg is omited, else if use spectrogram/fbank/mfcc, the default is 100, i.e. 1s audio gives 100 frames. the label rate of using MFCC is also 100'})
    feature_dim: Optional[int] = field(default=100, metadata={'help': 'dim feature input to the transformer, if use wav, this arg is omited, else if use spectrogram/fbank/mfcc, the default is 80'})
    deltas: Optional[bool] = field(default=True, metadata={'help': 'whether or not add delta and delta-delta to the feature, only effective for spectrogram/fbank/mfcc'})
    mask_spans: Optional[bool] = field(default=False, metadata={'help': 'mask random spans, same as that is used in HuBERT and w2v2'})
    mask_type: MASK_TYPE_CHOICES = field(default='random_mask', metadata={'help': 'Determine type of mask for MAE pretraining. \n -retain_spans: Only for frame data. Wav2Vec2 like masking.\n -random_mask: Perform masking on completely random tokens. No chunking. Used in MAE\n -random_mask_batched: random_mask with the same mask across the batch.\n -chunk_mask: Perform masking on chunks until mask_spans hit. From SSAST. Same across batch for speed.\n '})
|
class MAE_AST_Pretraining_Task():
    """Stripped-down MAE-AST pretraining task holder.

    Only the pieces S3PRL needs are kept; dataset loading is intentionally
    not included (see :meth:`load_dataset`).
    """
    # Declared type of the stored configuration.
    cfg: MAE_AST_Pretraining_Config
    def __init__(self, cfg: MAE_AST_Pretraining_Config) -> None:
        # BUG FIX: the original called `super().__init__(cfg)`, but this class
        # derives directly from `object`, whose __init__ rejects extra
        # arguments when __init__ is overridden -> TypeError at construction.
        super().__init__()
        logger.info(f'current directory is {os.getcwd()}')
        logger.info(f'MAEPretrainingTask Config {cfg}')
        self.cfg = cfg
    @property
    def source_dictionary(self):
        """No source dictionary for this task."""
        return None
    @property
    def target_dictionary(self):
        """No target dictionary for this task."""
        return None
    @property
    def dictionaries(self):
        """No dictionaries for this task."""
        return None
    @classmethod
    def setup_task(cls, cfg: MAE_AST_Pretraining_Config, **kwargs) -> 'MAE_AST_Pretraining_Task':
        """Alternate constructor mirroring the fairseq task API."""
        return cls(cfg)
    def load_dataset(self, split: str, **kwargs) -> None:
        raise NotImplementedError('This part is not used thus not included in S3PRL, please visit the official repo.')
    def max_positions(self) -> Tuple[(int, int)]:
        """No length limit: (source, target) maxima are sys.maxsize."""
        return (sys.maxsize, sys.maxsize)
    def filter_indices_by_size(self, indices: np.array, *args, **kwargs) -> np.array:
        """No size filtering: return the indices unchanged."""
        return indices
|
class UpstreamExpert(UpstreamBase):
    """The Mockingjay wrapper."""
    def __init__(self, ckpt, options_config=None, **kwargs):
        super().__init__(**kwargs)
        if options_config is None:
            print('[UpstreamExpert] - Using the default upstream expert config')
            options = {'load_pretrain': 'True', 'no_grad': 'False', 'dropout': 'default', 'spec_aug': 'False', 'spec_aug_prev': 'True', 'output_hidden_states': 'True', 'permute_input': 'False'}
        else:
            print('[UpstreamExpert] - Using upstream expert config file from:', options_config)
            with open(options_config, 'r') as file:
                options = yaml.load(file, Loader=yaml.FullLoader)
        options['ckpt_file'] = ckpt
        options['select_layer'] = -1
        self.transformer = PretrainedTransformer(options, inp_dim=-1)
        assert hasattr(self.transformer, 'extracter'), 'This wrapper only supports `on-the-fly` ckpt with built in feature extracters.'
        # Dry run with one second of dummy audio (presumably to finish any
        # lazy initialization inside the transformer — confirm).
        self.transformer([torch.randn(16000)])
    def get_downsample_rates(self, key: str) -> int:
        """Input samples per output frame."""
        return 160
    def forward(self, wavs):
        last_hidden_state, hidden_states = self.transformer(wavs)
        return {'last_hidden_state': last_hidden_state, 'hidden_states': hidden_states.unbind(dim=0)}
|
def mockingjay_local(ckpt, options_config=None, *args, **kwargs):
    """Build the Mockingjay upstream from files on disk.

    ckpt (str): path to a local checkpoint file
    options_config (str): optional path to an upstream-expert yaml config
    """
    assert os.path.isfile(ckpt)
    if options_config is not None:
        assert os.path.isfile(options_config)
    return _UpstreamExpert(ckpt, options_config, *args, **kwargs)
|
def mockingjay_url(ckpt, refresh=False, *args, **kwargs):
    """Build the Mockingjay upstream from a downloadable checkpoint.

    ckpt (str): URL of the checkpoint
    refresh (bool): re-download the ckpt even if cached
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return mockingjay_local(local_path, *args, **kwargs)
|
def mockingjay(refresh=False, *args, **kwargs):
    """Default Mockingjay entry point (the original large model).

    refresh (bool): re-download the ckpt/config even if cached
    """
    return mockingjay_origin(*args, refresh=refresh, **kwargs)
|
def mockingjay_origin(refresh=False, *args, **kwargs):
    """The Mockingjay large model on 360hr, log-Mel input with Linear target.

    refresh (bool): re-download the ckpt/config even if cached
    """
    return mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1(*args, refresh=refresh, **kwargs)
|
def mockingjay_100hr(refresh=False, *args, **kwargs):
    """The Mockingjay base model trained on 100hr.

    refresh (bool): re-download the ckpt/config even if cached
    """
    return mockingjay_logMelBase_T_AdamW_b32_200k_100hr(*args, refresh=refresh, **kwargs)
|
def mockingjay_960hr(refresh=False, *args, **kwargs):
    """The Mockingjay base model trained on 960hr.

    refresh (bool): re-download the ckpt/config even if cached
    """
    return mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_200k_100hr(refresh=False, *args, **kwargs):
    """80-dim log Mel, time alteration, AdamW, batch 32, 200k steps,
    100hr unlabeled speech."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/luorglf8mdg67l2/states-200000.ckpt?dl=1'
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1(refresh=False, *args, **kwargs):
    """80-dim log Mel input / 201-dim Linear target, time alteration, AdamW,
    batch 32, 500k steps, 360hr unlabeled speech."""
    kwargs['ckpt'] = 'https://huggingface.co/s3prl/mockingjay/resolve/main/mockingjay_logMelLinearLarge_T_AdamW_b32_500k_360hr_drop1/states-500000.ckpt'
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr(refresh=False, *args, **kwargs):
    """80-dim log Mel, time alteration, AdamW, batch 32, 1M steps,
    960hr unlabeled speech."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/jzx0xggk663jev6/states-1000000.ckpt?dl=1'
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """80-dim log Mel, time alteration, AdamW, batch 32, 1M steps,
    960hr unlabeled speech; uses dropout 0.1 instead of 0.3."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/7f9z6dzc7oix6qv/states-1000000.ckpt?dl=1'
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
def mockingjay_logMelBase_T_AdamW_b32_1m_960hr_seq3k(refresh=False, *args, **kwargs):
    """80-dim log Mel, time alteration, AdamW, batch 32, 1M steps,
    960hr unlabeled speech; sequence length 3k instead of 1.5k."""
    kwargs['ckpt'] = 'https://www.dropbox.com/s/qnnvdrai2tfmjmh/states-1000000.ckpt?dl=1'
    return mockingjay_url(*args, refresh=refresh, **kwargs)
|
class TransformerConfig(object):
    """Configuration holder for a `TransformerModel`: coerces the raw
    config-dict values to their expected types."""
    def __init__(self, config):
        casters = {
            'hidden_size': int,
            'num_hidden_layers': int,
            'num_attention_heads': int,
            'hidden_act': str,
            'intermediate_size': int,
            'hidden_dropout_prob': float,
            'attention_probs_dropout_prob': float,
            'initializer_range': float,
            'layer_norm_eps': float,
            'share_layer': bool,
            'pre_layer_norm': bool,
        }
        for key, cast in casters.items():
            setattr(self, key, cast(config[key]))
|
def prune_linear_layer(layer, index, dim=0):
    """Return a new nn.Linear keeping only the entries of *layer* selected
    by *index* along *dim* (dim=0 prunes outputs, dim=1 prunes inputs).

    The result lives on the same device and has requires_grad=True.
    Used to remove attention heads.
    """
    index = index.to(layer.weight.device)
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    kept_bias = None
    if layer.bias is not None:
        # Pruning input features (dim == 1) leaves the bias untouched.
        kept_bias = (layer.bias if dim == 1 else layer.bias[index]).clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    pruned = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    # Copy in-place with grad tracking temporarily disabled.
    pruned.weight.requires_grad = False
    pruned.weight.copy_(kept_weight.contiguous())
    pruned.weight.requires_grad = True
    if layer.bias is not None:
        pruned.bias.requires_grad = False
        pruned.bias.copy_(kept_bias.contiguous())
        pruned.bias.requires_grad = True
    return pruned
|
def gelu(x):
    """Exact (erf-based) GELU activation.

    OpenAI GPT's gelu uses the tanh approximation instead:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))).
    See https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
|
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
|
class TransformerLayerNorm(nn.Module):
    """TF-style LayerNorm: the epsilon is added inside the square root."""
    def __init__(self, hidden_size, eps=1e-12):
        super(TransformerLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps
    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normed = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.weight * normed + self.bias
|
class TransformerInputRepresentations(nn.Module):
    """Project spectrogram frames to the hidden size and add position
    encodings, followed by LayerNorm and dropout."""
    def __init__(self, config, input_dim):
        super(TransformerInputRepresentations, self).__init__()
        self.hidden_size = config.hidden_size
        self.spec_transform = nn.Linear(input_dim, config.hidden_size)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, spec, pos_enc):
        """spec: (..., input_dim); pos_enc must broadcast with the projection."""
        hidden = self.spec_transform(spec) + pos_enc
        return self.dropout(self.LayerNorm(hidden))
|
class TransformerSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (BERT-style)."""
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)))
        self.output_attentions = output_attentions
        self.keep_multihead_output = keep_multihead_output
        self.multihead_output = None
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
    def transpose_for_scores(self, x):
        # (..., seq, all_head) -> (..., heads, seq, head_size)
        split_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        return x.view(*split_shape).permute(0, 2, 1, 3)
    def forward(self, hidden_states, attention_mask, head_mask=None):
        q = self.transpose_for_scores(self.query(hidden_states))
        k = self.transpose_for_scores(self.key(hidden_states))
        v = self.transpose_for_scores(self.value(hidden_states))
        # Additive attention mask (large negative at padded positions).
        scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        if head_mask is not None:
            probs = probs * head_mask
        context = torch.matmul(probs, v)
        if self.keep_multihead_output:
            # Keep the per-head context (pre-merge) with its gradient.
            self.multihead_output = context
            self.multihead_output.retain_grad()
        context = context.permute(0, 2, 1, 3).contiguous()
        merged_shape = context.size()[:-2] + (self.all_head_size,)
        context = context.view(*merged_shape)
        if self.output_attentions:
            return (probs, context)
        return context
|
class TransformerSelfOutput(nn.Module):
    """Output projection + dropout + residual after self-attention; the
    LayerNorm is applied here only in post-LN mode."""
    def __init__(self, config):
        super(TransformerSelfOutput, self).__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
    def forward(self, hidden_states, input_tensor):
        residual = self.dropout(self.dense(hidden_states)) + input_tensor
        if self.pre_layer_norm:
            return residual
        return self.LayerNorm(residual)
|
class TransformerAttention(nn.Module):
    """Attention sub-block: TransformerSelfAttention plus output projection,
    with optional pre-LayerNorm and support for head pruning."""
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerAttention, self).__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.self = TransformerSelfAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.output = TransformerSelfOutput(config)
        if self.pre_layer_norm:
            # Pre-LN reuses the LayerNorm owned by the output sub-module.
            self.LayerNorm = self.output.LayerNorm
    def prune_heads(self, heads):
        """Remove the given attention heads by shrinking q/k/v and the
        output projection in place."""
        if (len(heads) == 0):
            return
        # Boolean mask over the flattened (head, head_dim) rows; zero out
        # the rows belonging to pruned heads.
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view((- 1)).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        # q/k/v lose output rows (dim=0); the projection loses input
        # columns (dim=1).
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads = (self.self.num_attention_heads - len(heads))
        self.self.all_head_size = (self.self.attention_head_size * self.self.num_attention_heads)
    def forward(self, input_tensor, attention_mask, head_mask=None):
        # Pre-LN normalizes the attention input; the residual inside
        # self.output still adds the un-normalized input_tensor.
        if self.pre_layer_norm:
            self_output = self.LayerNorm(input_tensor)
            self_output = self.self(self_output, attention_mask, head_mask)
        else:
            self_output = self.self(input_tensor, attention_mask, head_mask)
        if self.output_attentions:
            (attentions, self_output) = self_output
        attention_output = self.output(self_output, input_tensor)
        if self.output_attentions:
            return (attentions, attention_output)
        return attention_output
|
class TransformerIntermediate(nn.Module):
    """Feed-forward expansion with a configurable activation."""
    def __init__(self, config):
        super(TransformerIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act
    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
|
class TransformerOutput(nn.Module):
    """Feed-forward contraction + dropout + residual; the LayerNorm is
    applied here only in post-LN mode."""
    def __init__(self, config):
        super(TransformerOutput, self).__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
    def forward(self, hidden_states, input_tensor):
        residual = self.dropout(self.dense(hidden_states)) + input_tensor
        return residual if self.pre_layer_norm else self.LayerNorm(residual)
|
class TransformerLayer(nn.Module):
    """One transformer block: self-attention followed by feed-forward,
    in either pre- or post-LayerNorm arrangement."""
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerLayer, self).__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.attention = TransformerAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.intermediate = TransformerIntermediate(config)
        self.output = TransformerOutput(config)
        if self.pre_layer_norm:
            # Pre-LN shares the output module's LayerNorm.
            self.LayerNorm = self.output.LayerNorm
    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_output = self.attention(hidden_states, attention_mask, head_mask)
        if self.output_attentions:
            attentions, attention_output = attention_output
        if self.pre_layer_norm:
            intermediate_output = self.intermediate(self.LayerNorm(attention_output))
        else:
            intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return (attentions, layer_output) if self.output_attentions else layer_output
|
class TransformerEncoder(nn.Module):
    """Stack of TransformerLayer blocks with optional pre-LayerNorm and
    optional parameter sharing across depths."""
    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super(TransformerEncoder, self).__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        layer = TransformerLayer(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        if config.share_layer:
            # All depths share one set of parameters (ALBERT-style).
            self.layer = nn.ModuleList([layer for _ in range(config.num_hidden_layers)])
        else:
            self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        if self.pre_layer_norm:
            # One LayerNorm per tap point: each layer input plus the final output.
            LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
            self.LayerNorm = nn.ModuleList([copy.deepcopy(LayerNorm) for _ in range((config.num_hidden_layers + 1))])
    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
        """Run the stack; returns a list of encoded layers (all tap points
        when output_all_encoded_layers, otherwise just the final state),
        preceded by the attention maps when output_attentions is set."""
        all_encoder_layers = []
        all_attentions = []
        for (i, layer_module) in enumerate(self.layer):
            if output_all_encoded_layers:
                if self.pre_layer_norm:
                    all_encoder_layers.append(self.LayerNorm[i](hidden_states))
                else:
                    all_encoder_layers.append(hidden_states)
            # BUG FIX: the original indexed `head_mask[i]` unconditionally,
            # crashing with the documented default head_mask=None.
            hidden_states = layer_module(hidden_states, attention_mask, (head_mask[i] if (head_mask is not None) else None))
            if self.output_attentions:
                (attentions, hidden_states) = hidden_states
                all_attentions.append(attentions)
        if self.pre_layer_norm:
            all_encoder_layers.append(self.LayerNorm[(- 1)](hidden_states))
        else:
            all_encoder_layers.append(hidden_states)
        if self.output_attentions:
            return (all_attentions, all_encoder_layers)
        return all_encoder_layers
|
class TransformerSpecPredictionHead(nn.Module):
    """Prediction head mapping hidden states to an ``output_dim``-sized frame.

    Pipeline: dense -> activation -> LayerNorm -> linear projection.
    ``input_dim`` overrides the input width of the first dense layer when
    it differs from ``config.hidden_size``.
    """

    def __init__(self, config, output_dim, input_dim=None):
        super(TransformerSpecPredictionHead, self).__init__()
        self.output_dim = output_dim
        in_features = config.hidden_size if input_dim is None else input_dim
        self.dense = nn.Linear(in_features, config.hidden_size)
        # config.hidden_act is either a registered activation name or a callable.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.output = nn.Linear(config.hidden_size, self.output_dim)

    def forward(self, hidden_states):
        """Return ``(linear_output, transformed_hidden)`` — the projected
        prediction plus the pre-projection normalized features."""
        transformed = self.transform_act_fn(self.dense(hidden_states))
        transformed = self.LayerNorm(transformed)
        return (self.output(transformed), transformed)
|
class TransformerInitModel(nn.Module):
    """Abstract base holding the config and the shared weight-init routine."""

    def __init__(self, config, output_attentions, *inputs, **kwargs):
        super(TransformerInitModel, self).__init__()
        self.config = config
        self.output_attentions = output_attentions

    def init_Transformer_weights(self, module):
        """Initialize one submodule (intended for ``Module.apply``): truncated
        normal for Linear/Embedding weights, identity for LayerNorm, and
        zeroed Linear biases."""
        is_linear = isinstance(module, nn.Linear)
        if is_linear or isinstance(module, nn.Embedding):
            # BERT-style init: normal(0, initializer_range) for all weights.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, TransformerLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if is_linear and (module.bias is not None):
            module.bias.data.zero_()
|
# NOTE(review): the three lines that followed here ("Subsets and Splits",
# "No community queries yet", "The top public SQL queries from the community
# will appear here once available.") were non-Python residue from a web page
# scrape and were commented out to keep this module importable.