code stringlengths 17 6.64M |
|---|
def sample_wavs_and_dump_txt(root, dev_ids, numbers, meta_data_name):
    """Randomly sample wav pairs for speaker verification and dump them to a text file.

    Each output line is "label path1 path2" where label is '1' for a same-speaker
    (positive) pair and '0' for a different-speaker (negative) pair. Positive and
    negative pairs are drawn with roughly equal probability.

    Args:
        root: dataset root directory containing one sub-directory per speaker id.
        dev_ids: list of speaker ids to sample from.
        numbers: number of pairs to generate.
        meta_data_name: output text file path.

    Returns:
        list[str]: the generated "label path1 path2" lines.
    """
    def _rel_sample(dev_id):
        # Pick a random wav under the speaker dir; keep only the last 3 path
        # components so the entry is relative to `root`.
        return '/'.join(random.choice(find_files(os.path.join(root, dev_id))).split('/')[-3:])

    wav_list = []
    for _ in range(numbers):
        if random.random() > 0.5:
            # Negative pair: two distinct speakers.
            id_a, id_b = random.sample(dev_ids, 2)
            wav_list.append(' '.join(['0', _rel_sample(id_a), _rel_sample(id_b)]))
        else:
            # Positive pair: two (possibly identical) utterances of one speaker.
            (dev_id,) = random.sample(dev_ids, 1)
            wav_list.append(' '.join(['1', _rel_sample(dev_id), _rel_sample(dev_id)]))
    # Fixed: use a context manager so the file is closed even on write errors.
    with open(meta_data_name, 'w') as f:
        for data in wav_list:
            f.write(data + '\n')
    return wav_list
|
def EER(labels, scores):
    """Compute the equal error rate and its threshold from a ROC curve.

    Args:
        labels: (N,1) array-like with values in {0, 1}.
        scores: (N,1) array-like with values in [-1, 1].

    Returns:
        tuple: (eer, thresh) — the equal error rate and the score threshold
        at which it is attained.
    """
    fpr, tpr, thresholds = roc_curve(labels, scores)
    # Fixed: the original built an interp1d that was never used (`s`) and then
    # rebuilt another one inside the lambda on every brentq iteration. Build
    # the TPR interpolant once and reuse it.
    tpr_at_fpr = interp1d(fpr, tpr)
    # EER is where FPR == FNR, i.e. where 1 - x == TPR(x) along the ROC curve.
    eer = brentq(lambda x: (1.0 - x) - tpr_at_fpr(x), 0.0, 1.0)
    thresh = interp1d(fpr, thresholds)(eer)
    return (eer, thresh)
|
def eer_yist_f(labels, scores):
    """Compute the equal error rate by binary-searching candidate thresholds.

    Args:
        labels: (N,1) with value being 0 or 1
        scores: (N,1) within [-1, 1]

    Returns:
        equal_error_rates
        threshold
    """
    # Sort (score, label) pairs by score so prefix sums over the sorted labels
    # give, for any cut index, the error counts of thresholding at that cut.
    joints = sorted(zip(scores, labels), key=(lambda x: x[0]))
    (sorted_scores, sorted_labels) = zip(*joints)
    total_ones = sum(sorted_labels)
    total_zeros = (len(sorted_labels) - total_ones)
    # prefsum_ones[i] / prefsum_zeros[i]: number of 1s / 0s among the i lowest scores.
    prefsum_ones = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=1), initial=0))
    prefsum_zeros = list(accumulate(sorted_labels, partial(_count_labels, label_to_count=0), initial=0))
    # Pad with sentinel scores so thresholds below/above every score are representable.
    # NOTE(review): the sentinels assume scores lie within [-1, 1] — confirm callers.
    ext_scores = [(- 1.0), *sorted_scores, 1.0]
    (thresh_left, thresh_right) = (0, len(ext_scores))
    # Binary search for the cut where false positives ~= false negatives.
    # FP/FN counts are monotone in the cut index, which makes this valid.
    while True:
        if (thresh_left == thresh_right):
            break
        thresh_idx = ((thresh_left + thresh_right) // 2)
        # Items above the cut predicted positive: remaining 0s are false positives.
        nb_false_positives = (total_zeros - prefsum_zeros[thresh_idx])
        # Items below the cut predicted negative: 1s below the cut are false negatives.
        nb_false_negatives = prefsum_ones[thresh_idx]
        if (nb_false_positives > nb_false_negatives):
            thresh_left = thresh_idx
        elif (nb_false_positives < nb_false_negatives):
            thresh_right = thresh_idx
        else:
            break
    # Place the threshold halfway between the two scores surrounding the cut.
    thresh = ((ext_scores[thresh_idx] + ext_scores[(thresh_idx + 1)]) / 2)
    false_negative_ratio = (nb_false_negatives / len(labels))
    false_positive_ratio = (nb_false_positives / len(labels))
    # Report the average of the two (they may differ by one item's worth).
    equal_error_rate = ((false_positive_ratio + false_negative_ratio) / 2)
    return (equal_error_rate, thresh)
|
def _count_labels(counted_so_far, label, label_to_count=0):
return ((counted_so_far + 1) if (label == label_to_count) else counted_so_far)
|
def compute_metrics(input_x_speaker, ylabel):
    """Split concatenated pair embeddings and score each verification trial.

    NOTE(review): this function reads ``self.score_fn`` but takes no ``self``
    parameter — it looks like a method detached from its class; confirm the
    enclosing object before calling it standalone.

    Args:
        input_x_speaker: indexable of 2 * len(ylabel) embeddings; entry i and
            entry len(ylabel) + i form one trial pair.
        ylabel: list of 0/1 label tensors, one per pair.

    Returns:
        tuple: (scores, ylabel) — per-pair similarity scores and integer
        labels, both as plain Python lists.
    """
    wav1 = []
    wav2 = []
    # First half of input_x_speaker goes to wav1, second half to wav2.
    for i in range(len(ylabel)):
        wav1.append(input_x_speaker[i].unsqueeze(0))
        wav2.append(input_x_speaker[(len(ylabel) + i)].unsqueeze(0))
    wav1 = torch.stack(wav1)
    wav2 = torch.stack(wav2)
    ylabel = torch.stack(ylabel).cpu().detach().long().tolist()
    scores = self.score_fn(wav1, wav2).squeeze().cpu().detach().tolist()
    return (scores, ylabel)
|
def options(only_registered_ckpt: bool = False):
    """List the public callables defined in this module (torch.hub entrypoints).

    Args:
        only_registered_ckpt: when True, drop loader variants whose names end
            with '_local', '_url', '_gdriveid', or '_custom'.

    Returns:
        list[str]: the qualifying entrypoint names.
    """
    loader_suffixes = ('_local', '_url', '_gdriveid', '_custom')
    names = []
    for name, value in globals().items():
        # torch.hub convention: public (no leading underscore) callables only,
        # excluding this helper itself.
        if name.startswith('_') or not callable(value) or name == 'options':
            continue
        if only_registered_ckpt and name.endswith(loader_suffixes):
            continue
        names.append(name)
    return names
|
def main():
    """CLI dispatcher: instantiate the problem class named in argv[1] and run it.

    Usage: ``prog PROBLEM [args...]`` where PROBLEM is a class name in the
    s3prl.problem package. Prints usage (with the available class names) and
    exits when PROBLEM is missing or unknown.
    """
    try:
        cls = getattr(problem, sys.argv[1])
    # Fixed: was a bare `except:` which also swallowed KeyboardInterrupt and
    # SystemExit. Only the two expected failures are handled: no argument
    # given (IndexError) or an unknown problem name (AttributeError).
    except (IndexError, AttributeError):
        available_problems = [
            name for name in dir(problem)
            if (not name.startswith('_')) and isinstance(getattr(problem, name), type)
        ]
        print(traceback.format_exc())
        print(f'''Usage:
1. s3prl-main [PROBLEM] -h
2. python3 -m s3prl.main [PROBLEM] -h
3. python3 s3prl/main.py [PROBLEM] -h
PROBLEM should be an available class name in the s3prl.problem package.
Available options: {', '.join(available_problems)}''')
        exit(0)
    cls().main(sys.argv[2:])
|
def accuracy(xs, ys, item_same_fn=None):
    """Element-wise accuracy between two collections.

    Accepts parallel sequences, or two dicts compared over their (sorted) keys.

    Args:
        xs: tuple/list or dict of predictions.
        ys: tuple/list or dict of references (same container kind as xs).
        item_same_fn: optional binary predicate; defaults to equality.

    Returns:
        float: fraction of matching items.

    Raises:
        ValueError: when xs is neither a sequence nor a dict.
    """
    if isinstance(xs, dict):
        assert isinstance(ys, dict)
        # Align dict values by sorted key so both sides pair up deterministically.
        keys = sorted(xs.keys())
        return _accuracy_impl([xs[k] for k in keys], [ys[k] for k in keys], item_same_fn)
    if isinstance(xs, (tuple, list)):
        assert isinstance(ys, (tuple, list))
        return _accuracy_impl(xs, ys, item_same_fn)
    raise ValueError
|
def _accuracy_impl(xs, ys, item_same_fn=None):
item_same_fn = (item_same_fn or (lambda x, y: (x == y)))
same = [int(item_same_fn(x, y)) for (x, y) in zip(xs, ys)]
return (sum(same) / len(same))
|
def ter(hyps: List[Union[str, List[str]]], refs: List[Union[str, List[str]]]) -> float:
    """Token error rate calculator.

    Args:
        hyps (List[Union[str, List[str]]]): List of hypotheses.
        refs (List[Union[str, List[str]]]): List of references.

    Returns:
        float: Averaged token error rate overall utterances.
    """
    total_errors = 0
    total_length = 0
    for hyp, ref in zip(hyps, refs):
        # Edit distance in token operations, normalized by total reference length.
        total_errors += ed.eval(hyp, ref)
        total_length += len(ref)
    return float(total_errors) / float(total_length)
|
def wer(hyps: List[str], refs: List[str]) -> float:
    """Word error rate calculator.

    Args:
        hyps (List[str]): List of hypotheses.
        refs (List[str]): List of references.

    Returns:
        float: Averaged word error rate overall utterances.
    """
    # Tokenize on single spaces, then reuse the token error rate.
    tokenized_hyps = [hyp.split(' ') for hyp in hyps]
    tokenized_refs = [ref.split(' ') for ref in refs]
    return ter(tokenized_hyps, tokenized_refs)
|
def per(hyps: List[str], refs: List[str]) -> float:
    """Phoneme error rate calculator.

    Delegates to :func:`wer`: phonemes are expected to be space-separated
    tokens, so the computation is identical.

    Args:
        hyps (List[str]): List of hypotheses.
        refs (List[str]): List of references.

    Returns:
        float: Averaged phoneme error rate overall utterances.
    """
    return wer(hyps, refs)
|
def cer(hyps: List[str], refs: List[str]) -> float:
    """Character error rate calculator.

    Delegates to :func:`ter` on the raw strings, so every character
    (including spaces) counts as one token.

    Args:
        hyps (List[str]): List of hypotheses.
        refs (List[str]): List of references.

    Returns:
        float: Averaged character error rate overall utterances.
    """
    return ter(hyps, refs)
|
def compute_eer(labels: List[int], scores: List[float]):
    """Compute equal error rate.

    Args:
        scores (List[float]): List of hypotheses.
        labels (List[int]): List of references.

    Returns:
        eer (float): Equal error rate.
        threshold (float): The threshold to accept a target trial.
    """
    fpr, tpr, thresholds = roc_curve(labels, scores, pos_label=1)
    # Fixed: build the TPR interpolant once instead of reconstructing it
    # inside the lambda on every brentq iteration.
    tpr_at_fpr = interp1d(fpr, tpr)
    # EER is the fixed point where FPR == FNR, i.e. 1 - x == TPR(x).
    eer = brentq(lambda x: (1.0 - x) - tpr_at_fpr(x), 0.0, 1.0)
    threshold = interp1d(fpr, thresholds)(eer)
    return (eer, threshold)
|
def compute_minDCF(labels: List[int], scores: List[float], p_target: float=0.01, c_miss: int=1, c_fa: int=1):
    """Compute MinDCF.

    Computes the minimum of the detection cost function. The comments refer to
    equations in Section 3 of the NIST 2016 Speaker Recognition Evaluation Plan.

    Args:
        scores (List[float]): List of hypotheses.
        labels (List[int]): List of references.
        p_target (float): The prior probability of positive class.
        c_miss (int): The cost of miss.
        c_fa (int): The cost of false alarm.

    Returns:
        min_dcf (float): The calculated min_dcf.
        min_c_det_threshold (float): The threshold attaining min_dcf.
    """
    fpr, tpr, thresholds = roc_curve(labels, scores, pos_label=1)
    fnr = 1.0 - tpr
    # Scan every operating point for the lowest detection cost.
    min_c_det = float('inf')
    min_c_det_threshold = thresholds[0]
    for fnr_i, fpr_i, thr in zip(fnr, fpr, thresholds):
        c_det = (c_miss * fnr_i * p_target) + (c_fa * fpr_i * (1 - p_target))
        if c_det < min_c_det:
            min_c_det = c_det
            min_c_det_threshold = thr
    # Normalize by the cost of a trivial always-accept/always-reject system.
    c_def = min(c_miss * p_target, c_fa * (1 - p_target))
    min_dcf = min_c_det / c_def
    return (min_dcf, min_c_det_threshold)
|
def clean(ref: str) -> str:
    """Strip slot boundary markers ('B-<slot> ' and ' E-<slot>') from a transcript."""
    without_begin_tags = re.sub(r'B\-(\S+) ', '', ref)
    return re.sub(r' E\-(\S+)', '', without_begin_tags)
|
def parse(hyp: str, ref: str) -> Tuple[str, str, str, str]:
    """Split marked transcripts into plain text and serialized slot lists.

    Slots are spans of the form 'B-<slot> value E-<slot>'. The serialized form
    is ';'-joined 'value:slot' entries ('' when no slot was found).

    Returns:
        tuple: (ref_text, hyp_text, ref_slots, hyp_slots)
    """
    slot_pattern = re.compile(r'B\-(\S+) (.+?) E\-\1')
    # Collapse runs of spaces so the pattern's single-space anchors match.
    hyp = re.sub(' +', ' ', hyp)
    ref = re.sub(' +', ' ', ref)
    hyp_matches = slot_pattern.findall(hyp)
    ref_matches = slot_pattern.findall(ref)
    ref_slots = ';'.join(':'.join([value, slot]) for slot, value in ref_matches)
    if len(hyp_matches) > 0:
        # Hypothesis values may themselves contain stray markers; clean them.
        hyp_slots = ';'.join(':'.join([clean(value), slot]) for slot, value in hyp_matches)
    else:
        hyp_slots = ''
    ref = clean(ref)
    hyp = clean(hyp)
    return (ref, hyp, ref_slots, hyp_slots)
|
def get_slot_dict(hyp: str, ref: str) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]:
    """Parse a hypothesis/reference pair into {slot: [values]} dicts.

    Returns:
        tuple: (ref_dict, hyp_dict) mapping each slot name to its values.
    """
    _, _, ref_slots, hyp_slots = parse(hyp, ref)

    def _deserialize(slot_str: str) -> Dict[str, List[str]]:
        # 'value:slot;value:slot;...' -> {slot: [value, ...]}; '' -> {}
        mapping = {}
        entries = slot_str.split(';')
        if entries[0] != '':
            for entry in entries:
                value, slot = entry.split(':')
                mapping.setdefault(slot, []).append(value)
        return mapping

    return (_deserialize(ref_slots), _deserialize(hyp_slots))
|
def slot_type_f1(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Average per-utterance F1 over slot *types* (names only, values ignored).

    Args:
        hypothesis: decoded transcripts with B-/E- slot markers.
        groundtruth: reference transcripts with B-/E- slot markers.

    Returns:
        float: mean F1 across utterance pairs.
    """
    f1_scores = []
    for hyp_utt, ref_utt in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(hyp_utt, ref_utt)
        n_ref, n_hyp = len(ref_dict.keys()), len(hyp_dict.keys())
        if n_ref == 0 and n_hyp == 0:
            # Neither side predicted any slot: count as a perfect match.
            f1 = 1.0
        elif n_ref == 0 or n_hyp == 0:
            f1 = 0.0
        else:
            recall = sum(1.0 for slot in ref_dict if slot in hyp_dict) / n_ref
            precision = sum(1.0 for slot in hyp_dict if slot in ref_dict) / n_hyp
            denom = precision + recall
            f1 = (2 * precision * recall / denom) if denom > 0 else 0.0
        f1_scores.append(f1)
    return sum(f1_scores) / len(f1_scores)
|
def slot_value_cer(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Character error rate over slot *values*, best-matching hypothesis per reference.

    For every reference slot value, the hypothesis value of the same slot with
    the lowest CER is paired with it (or '' when the slot is missing); the
    overall CER of all such pairs is returned.
    """
    paired_hyps, paired_refs = [], []
    for hyp_utt, ref_utt in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(hyp_utt, ref_utt)
        for slot in list(ref_dict.keys()):
            for ref_value in ref_dict[slot]:
                if slot not in hyp_dict:
                    best_value = ''
                else:
                    # Pick the hypothesis value closest to this reference value.
                    best_value, lowest_cer = '', 100
                    for candidate in hyp_dict[slot]:
                        candidate_cer = cer([candidate], [ref_value])
                        if candidate_cer < lowest_cer:
                            lowest_cer = candidate_cer
                            best_value = candidate
                paired_refs.append(ref_value)
                paired_hyps.append(best_value)
    return cer(paired_hyps, paired_refs)
|
def slot_value_wer(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Word error rate over slot *values*, best-matching hypothesis per reference.

    For every reference slot value, the hypothesis value of the same slot with
    the lowest WER is paired with it (or '' when the slot is missing); the
    overall WER of all such pairs is returned.
    """
    paired_hyps, paired_refs = [], []
    for hyp_utt, ref_utt in zip(hypothesis, groundtruth):
        ref_dict, hyp_dict = get_slot_dict(hyp_utt, ref_utt)
        for slot in list(ref_dict.keys()):
            for ref_value in ref_dict[slot]:
                if slot not in hyp_dict:
                    best_value = ''
                else:
                    # Pick the hypothesis value closest to this reference value.
                    best_value, lowest_wer = '', 100
                    for candidate in hyp_dict[slot]:
                        candidate_wer = wer([candidate], [ref_value])
                        if candidate_wer < lowest_wer:
                            lowest_wer = candidate_wer
                            best_value = candidate
                paired_refs.append(ref_value)
                paired_hyps.append(best_value)
    return wer(paired_hyps, paired_refs)
|
def slot_edit_f1(hypothesis: List[str], groundtruth: List[str], loop_over_all_slot: bool, **kwargs) -> float:
    """Micro-averaged slot F1 based on exact value matches.

    Args:
        hypothesis: decoded transcripts with B-/E- slot markers.
        groundtruth: reference transcripts with B-/E- slot markers.
        loop_over_all_slot: when True, slots appearing only in the hypothesis
            are also counted (all their values become false positives).

    Returns:
        float: micro F1 = 2*TP / (2*TP + FP + FN) aggregated over all slots.
    """
    slot2F1 = {}
    # slot name -> [TP, FN, FP] accumulated over all utterance pairs
    for (p, t) in zip(hypothesis, groundtruth):
        (ref_dict, hyp_dict) = get_slot_dict(p, t)
        unique_slots = list(ref_dict.keys())
        if loop_over_all_slot:
            # Also visit hypothesis-only slots so their spurious values count as FP.
            unique_slots += [x for x in hyp_dict if (x not in ref_dict)]
        for slot in unique_slots:
            TP = 0
            FP = 0
            FN = 0
            if (slot not in ref_dict):
                # Slot predicted but not in the reference: every value is a FP.
                for hyp_v in hyp_dict[slot]:
                    FP += 1
            else:
                for (ref_i, ref_v) in enumerate(ref_dict[slot]):
                    if (slot not in hyp_dict):
                        # Slot entirely missing from the hypothesis: a miss per value.
                        FN += 1
                    else:
                        match = False
                        for hyp_v in hyp_dict[slot]:
                            if (hyp_v == ref_v):
                                match = True
                                break
                        if match:
                            TP += 1
                        else:
                            # An unmatched reference value counts as both a miss (FN)
                            # and a wrong emission (FP).
                            FN += 1
                            FP += 1
            slot2F1.setdefault(slot, [0, 0, 0])
            slot2F1[slot][0] += TP
            slot2F1[slot][1] += FN
            slot2F1[slot][2] += FP
    # Micro-average: pool counts across slots before computing F1.
    (all_TPs, all_FNs, all_FPs) = (0, 0, 0)
    for slot in slot2F1.keys():
        all_TPs += slot2F1[slot][0]
        all_FNs += slot2F1[slot][1]
        all_FPs += slot2F1[slot][2]
    return ((2 * all_TPs) / (((2 * all_TPs) + all_FPs) + all_FNs))
|
def slot_edit_f1_full(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Slot edit F1 counting hypothesis-only slots as false positives (full variant)."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=True, **kwargs)
|
def slot_edit_f1_part(hypothesis: List[str], groundtruth: List[str], **kwargs) -> float:
    """Slot edit F1 restricted to slots present in the reference (partial variant)."""
    return slot_edit_f1(hypothesis, groundtruth, loop_over_all_slot=False, **kwargs)
|
class BeamDecoder(object):
    """Beam decoder powered by flashlight.

    Args:
        token (str, optional): Path to dictionary file. Defaults to "".
        lexicon (str, optional): Path to lexicon file. Defaults to "".
        lm (str, optional): Path to KenLM file. Defaults to "".
        nbest (int, optional): Returns nbest hypotheses. Defaults to 1.
        beam (int, optional): Beam size. Defaults to 5.
        beam_size_token (int, optional): Token beam size. Defaults to -1.
        beam_threshold (float, optional): Beam search log prob threshold. Defaults to 25.0.
        lm_weight (float, optional): language model weight. Defaults to 2.0.
        word_score (float, optional): score for words appearance in the transcription. Defaults to -1.0.
        unk_score (float, optional): score for unknown word appearance in the transcription. Defaults to -math.inf.
        sil_score (float, optional): score for silence appearance in the transcription. Defaults to 0.0.
    """

    def __init__(self, token: str='', lexicon: str='', lm: str='', nbest: int=1, beam: int=5, beam_size_token: int=(- 1), beam_threshold: float=25.0, lm_weight: float=2.0, word_score: float=(- 1.0), unk_score: float=(- math.inf), sil_score: float=0.0):
        # Import lazily so the rest of the module stays usable without flashlight.
        try:
            from flashlight.lib.text.decoder import CriterionType, KenLM, LexiconDecoder, LexiconDecoderOptions, SmearingMode, Trie
            from flashlight.lib.text.dictionary import Dictionary, create_word_dict, load_words
        except ImportError:
            logger.error(f'Please install Flashlight Text from https://github.com/flashlight/text to enable {__class__.__name__}')
            raise
        # Empty paths fall back to downloading the default token/lexicon/LM files.
        if (token == ''):
            token = _urls_to_filepaths(TOKEN_URL)
        if (lexicon == ''):
            lexicon = _urls_to_filepaths(LEXICON_URL_1)
        if (lm == ''):
            lm = _urls_to_filepaths(LM_URL_1)
        self.nbest = nbest
        self.token_dict = Dictionary(token)
        self.lexicon = load_words(lexicon)
        self.word_dict = create_word_dict(self.lexicon)
        self.lm = KenLM(lm, self.word_dict)
        # '|' marks word boundaries (silence); '<unk>' is the OOV word.
        self.sil_idx = self.token_dict.get_index('|')
        self.unk_idx = self.word_dict.get_index('<unk>')
        # Build a trie over token spellings, scoring each word with its unigram
        # LM score from the LM start state.
        self.trie = Trie(self.token_dict.index_size(), self.sil_idx)
        start_state = self.lm.start(False)
        for (word, spellings) in self.lexicon.items():
            usr_idx = self.word_dict.get_index(word)
            (_, score) = self.lm.score(start_state, usr_idx)
            for spelling in spellings:
                spelling_idxs = [self.token_dict.get_index(tok) for tok in spelling]
                self.trie.insert(spelling_idxs, usr_idx, score)
        # Propagate the best word score down to trie prefixes (MAX smearing).
        self.trie.smear(SmearingMode.MAX)
        # -1 means "consider every token" at each step.
        if (beam_size_token == (- 1)):
            beam_size_token = self.token_dict.index_size()
        self.options = LexiconDecoderOptions(beam_size=beam, beam_size_token=beam_size_token, beam_threshold=beam_threshold, lm_weight=lm_weight, word_score=word_score, unk_score=unk_score, sil_score=sil_score, log_add=False, criterion_type=CriterionType.CTC)
        # '#' is the CTC blank token in this token set.
        self.blank_idx = self.token_dict.get_index('#')
        self.decoder = LexiconDecoder(self.options, self.trie, self.lm, self.sil_idx, self.blank_idx, self.unk_idx, [], False)

    def get_tokens(self, idxs: Iterable) -> torch.LongTensor:
        """Normalize tokens by handling CTC blank, ASG replabels, etc.

        Args:
            idxs (Iterable): Token ID list output by self.decoder

        Returns:
            torch.LongTensor: Token ID list after normalization.
        """
        # Collapse repeats, then drop CTC blanks (standard CTC post-processing).
        idxs = (g[0] for g in it.groupby(idxs))
        idxs = filter((lambda x: (x != self.blank_idx)), idxs)
        return torch.LongTensor(list(idxs))

    def get_timesteps(self, token_idxs: List[int]) -> List[int]:
        """Returns frame numbers corresponding to every non-blank token.

        Args:
            token_idxs (List[int]): IDs of decoded tokens.

        Returns:
            List[int]: Frame numbers corresponding to every non-blank token.
        """
        timesteps = []
        for (i, token_idx) in enumerate(token_idxs):
            if (token_idx == self.blank_idx):
                continue
            # Record only the first frame of each repeated-token run.
            if ((i == 0) or (token_idx != token_idxs[(i - 1)])):
                timesteps.append(i)
        return timesteps

    def decode(self, emissions: torch.Tensor) -> List[List[dict]]:
        """Decode sequence.

        Args:
            emissions (torch.Tensor): Emission probabilities (in log scale).

        Returns:
            List[List[dict]]: Decoded hypotheses.
        """
        # The decoder reads raw memory, so force float32, contiguous, on CPU.
        emissions = emissions.float().contiguous().cpu()
        (B, T, N) = emissions.size()
        hyps = []
        for b in range(B):
            # Advance the raw pointer to batch item b; the factor 4 is the
            # float32 element size in bytes (emissions was cast to float above).
            emissions_ptr = (emissions.data_ptr() + ((4 * b) * emissions.stride(0)))
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[:self.nbest]
            # Words with index < 0 are sentinel/no-word entries and are skipped.
            hyps.append([dict(tokens=self.get_tokens(result.tokens), score=result.score, timesteps=self.get_timesteps(result.tokens), words=[self.word_dict.get_entry(x) for x in result.words if (x >= 0)]) for result in nbest_results])
        return hyps
|
class FrameLevel(nn.Module):
    """
    The common frame-to-frame probing model

    Args:
        input_size (int): input size
        output_size (int): output size
        hidden_sizes (List[int]): a list of hidden layers' hidden size.
            by default is [256] to project all different input sizes to the same dimension.
            set empty list to use the vanilla single layer linear model
        activation_type (str): the activation class name in :obj:`torch.nn`. Set None to
            disable activation and the model is pure linear. Default: None
        activation_conf (dict): the arguments for initializing the activation class.
            Default: empty dict
    """

    def __init__(self, input_size: int, output_size: int, hidden_sizes: List[int]=None, activation_type: str=None, activation_conf: dict=None):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        # Fixed: `hidden_sizes or [256]` also replaced an explicit empty list
        # with [256], contradicting the documented "set empty list for a
        # vanilla linear model" behavior. Only default when None is given.
        if hidden_sizes is None:
            hidden_sizes = [256]
        latest_size = input_size
        hidden_layers = []
        for size in hidden_sizes:
            hidden_layers.append(nn.Linear(latest_size, size))
            if activation_type is not None:
                hidden_layers.append(getattr(nn, activation_type)(**(activation_conf or {})))
            latest_size = size
        self.hidden_layers = nn.Sequential(*hidden_layers)
        self.final_proj = nn.Linear(latest_size, output_size)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x, x_len):
        """
        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            tuple

            1. ys (torch.FloatTensor): (batch_size, seq_len, output_size)
            2. ys_len (torch.LongTensor): (batch_size, )
        """
        ys = self.hidden_layers(x)
        ys = self.final_proj(ys)
        # Frame-level model: sequence lengths are unchanged.
        return (ys, x_len)
|
class UtteranceLevel(nn.Module):
    """
    Utterance-level probing model: hidden layers, a temporal pooling, and a
    final linear projection.

    Args:
        input_size (int): input_size
        output_size (int): output_size
        hidden_sizes (List[int]): a list of hidden layers' hidden size.
            by default is [256] to project all different input sizes to the same dimension.
            set empty list to use the vanilla single layer linear model
        activation_type (str): the activation class name in :obj:`torch.nn`. Set None to
            disable activation and the model is pure linear. Default: None
        activation_conf (dict): the arguments for initializing the activation class.
            Default: empty dict
        pooling_type (str): the pooling class name in :obj:`s3prl.nn.pooling`. Default: MeanPooling
        pooling_conf (dict): the arguments for initializing the pooling class.
            Default: empty dict
    """

    def __init__(self, input_size: int, output_size: int, hidden_sizes: List[int]=None, activation_type: str=None, activation_conf: dict=None, pooling_type: str='MeanPooling', pooling_conf: dict=None):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        # Fixed: `hidden_sizes or [256]` also replaced an explicit empty list
        # with [256], contradicting the documented "set empty list for a
        # vanilla linear model" behavior. Only default when None is given.
        if hidden_sizes is None:
            hidden_sizes = [256]
        latest_size = input_size
        hidden_layers = []
        for size in hidden_sizes:
            hidden_layers.append(nn.Linear(latest_size, size))
            if activation_type is not None:
                hidden_layers.append(getattr(nn, activation_type)(**(activation_conf or {})))
            latest_size = size
        self.hidden_layers = nn.Sequential(*hidden_layers)
        # The pooling module may change the feature dimension (e.g. statistics
        # pooling doubles it), so track its output size.
        pooling_conf = pooling_conf or {}
        self.pooling = getattr(pooling, pooling_type)(latest_size, **pooling_conf)
        latest_size = self.pooling.output_size
        self.final_proj = nn.Linear(latest_size, output_size)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x, x_len):
        """
        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            torch.FloatTensor

            (batch_size, output_size)
        """
        x = self.hidden_layers(x)
        x_pooled = self.pooling(x, x_len)
        y = self.final_proj(x_pooled)
        return y
|
class HearFullyConnectedPrediction(torch.nn.Module):
    """
    The specific prediction head used in the Hear Benchmark.
    Modified from: https://github.com/hearbenchmark/hear-eval-kit/blob/855964977238e89dfc76394aa11c37010edb6f20/heareval/predictions/task_predictions.py#L142

    Args:
        input_size (int): input_size
        output_size (int): output_size
        hidden_size (int): hidden size across all layers. Default: 1024
        hidden_layers (int): number of hidden layers, all in :code:`hidden_size`. Default: 2
        norm_after_activation (bool): whether to norm after activation. Default: False
        dropout (float): dropout ratio. Default: 0.1
        initialization (str): initialization method name available in :obj:`torch.nn.init`
        hidden_norm (str): normalization method name available in :obj:`torch.nn`
        pooling_type (str): the pooling class name in :obj:`s3prl.nn.pooling`. Default: MeanPooling
        pooling_conf (dict): the arguments for initializing the pooling class.
            Default: empty dict
    """

    def __init__(self, input_size: int, output_size: int, hidden_size: int=1024, hidden_layers: int=2, norm_after_activation: bool=False, dropout: float=0.1, initialization: str='xavier_uniform_', hidden_norm: str='BatchNorm1d', pooling_type: str=None, pooling_conf: dict=None):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size
        # Resolve the init function and norm class from their names.
        initialization = getattr(torch.nn.init, initialization)
        hidden_norm = getattr(torch.nn, hidden_norm)
        curdim = input_size
        # Optional temporal pooling in front of the MLP (utterance-level mode).
        if (pooling_type is not None):
            pooling_cls = getattr(pooling, pooling_type)
            self.pooling = pooling_cls(input_size, **(pooling_conf or {}))
            curdim = self.pooling.output_size
        hidden_modules: List[torch.nn.Module] = []
        # Each Linear is initialized with the gain of the activation *feeding*
        # it: 'linear' for the first layer, 'relu' afterwards.
        last_activation = 'linear'
        if hidden_layers:
            for i in range(hidden_layers):
                linear = torch.nn.Linear(curdim, hidden_size)
                initialization(linear.weight, gain=torch.nn.init.calculate_gain(last_activation))
                hidden_modules.append(linear)
                if (not norm_after_activation):
                    hidden_modules.append(hidden_norm(hidden_size))
                hidden_modules.append(torch.nn.Dropout(dropout))
                hidden_modules.append(torch.nn.ReLU())
                if norm_after_activation:
                    hidden_modules.append(hidden_norm(hidden_size))
                curdim = hidden_size
                last_activation = 'relu'
            self.hidden = torch.nn.Sequential(*hidden_modules)
        else:
            self.hidden = torch.nn.Identity()
        self.projection = torch.nn.Linear(curdim, output_size)
        initialization(self.projection.weight, gain=torch.nn.init.calculate_gain(last_activation))

    @property
    def input_size(self) -> int:
        return self._input_size

    @property
    def output_size(self) -> int:
        return self._output_size

    def forward(self, x, x_len) -> torch.Tensor:
        """
        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            tuple:

            1. y (torch.FloatTensor)
            2. y_len (torch.LongTensor)

            if :code:`pooling_type` is None, :code:`y` is (batch_size, seq_len, output_size) and :code:`y_len` is (batch_size, )
            if not None, :code:`y` is (batch_size, output_size) and :code:`y_len` is (batch_size, ) in all 1s.
        """
        if hasattr(self, 'pooling'):
            x = self.pooling(x, x_len)
            # After pooling every sample has a single "frame".
            x_len = x.new_ones(len(x))
        shape = x.shape
        # Flatten (batch, time) so BatchNorm1d sees a 2D (N, C) input.
        if (len(shape) == 3):
            (bs, ts, hidden_size) = x.shape
            x = x.reshape((bs * ts), hidden_size)
        x = self.hidden(x)
        x = self.projection(x)
        # Restore the (batch, time, feature) layout if it was flattened.
        if (len(shape) == 3):
            x = x.reshape(bs, ts, (- 1))
        return (x, x_len)
|
class AbsUpstream(nn.Module):
    """
    The upstream model should follow this interface. Please subclass it.
    """

    @property
    def num_layer(self) -> int:
        """
        number of hidden states
        """
        raise NotImplementedError

    @property
    def hidden_sizes(self) -> List[int]:
        """
        hidden size of each hidden state
        """
        raise NotImplementedError

    @property
    def downsample_rates(self) -> List[int]:
        """
        downsample rate from 16 KHz waveforms for each hidden state
        """
        raise NotImplementedError

    def forward(self, wavs: torch.FloatTensor, wavs_len: torch.LongTensor) -> Tuple[(List[torch.FloatTensor], List[torch.LongTensor])]:
        """
        Args:
            wavs (torch.FloatTensor): (batch_size, seq_len, 1)
            wavs_len (torch.LongTensor): (batch_size, )

        Returns:
            tuple:

            1. all_hs (List[torch.FloatTensor]): all the hidden states
            2. all_hs_len (List[torch.LongTensor]): the lengths for all the hidden states
        """
        raise NotImplementedError
|
class AbsFeaturizer(nn.Module):
    """
    The featurizer should follow this interface. Please subclass it.
    The featurizer's mission is to reduce (standardize) the multiple hidden
    states from :obj:`AbsUpstream` into a single hidden state, so that
    the downstream model can use it as a conventional representation.
    """

    @property
    def output_size(self) -> int:
        """
        The output size after hidden states reduction
        """
        raise NotImplementedError

    @property
    def downsample_rate(self) -> int:
        """
        The downsample rate from 16 KHz waveform of the reduced single hidden state
        """
        raise NotImplementedError

    def forward(self, all_hs: List[torch.FloatTensor], all_hs_len: List[torch.LongTensor]) -> Tuple[(torch.FloatTensor, torch.LongTensor)]:
        """
        Args:
            all_hs (List[torch.FloatTensor]): all the hidden states
            all_hs_len (List[torch.LongTensor]): the lengths for all the hidden states

        Returns:
            tuple:

            1. hs (torch.FloatTensor)
            2. hs_len (torch.LongTensor)
        """
        raise NotImplementedError
|
class AbsFrameModel(nn.Module):
    """
    The frame-level model interface. Subclasses map a frame sequence of
    `input_size` features to a frame sequence of `output_size` features,
    keeping the per-sample lengths.
    """

    @property
    def input_size(self) -> int:
        # Feature dimension expected from the upstream/featurizer.
        raise NotImplementedError

    @property
    def output_size(self) -> int:
        # Feature dimension produced per frame.
        raise NotImplementedError

    def forward(self, x: torch.FloatTensor, x_len: torch.LongTensor) -> Tuple[(torch.FloatTensor, torch.LongTensor)]:
        # Expected: x (batch, seq_len, input_size), x_len (batch, );
        # returns (ys, ys_len) with the same temporal structure.
        raise NotImplementedError
|
class AbsUtteranceModel(nn.Module):
    """
    The utterance-level model interface, which pools the temporal dimension.
    Subclasses map (batch, seq_len, input_size) to (batch, output_size).
    """

    @property
    def input_size(self) -> int:
        # Feature dimension expected from the upstream/featurizer.
        raise NotImplementedError

    @property
    def output_size(self) -> int:
        # Feature dimension of the pooled utterance embedding.
        raise NotImplementedError

    def forward(self, x: torch.FloatTensor, x_len: torch.LongTensor) -> torch.FloatTensor:
        # Expected: x (batch, seq_len, input_size), x_len (batch, );
        # returns a single pooled embedding per sample.
        raise NotImplementedError
|
class FrameLevelLinear(FrameLevel):
    """
    The frame-level linear probing model used in SUPERB Benchmark
    """

    def __init__(self, input_size: int, output_size: int, hidden_size: int=256):
        # One hidden Linear of `hidden_size`, no activation (pure linear stack).
        super().__init__(input_size, output_size, hidden_sizes=[hidden_size])
|
class MeanPoolingLinear(UtteranceLevel):
    """
    The utterance-level linear probing model used in SUPERB Benchmark
    """

    def __init__(self, input_size: int, output_size: int, hidden_size: int=256):
        # One hidden Linear of `hidden_size`; pooling stays the UtteranceLevel
        # default (MeanPooling).
        super().__init__(input_size, output_size, hidden_sizes=[hidden_size])
|
class MeanPooling(nn.Module):
    """Temporal average pooling: mean over each sample's valid frames."""

    def __init__(self, input_size: int):
        super().__init__()
        self._in_size = input_size

    @property
    def input_size(self) -> int:
        return self._in_size

    @property
    def output_size(self) -> int:
        # Averaging does not change the feature dimension.
        return self._in_size

    def forward(self, xs: torch.Tensor, xs_len: torch.LongTensor):
        """Average each sample over its first `x_len` frames.

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample
        Returns:
            torch.Tensor: Output tensor (#batch, input_size)
        """
        # Per-sample slicing so padded frames beyond x_len are excluded.
        pooled = [sample[:length].mean(dim=0) for sample, length in zip(xs, xs_len)]
        return torch.stack(pooled)
|
class TemporalStatisticsPooling(nn.Module):
    """
    TemporalStatisticsPooling
    Paper: X-vectors: Robust DNN Embeddings for Speaker Recognition
    Link: http://www.danielpovey.com/files/2018_icassp_xvectors.pdf
    """

    def __init__(self, input_size: int):
        super().__init__()
        self._input_size = input_size

    @property
    def input_size(self) -> int:
        return self._input_size

    @property
    def output_size(self) -> int:
        # Mean and std are concatenated, doubling the feature dimension.
        return self._input_size * 2

    def forward(self, xs, xs_len):
        """
        Computes Temporal Statistics Pooling Module

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample

        Returns:
            torch.Tensor: Output tensor (#batch, output_size)
        """
        # Per-sample slicing so padded frames beyond x_len are excluded.
        pooled = []
        for sample, length in zip(xs, xs_len):
            valid = sample[:length]
            stats = torch.cat((valid.mean(dim=0), valid.std(dim=0)), dim=-1)
            pooled.append(stats)
        return torch.stack(pooled)
|
class SelfAttentivePooling(nn.Module):
    """
    SelfAttentivePooling
    Paper: Self-Attentive Speaker Embeddings for Text-Independent Speaker Verification
    Link: https://danielpovey.com/files/2018_interspeech_xvector_attention.pdf

    Args:
        input_size (int): feature dimension of the input frames
    """

    def __init__(self, input_size: int):
        super().__init__()
        self._indim = input_size
        self.sap_linear = nn.Linear(input_size, input_size)
        # Fixed: the attention vector was created with torch.FloatTensor(...),
        # which leaves the parameter uninitialized (arbitrary memory, possibly
        # NaN/inf). Initialize it explicitly; the parameter name and shape are
        # unchanged, so existing checkpoints still load.
        self.attention = nn.Parameter(torch.empty(input_size, 1))
        nn.init.xavier_normal_(self.attention)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._indim

    def forward(self, xs, xs_len):
        """
        Computes Self-Attentive Pooling Module

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample

        Returns:
            torch.Tensor: Output tensor (#batch, input_size)
        """
        pooled_list = []
        # Pool each sample separately so padded frames beyond x_len are ignored.
        for (x, x_len) in zip(xs, xs_len):
            x = x[:x_len].unsqueeze(0)
            # Attention weights: softmax over frames of tanh(Wx) . v
            h = torch.tanh(self.sap_linear(x))
            w = torch.matmul(h, self.attention).squeeze(dim=2)
            w = F.softmax(w, dim=1).view(x.size(0), x.size(1), 1)
            # Attention-weighted sum over the time axis.
            x = torch.sum((x * w), dim=1)
            pooled_list.append(x.squeeze(0))
        return torch.stack(pooled_list)
|
class AttentiveStatisticsPooling(nn.Module):
    """
    AttentiveStatisticsPooling
    Paper: Attentive Statistics Pooling for Deep Speaker Embedding
    Link: https://arxiv.org/pdf/1803.10963.pdf

    Args:
        input_size (int): feature dimension of the input frames
    """

    def __init__(self, input_size: int):
        super().__init__()
        self._indim = input_size
        self.sap_linear = nn.Linear(input_size, input_size)
        # Fixed: the attention vector was created with torch.FloatTensor(...),
        # which leaves the parameter uninitialized (arbitrary memory, possibly
        # NaN/inf). Initialize it explicitly; the parameter name and shape are
        # unchanged, so existing checkpoints still load.
        self.attention = nn.Parameter(torch.empty(input_size, 1))
        nn.init.xavier_normal_(self.attention)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        # Attention-weighted mean and std are concatenated.
        return self._indim * 2

    def forward(self, xs, xs_len):
        """
        Computes Attentive Statistics Pooling Module

        Args:
            xs (torch.Tensor): Input tensor (#batch, frames, input_size).
            xs_len (torch.LongTensor): with the lengths for each sample

        Returns:
            torch.Tensor: Output tensor (#batch, 2 * input_size)
        """
        pooled_list = []
        # Pool each sample separately so padded frames beyond x_len are ignored.
        for (x, x_len) in zip(xs, xs_len):
            x = x[:x_len].unsqueeze(0)
            # Attention weights: softmax over frames of tanh(Wx) . v
            h = torch.tanh(self.sap_linear(x))
            w = torch.matmul(h, self.attention).squeeze(dim=2)
            w = F.softmax(w, dim=1).view(x.size(0), x.size(1), 1)
            # Weighted mean and weighted std (clamped for numerical stability).
            mu = torch.sum((x * w), dim=1)
            rh = torch.sqrt((torch.sum(((x ** 2) * w), dim=1) - (mu ** 2)).clamp(min=1e-05))
            x = torch.cat((mu, rh), 1).squeeze(0)
            pooled_list.append(x)
        return torch.stack(pooled_list)
|
class PredictorIdentity(nn.Module):
    """
    No-op predictor placeholder for SSL problems that need no prediction head.
    """

    def __init__(self, **kwargs):
        # Extra kwargs are accepted and ignored so it can stand in for any predictor.
        super().__init__()

    def forward(self, output: Output):
        """
        Args:
            output (s3prl.Output): An Output module

        Return:
            output (s3prl.Output): exactly the same as input, an Output module
        """
        return output
|
class PredictorMockingjay(nn.Module):
    """
    The predictor model for SSL pre-training tasks.
    Currently supporting SSL problems of Mockingjay, Tera, and Audio Albert.
    """

    def __init__(self, config, output_dim, input_dim=None, **kwargs):
        """
        Args:
            config (TransformerConfig):
                A `TransformerConfig` class instance with the configuration to build a new model,
                can also be a `dict` that initializes the TransformerConfig class
            output_dim (int):
                The output dimension of predictor
            input_dim (int):
                The input dimension of predictor, if `None` is given, then use the `hidden_size` defined in `config`.
                Default: None
        """
        super(PredictorMockingjay, self).__init__()
        # Accept a plain dict for convenience; convert it to a TransformerConfig.
        if (type(config) is dict):
            config = TransformerConfig(**config)
        self.output_size = output_dim
        if (input_dim is None):
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        else:
            self.dense = nn.Linear(input_dim, config.hidden_size)
        # hidden_act may be a name (looked up in ACT2FN) or a callable.
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.output = nn.Linear(config.hidden_size, self.output_size)

    def forward(self, inputs, output_states=False):
        """
        Args:
            inputs (torch.LongTensor):
                A torch.LongTensor of shape [batch_size, sequence_length, input_dim]
            output_states (bool):
                A boolean which controls whether to return the `hidden_states` of the predictor.
                Default: False
        Return:
            Output (s3prl.Output):
                An Output module that contains `prediction` and/or `hidden_states`.
        """
        # dense -> activation -> LayerNorm -> output projection
        hidden_states = inputs.hidden_states
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        prediction = self.output(hidden_states)
        if output_states:
            return Output(hidden_states=hidden_states, prediction=prediction)
        else:
            return Output(prediction=prediction)
|
class softmax(nn.Module):
    """Plain softmax (cross-entropy) loss under the unified speaker-loss interface."""

    def __init__(self, input_size: int, output_size: int):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.fc = nn.Linear(input_size, output_size)
        # NOTE(review): the attribute name keeps the original misspelling
        # ("criertion") so external references and checkpoints stay compatible.
        self.criertion = nn.CrossEntropyLoss()

    @property
    def input_size(self):
        return self._indim

    @property
    def output_size(self):
        return self._outdim

    def forward(self, x: torch.Tensor, label: torch.LongTensor):
        """
        Args:
            x (torch.Tensor): (batch_size, input_size)
            label (torch.LongTensor): (batch_size, )

        Returns:
            loss (torch.float)
            logit (torch.Tensor): (batch_size, output_size)
        """
        assert x.size(0) == label.size(0)
        assert x.size(1) == self.input_size
        # L2-normalize the embeddings before the linear classifier.
        logits = self.fc(F.normalize(x, dim=1))
        return (self.criertion(logits, label), logits)
|
class amsoftmax(nn.Module):
    """Additive Margin Softmax (AM-Softmax) loss.

    Args:
        input_size (int): The input feature size
        output_size (int): The output feature size (number of classes)
        margin (float): margin subtracted from the target-class cosine similarity
        scale (float): scaling factor applied to the cosine logits
    """

    def __init__(self, input_size: int, output_size: int, margin: float = 0.2, scale: float = 30):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.margin = margin
        self.scale = scale
        self.W = torch.nn.Parameter(torch.randn(input_size, output_size), requires_grad=True)
        self.ce = nn.CrossEntropyLoss()
        nn.init.xavier_normal_(self.W, gain=1)

    @property
    def input_size(self):
        return self._indim

    @property
    def output_size(self):
        return self._outdim

    def forward(self, x: torch.Tensor, label: torch.LongTensor):
        """
        Args:
            x (torch.Tensor): (batch_size, input_size)
            label (torch.LongTensor): (batch_size, )

        Returns:
            loss (torch.float)
            logit (torch.Tensor): (batch_size, output_size)
        """
        assert x.size(0) == label.size(0)
        assert x.size(1) == self.input_size
        # Cosine similarity between L2-normalized embeddings and class weights.
        x_norm = torch.norm(x, p=2, dim=1, keepdim=True).clamp(min=1e-12)
        x_norm = torch.div(x, x_norm)
        w_norm = torch.norm(self.W, p=2, dim=0, keepdim=True).clamp(min=1e-12)
        w_norm = torch.div(self.W, w_norm)
        costh = torch.mm(x_norm, w_norm)
        # Build the per-sample margin directly on costh's device. This avoids
        # the CPU round-trip (label -> cpu, margin tensor -> cuda) of the
        # original implementation and works on any accelerator, not just CUDA.
        delt_costh = torch.zeros_like(costh).scatter_(
            1, label.view(-1, 1).to(costh.device), self.margin
        )
        costh_m_s = self.scale * (costh - delt_costh)
        loss = self.ce(costh_m_s, label)
        return (loss, costh_m_s)
|
class TDNN(nn.Module):
    """Time-Delay Neural Network layer.

    As defined by https://www.danielpovey.com/files/2015_interspeech_multisplice.pdf.
    Context size and dilation determine which frames are gathered into each
    window (context size is not defined in the traditional sense here):

        context size 5, dilation 1 -> [-2, -1, 0, 1, 2]
        context size 3, dilation 2 -> [-2, 0, 2]
        context size 1, dilation 1 -> [0]

    Args:
        input_size (int): The input feature size
        output_size (int): The output feature size
        context_size (int): See examples above
        dilation (int): See examples above
        dropout_p (float): (default, 0.0) The dropout rate
        batch_norm (bool): (default, True) Use batch norm after the nonlinearity
    """

    def __init__(self, input_size: int, output_size: int, context_size: int, dilation: int, dropout_p: float = 0.0, batch_norm: bool = True):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.context_size = context_size
        self.dilation = dilation
        self.dropout_p = dropout_p
        self.batch_norm = batch_norm
        # One linear kernel applied to each unfolded context window.
        self.kernel = nn.Linear(input_size * context_size, output_size)
        self.nonlinearity = nn.ReLU()
        if batch_norm:
            self.bn = nn.BatchNorm1d(output_size)
        if dropout_p:
            self.drop = nn.Dropout(p=dropout_p)

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (torch.FloatTensor): (batch, seq_len, input_size)

        Returns:
            torch.FloatTensor: (batch, seq_len', output_size) where seq_len' is
            shortened by the (dilated) context window.
        """
        _, _, feat_dim = x.shape
        assert feat_dim == self.input_size, 'Input size was wrong. Expected ({}), got ({})'.format(self.input_size, feat_dim)
        # Gather every dilated context window: (batch, seq_len', context*input_size).
        windows = F.unfold(
            x.unsqueeze(1),
            (self.context_size, self.input_size),
            stride=(1, self.input_size),
            dilation=(self.dilation, 1),
        ).transpose(1, 2)
        out = self.nonlinearity(self.kernel(windows))
        if self.dropout_p:
            out = self.drop(out)
        if self.batch_norm:
            # BatchNorm1d normalizes over channels, so swap (time, feature).
            out = self.bn(out.transpose(1, 2)).transpose(1, 2)
        return out
|
class XVectorBackbone(nn.Module):
    """The TDNN stack as in https://danielpovey.com/files/2018_odyssey_xvector_lid.pdf.

    Args:
        input_size (int): The input feature size, usually the output size of upstream models
        output_size (int): (default, 1500) The size of the frame-level output
        dropout_p (float): (default, 0.0) The dropout rate
        batch_norm (bool): (default, True) Use batch norm for TDNN layers
    """

    # BUGFIX: the original annotated `batch_norm: False = True`, using the
    # value False as a type; the parameter is a bool that defaults to True
    # (the docstring previously claimed a default of False as well).
    def __init__(self, input_size: int, output_size: int = 1500, dropout_p: float = 0.0, batch_norm: bool = True):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.module = nn.Sequential(
            TDNN(input_size=input_size, output_size=512, context_size=5, dilation=1, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=512, context_size=3, dilation=2, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=512, context_size=3, dilation=3, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=512, context_size=1, dilation=1, dropout_p=dropout_p, batch_norm=batch_norm),
            TDNN(input_size=512, output_size=output_size, context_size=1, dilation=1, dropout_p=dropout_p, batch_norm=batch_norm),
        )

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x: torch.Tensor):
        """
        Args:
            x (torch.FloatTensor): (batch, seq_len, input_size)

        Returns:
            torch.FloatTensor: (batch, seq_len', output_size) where seq_len' is
            shortened by the TDNN context windows.
        """
        return self.module(x)
|
class _SEModule(nn.Module):
def __init__(self, channels, bottleneck=128):
super().__init__()
self.se = nn.Sequential(nn.AdaptiveAvgPool1d(1), nn.Conv1d(channels, bottleneck, kernel_size=1, padding=0), nn.ReLU(), nn.Conv1d(bottleneck, channels, kernel_size=1, padding=0), nn.Sigmoid())
def forward(self, input):
x = self.se(input)
return (input * x)
|
class _Bottle2neck(nn.Module):
    """Res2Net bottleneck with dilated convolutions and an SE block.

    Building block of ECAPA-TDNN: the channels are split into `scale` groups;
    each group (except the last, which passes through untouched) is convolved
    and summed with the previous group's output before its own convolution.
    """

    def __init__(self, inplanes, planes, kernel_size=None, dilation=None, scale=8):
        super().__init__()
        width = int(math.floor(planes / scale))
        self.conv1 = nn.Conv1d(inplanes, width * scale, kernel_size=1)
        self.bn1 = nn.BatchNorm1d(width * scale)
        # Only the first `scale - 1` splits get their own conv/bn branch.
        self.nums = scale - 1
        pad = math.floor(kernel_size / 2) * dilation  # "same" padding for the dilated conv
        self.convs = nn.ModuleList(
            nn.Conv1d(width, width, kernel_size=kernel_size, dilation=dilation, padding=pad)
            for _ in range(self.nums)
        )
        self.bns = nn.ModuleList(nn.BatchNorm1d(width) for _ in range(self.nums))
        self.conv3 = nn.Conv1d(width * scale, planes, kernel_size=1)
        self.bn3 = nn.BatchNorm1d(planes)
        self.relu = nn.ReLU()
        self.width = width
        self.se = _SEModule(planes)

    def forward(self, x):
        # Residual add at the end requires planes == inplanes.
        residual = x
        out = self.bn1(self.relu(self.conv1(x)))
        splits = torch.split(out, self.width, 1)
        merged = None
        prev = None
        for i, (conv, bn) in enumerate(zip(self.convs, self.bns)):
            # Hierarchical residual: each branch also sees the previous branch.
            branch = splits[i] if i == 0 else prev + splits[i]
            prev = bn(self.relu(conv(branch)))
            merged = prev if merged is None else torch.cat((merged, prev), 1)
        # The last split bypasses the convolutions entirely.
        out = torch.cat((merged, splits[self.nums]), 1)
        out = self.bn3(self.relu(self.conv3(out)))
        out = self.se(out)
        out += residual
        return out
|
class ECAPA_TDNN(nn.Module):
    """ECAPA-TDNN frame-level speaker encoder, https://arxiv.org/abs/2005.07143.

    Reference code: https://github.com/TaoRuijie/ECAPA-TDNN.

    Args:
        input_size (int): The input feature size, usually the output size of upstream models
        output_size (int): (default, 1536) The size of the frame-level output
        C (int): (default, 1024) The internal channel dimension
    """

    def __init__(self, input_size: int = 80, output_size: int = 1536, C: int = 1024, **kwargs):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        self.conv1 = nn.Conv1d(input_size, C, kernel_size=5, stride=1, padding=2)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(C)
        self.layer1 = _Bottle2neck(C, C, kernel_size=3, dilation=2, scale=8)
        self.layer2 = _Bottle2neck(C, C, kernel_size=3, dilation=3, scale=8)
        self.layer3 = _Bottle2neck(C, C, kernel_size=3, dilation=4, scale=8)
        # Aggregates the three block outputs into the final frame-level features.
        self.layer4 = nn.Conv1d(3 * C, output_size, kernel_size=1)

    @property
    def input_size(self):
        return self._indim

    @property
    def output_size(self):
        return self._outdim

    def forward(self, x: torch.FloatTensor):
        """
        Args:
            x (torch.FloatTensor): size (batch, seq_len, input_size)

        Returns:
            torch.FloatTensor: size (batch, seq_len, output_size)
        """
        # Conv1d wants (batch, channels, time).
        feats = self.bn1(self.relu(self.conv1(x.transpose(1, 2).contiguous())))
        out1 = self.layer1(feats)
        out2 = self.layer2(feats + out1)
        out3 = self.layer3(feats + out1 + out2)
        merged = self.relu(self.layer4(torch.cat((out1, out2, out3), dim=1)))
        return merged.transpose(1, 2).contiguous()
|
class SpeakerEmbeddingExtractor(nn.Module):
    """Frame-level backbone + temporal pooling = utterance-level speaker embedding.

    Args:
        input_size (int): The input feature size, usually the output size of upstream models
        output_size (int): (default, 1500) The backbone output size before pooling
        backbone (str): (default, 'XVector') Which speaker backbone to use
        pooling_type (str): (default, 'TemporalAveragePooling') Which pooling method to use
    """

    def __init__(self, input_size: int, output_size: int = 1500, backbone: str = 'XVector', pooling_type: str = 'TemporalAveragePooling'):
        super().__init__()
        self._indim = input_size
        self._outdim = output_size
        if backbone == 'XVector':
            self.backbone = XVectorBackbone(input_size=input_size, output_size=output_size)
            self.offset = XVECTOR_TDNNS_LENGTH_REDUCTION
        elif backbone == 'ECAPA-TDNN':
            self.backbone = ECAPA_TDNN(input_size=input_size, output_size=output_size)
            self.offset = ECAPA_TDNNS_LENGTH_REDUCTION
        else:
            raise ValueError('{} backbone type is not defined'.format(backbone))
        # Long names and their abbreviations map to the same pooling class.
        poolings = {
            'TemporalAveragePooling': TemporalAveragePooling,
            'TAP': TemporalAveragePooling,
            'TemporalStatisticsPooling': TemporalStatisticsPooling,
            'TSP': TemporalStatisticsPooling,
            'SelfAttentivePooling': SelfAttentivePooling,
            'SAP': SelfAttentivePooling,
            'AttentiveStatisticsPooling': AttentiveStatisticsPooling,
            'ASP': AttentiveStatisticsPooling,
        }
        if pooling_type not in poolings:
            raise ValueError('{} pooling type is not defined'.format(pooling_type))
        self.pooling = poolings[pooling_type](self.backbone.output_size)
        self._outdim = self.pooling.output_size

    @property
    def input_size(self) -> int:
        return self._indim

    @property
    def output_size(self) -> int:
        return self._outdim

    def forward(self, x: torch.Tensor, xlen: torch.LongTensor = None):
        """
        Args:
            x (torch.Tensor): size (batch, seq_len, input_size)
            xlen (torch.LongTensor): size (batch, )

        Returns:
            x (torch.Tensor): size (batch, output_size)
        """
        feats = self.backbone(x)
        if xlen is None:
            xlen = torch.LongTensor([feats.shape[1]] * feats.shape[0])
        else:
            # The backbone shortens the sequence by `offset` frames.
            xlen = torch.LongTensor([max(length - self.offset, 0) for length in xlen])
        return self.pooling(feats, xlen)
|
class _UtteranceExtractor(nn.Module):
def __init__(self, input_size, output_size):
super().__init__()
self._indim = input_size
self._outdim = output_size
self.linear1 = nn.Linear(input_size, output_size)
self.linear2 = nn.Linear(output_size, output_size)
self.act_fn = nn.ReLU()
@property
def input_size(self):
return self._indim
@property
def output_size(self):
return self._outdim
def forward(self, x_BxH):
hid_BxH = self.linear1(x_BxH)
hid_BxH = self.act_fn(hid_BxH)
if self.training:
hid_BxH = self.linear2(hid_BxH)
hid_BxH = self.act_fn(hid_BxH)
return hid_BxH
|
class SuperbXvector(nn.Module):
    """The X-vector model used in the SUPERB Benchmark with its exact default arguments.

    Args:
        input_size (int): The input feature size, usually the output size of upstream models
        output_size (int): (default, 512) The size of the final speaker embedding
        hidden_size (int): (default, 512) The major hidden size in the network
        aggregation_size (int): (default, 1500) The frame-level size before pooling, usually large
        dropout_p (float): (default, 0.0) The dropout rate
        batch_norm (bool): (default, False) Use batch norm for TDNN layers
    """

    def __init__(self, input_size: int, output_size: int = 512, hidden_size: int = 512, aggregation_size: int = 1500, dropout_p: float = 0.0, batch_norm: bool = False):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size
        self.projector = nn.Linear(input_size, hidden_size)
        self.tdnns = XVectorBackbone(hidden_size, aggregation_size, dropout_p=dropout_p, batch_norm=batch_norm)
        self.pooling = TemporalStatisticsPooling(self.tdnns.output_size)
        self.affine = _UtteranceExtractor(self.pooling.output_size, output_size)

    @property
    def input_size(self) -> int:
        return self._input_size

    @property
    def output_size(self) -> int:
        return self._output_size

    def forward(self, x, x_len):
        """
        Args:
            x (torch.FloatTensor): (batch_size, seq_len, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Returns:
            torch.FloatTensor: (batch_size, output_size)
        """
        frames = self.tdnns(self.projector(x))
        # The TDNN stack shortens every sequence by a fixed number of frames.
        valid_len = x_len - XVECTOR_TDNNS_LENGTH_REDUCTION
        assert (valid_len <= 0).sum() == 0, 'The input sequence is too short for the X-vector model'
        pooled = self.pooling(frames, valid_len)
        return self.affine(pooled)
|
class TransformerConfig(object):
    """Configuration container for a `TransformerModel`.

    Args:
        hidden_size (int): encoder hidden dimension.
        num_hidden_layers (int): number of transformer layers.
        num_attention_heads (int): attention heads per layer.
        intermediate_size (int): feed-forward inner dimension.
        hidden_act (str or callable): activation in the feed-forward block.
        hidden_dropout_prob (float): dropout applied to hidden states.
        attention_probs_dropout_prob (float): dropout applied to attention weights.
        initializer_range (float): std used for weight initialization.
        layer_norm_eps (float): epsilon used by layer normalization.
        share_layer (bool): share parameters across all layers.
        pre_layer_norm (bool): apply layer norm before each sub-layer.
    """

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 3,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = 'gelu',
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        share_layer: bool = False,
        pre_layer_norm: bool = False,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_layer = share_layer
        self.pre_layer_norm = pre_layer_norm
|
def prune_linear_layer(layer, index, dim=0):
    """Prune a linear layer to keep only the entries selected by `index` along `dim`.

    Used to remove attention heads. Returns a NEW nn.Linear (on the same
    device) whose parameters require gradients.
    """
    index = index.to(layer.weight.device)
    kept_weight = layer.weight.index_select(dim, index).clone().detach()
    has_bias = layer.bias is not None
    if has_bias:
        # Pruning input features (dim == 1) leaves the bias untouched.
        kept_bias = (layer.bias if dim == 1 else layer.bias[index]).clone().detach()
    new_shape = list(layer.weight.size())
    new_shape[dim] = len(index)
    pruned = nn.Linear(new_shape[1], new_shape[0], bias=has_bias).to(layer.weight.device)
    with torch.no_grad():
        pruned.weight.copy_(kept_weight.contiguous())
        if has_bias:
            pruned.bias.copy_(kept_bias.contiguous())
    return pruned
|
def gelu(x):
    """Gaussian Error Linear Unit activation (exact, erf-based formulation).

    For information: OpenAI GPT's gelu is slightly different (and gives
    slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))).
    Also see https://arxiv.org/abs/1606.08415.
    """
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))
|
def swish(x):
return (x * torch.sigmoid(x))
|
class TransformerLayerNorm(nn.Module):
    """Layer normalization in the TF style (epsilon inside the square root)."""

    def __init__(self, hidden_size, eps=1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        var = (x - mean).pow(2).mean(-1, keepdim=True)
        normalized = (x - mean) / torch.sqrt(var + self.variance_epsilon)
        return self.weight * normalized + self.bias
|
class TransformerInputRepresentations(nn.Module):
    """Build input representations from a spectrogram plus position encodings.

    Projects the spectrogram to the hidden size, adds the position encodings,
    then applies layer norm and dropout.
    """

    def __init__(self, config, input_dim):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.spec_transform = nn.Linear(input_dim, config.hidden_size)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, spec, pos_enc):
        projected = self.spec_transform(spec)
        reps = self.LayerNorm(projected + pos_enc)
        return self.dropout(reps)
|
class TransformerSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
                % (config.hidden_size, config.num_attention_heads)
            )
        self.output_attentions = output_attentions
        self.keep_multihead_output = keep_multihead_output
        self.multihead_output = None
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        """Reshape (B, L, all_head_size) -> (B, num_heads, L, head_size)."""
        reshaped = x.view(*(x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)))
        return reshaped.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask, head_mask=None):
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        # Scaled dot-product scores with the additive attention mask applied.
        scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        scores = scores / math.sqrt(self.attention_head_size)
        scores = scores + attention_mask
        probs = self.dropout(nn.Softmax(dim=-1)(scores))
        if head_mask is not None:
            probs = probs * head_mask
        context = torch.matmul(probs, value_layer)
        if self.keep_multihead_output:
            # Keep the per-head context (and its gradient) for head analysis.
            self.multihead_output = context
            self.multihead_output.retain_grad()
        context = context.permute(0, 2, 1, 3).contiguous()
        context = context.view(*(context.size()[:-2] + (self.all_head_size,)))
        if self.output_attentions:
            return (probs, context)
        return context
|
class TransformerSelfOutput(nn.Module):
    """Output projection + residual connection for the attention sub-layer.

    In pre-layer-norm configurations the LayerNorm is applied by the caller
    BEFORE the sub-layer, so it is skipped here.
    """

    def __init__(self, config):
        super().__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        residual = projected + input_tensor
        return residual if self.pre_layer_norm else self.LayerNorm(residual)
|
class TransformerAttention(nn.Module):
    """Attention sub-layer: self-attention + output projection, with head pruning."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super().__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.self = TransformerSelfAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.output = TransformerSelfOutput(config)
        if self.pre_layer_norm:
            # Shared with the output module; applied to the INPUT in pre-LN mode.
            self.LayerNorm = self.output.LayerNorm

    def prune_heads(self, heads):
        """Remove the given heads from the query/key/value/output projections."""
        if len(heads) == 0:
            return
        keep = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            keep[head] = 0
        keep = keep.view(-1).contiguous().eq(1)
        index = torch.arange(len(keep))[keep].long()
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask, head_mask=None):
        if self.pre_layer_norm:
            self_output = self.self(self.LayerNorm(input_tensor), attention_mask, head_mask)
        else:
            self_output = self.self(input_tensor, attention_mask, head_mask)
        if self.output_attentions:
            attentions, self_output = self_output
        attention_output = self.output(self_output, input_tensor)
        if self.output_attentions:
            return (attentions, attention_output)
        return attention_output
|
class TransformerIntermediate(nn.Module):
    """Feed-forward expansion: dense projection to the intermediate size plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # The activation may be given by name (looked up in ACT2FN) or directly
        # as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
|
class TransformerOutput(nn.Module):
    """Feed-forward contraction + residual connection for the FFN sub-layer.

    In pre-layer-norm configurations the LayerNorm is applied by the caller
    BEFORE the sub-layer, so it is skipped here.
    """

    def __init__(self, config):
        super().__init__()
        self.pre_layer_norm = config.pre_layer_norm
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.LayerNorm = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        residual = projected + input_tensor
        return residual if self.pre_layer_norm else self.LayerNorm(residual)
|
class TransformerLayer(nn.Module):
    """One transformer block: attention sub-layer followed by a feed-forward sub-layer."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False):
        super().__init__()
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        self.attention = TransformerAttention(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.intermediate = TransformerIntermediate(config)
        self.output = TransformerOutput(config)
        if self.pre_layer_norm:
            # Shared with the output module; applied before the FFN in pre-LN mode.
            self.LayerNorm = self.output.LayerNorm

    def forward(self, hidden_states, attention_mask, head_mask=None):
        attention_output = self.attention(hidden_states, attention_mask, head_mask)
        if self.output_attentions:
            attentions, attention_output = attention_output
        ffn_input = self.LayerNorm(attention_output) if self.pre_layer_norm else attention_output
        intermediate_output = self.intermediate(ffn_input)
        layer_output = self.output(intermediate_output, attention_output)
        if self.output_attentions:
            return (attentions, layer_output)
        return layer_output
|
class TransformerEncoder(nn.Module):
    """Stack of transformer layers, optionally weight-shared across layers."""

    def __init__(self, config, output_attentions=False, keep_multihead_output=False, **kwargs):
        super().__init__()
        if type(config) is dict:
            config = TransformerConfig(**config)
        self.output_attentions = output_attentions
        self.pre_layer_norm = config.pre_layer_norm
        proto = TransformerLayer(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        if config.share_layer:
            # Every entry references the SAME module, so parameters are shared.
            self.layer = nn.ModuleList([proto] * config.num_hidden_layers)
        else:
            self.layer = nn.ModuleList([copy.deepcopy(proto) for _ in range(config.num_hidden_layers)])
        if self.pre_layer_norm:
            # One extra norm for the final hidden states (pre-LN transformers
            # need a terminal LayerNorm).
            norm_proto = TransformerLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
            self.LayerNorm = nn.ModuleList([copy.deepcopy(norm_proto) for _ in range(config.num_hidden_layers + 1)])

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, head_mask=None):
        """Run the layer stack; `head_mask` must be indexable per layer (e.g. a list)."""
        all_encoder_layers = []
        all_attentions = []
        for i, layer_module in enumerate(self.layer):
            if output_all_encoded_layers:
                snapshot = self.LayerNorm[i](hidden_states) if self.pre_layer_norm else hidden_states
                all_encoder_layers.append(snapshot)
            hidden_states = layer_module(hidden_states, attention_mask, head_mask[i])
            if self.output_attentions:
                attentions, hidden_states = hidden_states
                all_attentions.append(attentions)
        final = self.LayerNorm[-1](hidden_states) if self.pre_layer_norm else hidden_states
        all_encoder_layers.append(final)
        if self.output_attentions:
            return (all_attentions, all_encoder_layers)
        return all_encoder_layers
|
class TransformerInitModel(nn.Module):
    """Abstract base class that handles transformer weight initialization."""

    def __init__(self, config, output_attentions, *inputs, **kwargs):
        super().__init__()
        self.config = config
        self.output_attentions = output_attentions

    def init_Transformer_weights(self, module):
        """Initialize one submodule's weights (meant to be used with `self.apply`)."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Normal init with the configured std.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, TransformerLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Linear biases are always zeroed, whatever branch ran above.
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
|
class TransformerMockingjay(TransformerInitModel):
    """The Transformer encoder used by the Mockingjay / Tera / Audio Albert upstreams."""

    def __init__(self, config, input_dim, output_attentions=False, keep_multihead_output=False, with_input_module=True):
        """
        Args:
            config (TransformerConfig or dict): model configuration; a plain
                dict is converted into a TransformerConfig.
            input_dim (int): input feature dimension of the model.
            output_attentions (bool): also return attention weights computed at
                each layer. Default: False
            keep_multihead_output (bool): keep the multi-head attention outputs
                with their gradients, e.g. for head-importance metrics.
                Default: False
            with_input_module (bool): include a `TransformerInputRepresentations`
                projection in front of the encoder. Default: True
        """
        super(TransformerMockingjay, self).__init__(config, output_attentions)
        self.with_input_module = with_input_module
        if self.with_input_module:
            self.input_representations = TransformerInputRepresentations(config, input_dim)
        self.encoder = TransformerEncoder(config, output_attentions=output_attentions, keep_multihead_output=keep_multihead_output)
        self.apply(self.init_Transformer_weights)
        self.input_size = input_dim

    def prune_heads(self, heads_to_prune):
        """Prune attention heads of the model.

        Args:
            heads_to_prune (dict): {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def get_multihead_outputs(self):
        """Return each layer's saved multi-head attention output (with gradients)."""
        return [layer.attention.self.multihead_output for layer in self.encoder.layer]

    def forward(self, spec_input, pos_enc=None, attention_mask=None, output_all_encoded_layers=False, head_mask=None):
        """
        Args:
            spec_input (torch.Tensor): [batch_size, seq_len, feature_dim] frames,
                with masked frames produced by `process_train_MAM_data()` in
                transformer/mam.py during training.
            pos_enc (torch.Tensor): [batch_size, seq_len, hidden_size] position
                encodings from `fast_position_encoding()` in transformer/mam.py.
            attention_mask (torch.Tensor): optional mask with values in [0, 1],
                1 for positions to attend and 0 for padding.
            output_all_encoded_layers (bool): return the hidden states of every
                layer instead of only the last one. Default: False
            head_mask (torch.Tensor): optional [num_heads] or
                [num_layers, num_heads] mask with values in [0, 1];
                1.0 keeps a head, 0.0 nullifies it.

        Returns:
            s3prl.Output: `hidden_states` is a list of
            [batch_size, seq_len, hidden_size] tensors when
            `output_all_encoded_layers` is True, else only the last layer's
            tensor; when `output_attentions` is True, `output` additionally
            holds the per-layer attention weights.
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(spec_input)
        # Convert to an additive mask: 0 where attended, -10000 where masked.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(dtype=spec_input.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # BUGFIX: `expand_as` takes a tensor argument, not sizes; the
                # original `expand_as(self.config.num_hidden_layers, -1, ...)`
                # raised a TypeError whenever a 1-D head_mask was passed.
                # `expand` broadcasts the mask to every layer as intended.
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.to(dtype=spec_input.dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers
        if self.with_input_module:
            input_representations = self.input_representations(spec_input, pos_enc)
        else:
            input_representations = spec_input
        encoded_layers = self.encoder(input_representations, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, head_mask=head_mask)
        if self.output_attentions:
            (all_attentions, encoded_layers) = encoded_layers
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        if self.output_attentions:
            return Output(output=all_attentions, hidden_states=encoded_layers)
        return Output(hidden_states=encoded_layers)
|
class VqApcLayer(nn.Module):
    """Vector-quantization layer that follows an RNN layer (used by VQ-APC).

    Maps pre-quantized RNN features to discrete codebook embeddings via
    gumbel-softmax during training or hard argmax at test time, while
    accumulating per-code usage statistics.
    """

    def __init__(self, input_size, codebook_size, code_dim, gumbel_temperature):
        """
        Args:
            input_size (int): pre-quantized input feature size, usually the
                RNN hidden size.
            codebook_size (int): number of codes in the codebook.
            code_dim (int): size of each code; must equal the RNN hidden size
                unless this is the last layer.
            gumbel_temperature (float): temperature for gumbel-softmax.
        """
        super(VqApcLayer, self).__init__()
        self.codebook_size = codebook_size
        self.vq_logits = nn.Linear(input_size, codebook_size)
        self.gumbel_temperature = gumbel_temperature
        # Projects a (one-hot) code assignment onto its embedding vector.
        self.codebook_CxE = nn.Linear(codebook_size, code_dim, bias=False)
        # Accumulated selection count for every codebook entry.
        self.token_usg = np.zeros(codebook_size)

    def forward(self, inputs_BxLxI, testing=False):
        """Quantize a batch of feature sequences.

        Args:
            inputs_BxLxI: (batch, length, input_size) pre-quantized features.
            testing: when True, select codes by hard argmax instead of
                gumbel-softmax sampling.

        Returns:
            Output with `output` (code embeddings, B x L x E) and
            `logit` (raw codebook logits, B x L x C).
        """
        logits_BxLxC = self.vq_logits(inputs_BxLxI)
        if testing:
            # Deterministic hard selection at test time.
            shape = logits_BxLxC.size()
            _, best = logits_BxLxC.max(dim=-1)
            onehot_BxLxC = torch.zeros_like(logits_BxLxC).view(-1, shape[-1])
            onehot_BxLxC.scatter_(1, best.view(-1, 1), 1)
            onehot_BxLxC = onehot_BxLxC.view(*shape)
        else:
            # Straight-through gumbel-softmax keeps the op differentiable.
            onehot_BxLxC = gumbel_softmax(
                logits_BxLxC, tau=self.gumbel_temperature, hard=True, eps=EPS, dim=-1
            )
        self.token_usg += (
            onehot_BxLxC.detach().cpu().reshape(-1, self.codebook_size).sum(dim=0).numpy()
        )
        codes_BxLxE = self.codebook_CxE(onehot_BxLxC)
        return Output(output=codes_BxLxE, logit=logits_BxLxC)

    def report_ppx(self):
        """Perplexity of the accumulated distribution over the codebook."""
        acc_usg = self.token_usg / sum(self.token_usg)
        return 2 ** sum(-acc_usg * np.log2(acc_usg + EPS))

    def report_usg(self):
        """Normalized usage of each codebook entry; resets the counter."""
        acc_usg = self.token_usg / sum(self.token_usg)
        self.token_usg = np.zeros(self.codebook_size)
        return acc_usg
|
def get_optimizer(model_params, total_steps, optimizer_config):
    """Build an optimizer from a config dict.

    Args:
        model_params: list of torch modules whose parameters are optimized.
        total_steps: total number of planned training steps (for schedules).
        optimizer_config: dict with a 'name' key selecting the factory
            (e.g. 'TorchOptim' -> get_TorchOptim); remaining keys are
            forwarded to the factory.

    Returns:
        The optimizer produced by the matching `get_<name>` factory.

    Raises:
        ValueError: if no factory named `get_<name>` exists in this module.
    """
    optimizer_config = copy.deepcopy(optimizer_config)  # don't mutate caller's dict
    optimizer_name = optimizer_config.pop('name')
    # Security/robustness fix: look the factory up by name instead of eval(),
    # so a malicious or mistyped config value cannot execute arbitrary code.
    factory = globals().get(f'get_{optimizer_name}')
    if factory is None:
        raise ValueError(f'Unknown optimizer name: {optimizer_name}')
    optimizer = factory(model_params, total_steps=total_steps, **optimizer_config)
    return optimizer
|
def get_grouped_parameters(model_params):
    """Split module parameters into weight-decayed and non-decayed groups.

    Biases and LayerNorm parameters are excluded from weight decay,
    following the standard BERT training recipe.
    """
    named_params = []
    for module in model_params:
        named_params.extend(module.named_parameters())

    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    def skips_decay(name):
        # True when the parameter name matches any no-decay pattern.
        return any(nd in name for nd in no_decay)

    decayed = [p for n, p in named_params if not skips_decay(n)]
    undecayed = [p for n, p in named_params if skips_decay(n)]
    grouped_parameters = [
        {'params': decayed, 'weight_decay': 0.01},
        {'params': undecayed, 'weight_decay': 0.0},
    ]
    return grouped_parameters
|
def get_BertAdam_with_schedule(model_params, lr=0.0002, total_steps=20000, warmup_proportion=0.07, **kwargs):
    """BertAdam over grouped parameters with a warmup schedule spanning
    `total_steps` updates."""
    return BertAdam(
        get_grouped_parameters(model_params),
        lr=lr,
        warmup=warmup_proportion,
        t_total=total_steps,
    )
|
def get_AdamW_with_schedule(model_params, lr=0.0002, total_steps=20000, warmup_proportion=0.07, **kwargs):
    """AdamW behaviour via Lamb (adam=True fixes the trust ratio to 1) with a
    warmup schedule spanning `total_steps` updates."""
    return Lamb(
        get_grouped_parameters(model_params),
        lr=lr,
        warmup=warmup_proportion,
        t_total=total_steps,
        adam=True,
        correct_bias=True,
        **kwargs,
    )
|
def get_Lamb_with_schedule(model_params, lr=0.0002, total_steps=20000, warmup_proportion=0.07, **kwargs):
    """Lamb over grouped parameters with a warmup schedule spanning
    `total_steps` updates."""
    return Lamb(
        get_grouped_parameters(model_params),
        lr=lr,
        warmup=warmup_proportion,
        t_total=total_steps,
        adam=False,
        correct_bias=False,
        **kwargs,
    )
|
def get_Adam(model_params, lr=0.0002, **kwargs):
    """Plain Adam over all parameters of the given modules."""
    params = [p for module in model_params for p in module.parameters()]
    return Adam(params, lr=lr, betas=(0.9, 0.999))
|
def get_AdamW(model_params, lr=0.0002, **kwargs):
    """AdamW (decoupled weight decay) over all parameters of the modules."""
    params = [p for module in model_params for p in module.parameters()]
    return AdamW(params, lr=lr)
|
def get_TorchOptim(model_params, torch_optim_name, **kwargs):
    """Build any torch.optim optimizer by class name.

    Args:
        model_params: list of torch modules whose parameters are optimized.
        torch_optim_name: class name inside torch.optim (e.g. 'SGD', 'Adam').
        **kwargs: forwarded to the optimizer constructor; a 'total_steps'
            entry (injected by get_optimizer) is dropped because plain torch
            optimizers do not accept it.

    Returns:
        The instantiated torch.optim optimizer.
    """
    params = []
    for module in model_params:
        params += list(module.parameters())
    Opt_class = getattr(torch.optim, torch_optim_name)
    # Fix: tolerate configs without 'total_steps' instead of raising KeyError
    # (the original popped unconditionally).
    kwargs.pop('total_steps', None)
    optim = Opt_class(params, **kwargs)
    return optim
|
class AdamW(Optimizer):
    """Adam with decoupled weight decay (AdamW), as introduced in
    `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.

    Parameters:
        params: iterable of parameters to optimize, or dicts defining
            parameter groups.
        lr: the learning rate (default 1e-3).
        betas: Adam's (b1, b2) coefficients.
        eps: Adam's epsilon for numerical stability.
        weight_decay: decoupled weight decay to apply.
        correct_bias: whether to apply Adam's bias correction (BERT-style
            training disables it).
    """

    def __init__(self, params: Iterable[torch.nn.parameter.Parameter], lr: float=0.001, betas: Tuple[(float, float)]=(0.9, 0.999), eps: float=1e-07, weight_decay: float=0.0, correct_bias: bool=True):
        if lr < 0.0:
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        for b in (betas[0], betas[1]):
            if not (0.0 <= b < 1.0):
                raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(b))
        if not (0.0 <= eps):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
        super().__init__(params, defaults)

    def step(self, closure: Callable=None):
        """Perform a single optimization step.

        Arguments:
            closure: optional callable that re-evaluates the model and
                returns the loss.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            beta1, beta2 = group['betas']
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if not state:
                    # Lazy per-parameter state initialization.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                state['step'] += 1
                # In-place update of biased first/second moment estimates.
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']:
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
                # Decoupled weight decay: shrink the weights directly instead
                # of folding the penalty into the gradient.
                if group['weight_decay'] > 0.0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])
        return loss

    def get_lr(self):
        """Learning rate of every parameter whose state is initialized."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                if self.state[p]:
                    lr.append(group['lr'])
        return lr
|
class _LRSchedule(ABC):
' Parent of all LRSchedules here. '
warn_t_total = False
def __init__(self, warmup=0.002, t_total=(- 1), **kw):
'\n :param warmup: what fraction of t_total steps will be used for linear warmup\n :param t_total: how many training steps (updates) are planned\n :param kw:\n '
super(_LRSchedule, self).__init__(**kw)
if (t_total < 0):
logger.warning('t_total value of {} results in schedule not being applied'.format(t_total))
if ((not (0.0 <= warmup < 1.0)) and (not (warmup == (- 1)))):
raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0[ or -1'.format(warmup))
warmup = max(warmup, 0.0)
(self.warmup, self.t_total) = (float(warmup), float(t_total))
self.warned_for_t_total_at_progress = (- 1)
def get_lr(self, step, nowarn=False):
"\n :param step: which of t_total steps we're on\n :param nowarn: set to True to suppress warning regarding training beyond specified 't_total' steps\n :return: learning rate multiplier for current update\n "
if (self.t_total < 0):
return 1.0
progress = (float(step) / self.t_total)
ret = self.get_lr_(progress)
if ((not nowarn) and self.warn_t_total and (progress > 1.0) and (progress > self.warned_for_t_total_at_progress)):
logger.warning("Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly.".format(ret, self.__class__.__name__))
self.warned_for_t_total_at_progress = progress
return ret
@abc.abstractmethod
def get_lr_(self, progress):
'\n :param progress: value between 0 and 1 (unless going beyond t_total steps) specifying training progress\n :return: learning rate multiplier for current update\n '
return 1.0
|
class ConstantLR(_LRSchedule):
    """Schedule that keeps the LR multiplier fixed at 1."""

    def get_lr_(self, progress):
        # No warmup, no decay.
        return 1.0
|
class WarmupCosineSchedule(_LRSchedule):
    """Linear warmup from 0 to 1 over the `warmup` fraction of training, then
    cosine decay from 1 towards 0 over the remaining `1 - warmup` steps.
    `cycles` controls how much of the cosine period is swept after warmup.
    """
    warn_t_total = True

    def __init__(self, warmup=0.002, t_total=(- 1), cycles=0.5, **kw):
        """
        :param warmup: see _LRSchedule
        :param t_total: see _LRSchedule
        :param cycles: number of cycles; the default 0.5 gives a cosine decay
            from 1. at progress==warmup down to 0. at progress==1.
        """
        super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
        self.cycles = cycles

    def get_lr_(self, progress):
        if progress < self.warmup:
            # Linear ramp-up phase.
            return progress / self.warmup
        # Rescale the post-warmup phase to [0, 1] before the cosine.
        progress = (progress - self.warmup) / (1 - self.warmup)
        return 0.5 * (1.0 + math.cos(2 * math.pi * self.cycles * progress))
|
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
    """Linear warmup from 0 to 1, then `cycles` repetitions of a cosine decay
    with hard restarts (the multiplier jumps back to 1 at each restart)."""

    def __init__(self, warmup=0.002, t_total=(- 1), cycles=1.0, **kw):
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
        assert (cycles >= 1.0)

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        progress = (progress - self.warmup) / (1 - self.warmup)
        # The `% 1` restarts the cosine at the beginning of every cycle.
        return 0.5 * (1.0 + math.cos(math.pi * ((self.cycles * progress) % 1)))
|
class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
    """Training is divided into `cycles` parts of equal length; each part does
    its own linear warmup followed by a cosine decay from 1 to 0."""

    def __init__(self, warmup=0.002, t_total=(- 1), cycles=1.0, **kw):
        # `warmup` is specified per cycle, so the total warmup fraction must
        # still fit inside one training run.
        assert ((warmup * cycles) < 1.0)
        warmup = warmup * cycles if warmup >= 0 else warmup
        super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)

    def get_lr_(self, progress):
        # Position within the current cycle, in [0, 1).
        progress = (progress * self.cycles) % 1.0
        if progress < self.warmup:
            return progress / self.warmup
        progress = (progress - self.warmup) / (1 - self.warmup)
        return 0.5 * (1.0 + math.cos(math.pi * progress))
|
class WarmupConstantSchedule(_LRSchedule):
    """Linear warmup from 0 to 1 over the `warmup` fraction, then a constant
    multiplier of 1."""

    def get_lr_(self, progress):
        return progress / self.warmup if progress < self.warmup else 1.0
|
class WarmupLinearSchedule(_LRSchedule):
    """Linear warmup from 0 to 1 over the `warmup` fraction, then linear
    decay from 1 back down to 0 over the remaining steps."""
    warn_t_total = True

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        # Clamp at 0 when training runs beyond t_total.
        return max((progress - 1.0) / (self.warmup - 1.0), 0.0)
|
class BertAdam(Optimizer):
    """Implements BERT version of Adam algorithm with weight decay fix.

    Params:
        lr: learning rate
        warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1 means constant learning rate of 1. (no warmup
            regardless of warmup setting). Default: -1
        schedule: schedule to use for the warmup (see above).
            Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`,
            `'none'`, `None` or a `_LRSchedule` object (see below).
            If `None` or `'none'`, learning rate is always kept constant.
            Default : `'warmup_linear'`
        betas: Adams betas. Default: (0.9, 0.999)
        e: Adams epsilon. Default: 1e-6
        weight_decay: Weight decay. Default: 0.01
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """

    def __init__(self, params=None, lr='required', warmup=(- 1), t_total=(- 1), schedule='warmup_linear', betas=(0.9, 0.999), e=1e-06, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
        if (lr == 'required') or (lr < 0.0):
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if (not isinstance(schedule, _LRSchedule)) and (schedule not in SCHEDULES):
            raise ValueError('Invalid schedule parameter: {}'.format(schedule))
        if not (0.0 <= betas[0] < 1.0):
            raise ValueError('Invalid beta parameter at index 0: {} - should be in [0.0, 1.0['.format(betas[0]))
        if not (0.0 <= betas[1] < 1.0):
            raise ValueError('Invalid beta parameter at index 1: {} - should be in [0.0, 1.0['.format(betas[1]))
        if not (e >= 0.0):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(e))
        if not isinstance(schedule, _LRSchedule):
            # Resolve the schedule object from its registry name.
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif (warmup != (- 1)) or (t_total != (- 1)):
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, schedule=schedule, betas=betas, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)

    def get_lr(self):
        """Current scheduled learning rate of every initialized parameter."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    pass
                else:
                    lr_scheduled = group['lr']
                    lr_scheduled *= group['schedule'].get_lr(state['step'])
                    lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['next_m'] = torch.zeros_like(p.data)
                    state['next_v'] = torch.zeros_like(p.data)
                next_m, next_v = state['next_m'], state['next_v']
                beta1, beta2 = group['betas']
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])
                # Fix: use the keyword `alpha`/`value` forms, consistent with
                # the AdamW class in this file; the positional
                # `add_(scalar, tensor)` / `addcmul_(scalar, t1, t2)` forms
                # are deprecated and removed in recent PyTorch releases.
                next_m.mul_(beta1).add_(grad, alpha=1 - beta1)
                next_v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                update = next_m / (next_v.sqrt() + group['e'])
                # BERT-style weight decay is folded into the update (L2 on the
                # weights), unlike decoupled AdamW; no bias correction either.
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
                # NOTE: the counter is incremented after applying the update,
                # so the schedule is evaluated at the pre-update step count
                # (original behavior preserved).
                state['step'] += 1
        return loss
|
class Lamb(Optimizer):
    """Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning:
    Training BERT in 76 minutes`_ (https://arxiv.org/abs/1904.00962).

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        warmup (float, optional): portion of t_total for the warmup, -1 means
            no warmup. Default: -1
        t_total (int, optional): total number of training steps for the
            learning rate schedule, -1 means constant learning rate of 1.
            (no warmup regardless of warmup setting). Default: -1
        schedule (string, optional): schedule to use for the warmup.
            Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`,
            `'none'`, `None` or a `_LRSchedule` object.
            Default : `'warmup_linear'`
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this
            into Adam. Set to True for AdamW.
        correct_bias (bool, optional): adam-style bias correction, disabled
            for BERT. Set to True for AdamW.
    """

    def __init__(self, params, lr=0.001, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', betas=(0.9, 0.999), eps=1e-08, weight_decay=0.0, adam=False, correct_bias=False):
        if not (0.0 <= lr):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not (0.0 <= eps):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not (0.0 <= betas[0] < 1.0):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not (0.0 <= betas[1] < 1.0):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not isinstance(schedule, _LRSchedule):
            # Resolve the schedule object from its registry name.
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif (warmup != (- 1)) or (t_total != (- 1)):
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, betas=betas, eps=eps, schedule=schedule, weight_decay=weight_decay, correct_bias=correct_bias)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def get_lr(self):
        """Current scheduled learning rate of every initialized parameter."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    pass
                else:
                    lr_scheduled = group['lr']
                    lr_scheduled *= group['schedule'].get_lr(state['step'])
                    lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.')
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Fix: keyword `alpha`/`value` forms (the positional
                # scalar-first forms are deprecated and removed in recent
                # PyTorch), consistent with the AdamW class in this file.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                step_size = group['lr']
                if group['correct_bias']:
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                lr_scheduled = step_size * group['schedule'].get_lr(state['step'])
                weight_norm = p.data.pow(2).sum().sqrt()
                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])
                adam_norm = adam_step.pow(2).sum().sqrt()
                if (weight_norm == 0) or (adam_norm == 0):
                    trust_ratio = 1
                else:
                    # .item() -> Python float so it can be used as `alpha`
                    # below (alpha must be a Number, not a 0-dim tensor).
                    trust_ratio = (weight_norm / adam_norm).item()
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    # Trust ratio of 1 degenerates Lamb into Adam(W).
                    trust_ratio = 1
                p.data.add_(adam_step, alpha=(-lr_scheduled) * trust_ratio)
        return loss
|
def main():
    """Convert kaldi cmvn ark features of LibriSpeech into .npy files plus a
    per-split csv listing the saved files sorted by length (descending)."""
    if not os.path.isdir(KALDI_ROOT):
        print('CHANGE THIS TO YOUR OWN KALDI ROOT: ', KALDI_ROOT)
        exit()
    if not os.path.isdir(LIBRI_PATH):
        print('Invalid path for the kaldi librispeech dataset: ', LIBRI_PATH)
        print('Please run the kaldi scripts first! More information are described in the README file and Wiki page.')
    if not os.path.isdir(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    for s in SETS:
        ark_path = ((((('ark:' + LIBRI_PATH) + s) + '/') + DATA_TYPE) + '_cmvn.ark')
        with ReadHelper(ark_path) as reader:
            lengths = {}
            print('Preprocessing', s, 'data...')
            cur_dir = os.path.join(OUTPUT_DIR, s.replace('_', '-'))
            if not os.path.isdir(cur_dir):
                os.mkdir(cur_dir)
            for key, array in tqdm(reader):
                feat = np.asarray(array).astype('float32')
                np.save(os.path.join(cur_dir, key), feat)
                lengths[os.path.join(s.replace('_', '-'), key + '.npy')] = len(feat)
            # Longest utterances first (bucketing convention).
            ordered = sorted(lengths.items(), key=operator.itemgetter(1), reverse=True)
            df = pd.DataFrame(data={'file_path': [fp for fp, l in ordered], 'length': [l for fp, l in ordered], 'label': 'None'})
            df.to_csv(os.path.join(OUTPUT_DIR, s.replace('_', '-') + '.csv'))
    print((("[ARK-TO-LIBRI] - All done, saved at '" + str(OUTPUT_DIR)) + "', exit."))
    exit()
|
def main():
    """Convert kaldi fmllr ark features of TIMIT into .npy files plus a
    per-split csv listing the saved files sorted by length (descending)."""
    if not os.path.isdir(KALDI_ROOT):
        print('CHANGE THIS TO YOUR OWN KALDI ROOT: ', KALDI_ROOT)
        exit()
    if not os.path.isdir(TIMIT_PATH):
        print('Invalid path for the kaldi TIMIT dataset: ', TIMIT_PATH)
        print('Please run the kaldi scripts first! More information are described in the README file and Wiki page.')
    if not os.path.isdir(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    for s in SETS:
        lengths = {}
        print('Preprocessing', s, 'data...')
        cur_dir = os.path.join(OUTPUT_DIR, s.replace('_', '-'))
        if not os.path.isdir(cur_dir):
            os.mkdir(cur_dir)
        # Features are sharded into 10 numbered ark files per split.
        for shard in range(10):
            ark_path = ((((((('ark:' + TIMIT_PATH) + s) + '/data/feats_fmllr_') + s) + '.') + str((shard + 1))) + '.ark')
            with ReadHelper(ark_path) as reader:
                for key, array in tqdm(reader):
                    feat = np.asarray(array).astype('float32')
                    np.save(os.path.join(cur_dir, key), feat)
                    lengths[os.path.join(s.replace('_', '-'), key + '.npy')] = len(feat)
        # Longest utterances first (bucketing convention).
        ordered = sorted(lengths.items(), key=operator.itemgetter(1), reverse=True)
        df = pd.DataFrame(data={'file_path': [fp for fp, l in ordered], 'length': [l for fp, l in ordered], 'label': 'None'})
        df.to_csv(os.path.join(OUTPUT_DIR, s.replace('_', '-') + '.csv'))
    print((("[ARK-TO-TIMIT] - All done, saved at '" + str(OUTPUT_DIR)) + "', exit."))
    exit()
|
def main():
    """Convert kaldi feats.scp matrices into .npy files plus a per-split csv
    listing the saved files sorted by length (descending)."""
    if not os.path.isdir(KALDI_PATH):
        print('CHANGE THIS TO YOUR OWN KALDI PATH: ', KALDI_PATH)
        print('Please run the kaldi scripts first to generate kaldi data directory.')
        exit()
    if not os.path.isdir(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    for s in SETS:
        print('Preprocessing', s, 'data...')
        lengths = {}
        cur_dir = os.path.join(OUTPUT_DIR, s)
        if not os.path.isdir(cur_dir):
            os.mkdir(cur_dir)
        scp_path = os.path.join(KALDI_PATH, s + '/feats.scp')
        for key, mat in tqdm(kaldi_io.read_mat_scp(scp_path)):
            feat = np.asarray(mat).astype('float32')
            np.save(os.path.join(cur_dir, key), feat)
            lengths[os.path.join(s, key + '.npy')] = len(feat)
        # Longest utterances first (bucketing convention).
        ordered = sorted(lengths.items(), key=operator.itemgetter(1), reverse=True)
        df = pd.DataFrame(data={'file_path': [fp for fp, l in ordered], 'length': [l for fp, l in ordered], 'label': 'None'})
        df.to_csv(os.path.join(OUTPUT_DIR, s + '.csv'))
    print((("[ARK-TO-VOXCELEB] - All done, saved at '" + str(OUTPUT_DIR)) + "', exit."))
    exit()
|
def get_preprocess_args():
    """CLI arguments for computing audio lengths of any supported dataset."""
    parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    add = parser.add_argument
    add('-i', '--input_data', default='../LibriSpeech/', type=str, help='Path to your LibriSpeech directory', required=False)
    add('-o', '--output_path', default='./data/', type=str, help='Path to store output', required=False)
    add('-a', '--audio_extension', default='.flac', type=str, help='audio file type (.wav / .flac / .mp3 / etc)', required=False)
    add('-n', '--name', default='len_for_bucket', type=str, help='Name of the output directory', required=False)
    add('--n_jobs', default=(- 1), type=int, help='Number of jobs used for feature extraction', required=False)
    return parser.parse_args()
|
def extract_length(input_file):
    # Return the number of audio frames (samples) reported by torchaudio
    # for the file at `input_file`.
    # NOTE(review): forces the 'sox_io' backend on every call;
    # torchaudio.set_audio_backend is deprecated in newer torchaudio
    # releases — confirm the pinned version still supports it.
    torchaudio.set_audio_backend('sox_io')
    return torchaudio.info(input_file).num_frames
|
def generate_length(args, tr_set, audio_extension):
    """Extract the length (frame count) of every audio file in each split and
    write one csv per split, sorted by length in descending order.

    Args:
        args: parsed CLI arguments (uses input_data, output_path, name, n_jobs).
        tr_set: list of split names to process.
        audio_extension: audio suffix to glob for (e.g. '.flac').

    Raises:
        NotImplementedError: when a split directory exists neither in lower-
            nor upper-case under `args.input_data`.
    """
    for (i, s) in enumerate(tr_set):
        # Dataset folders may be lower- or upper-case depending on the source.
        if os.path.isdir(os.path.join(args.input_data, s.lower())):
            s = s.lower()
        elif os.path.isdir(os.path.join(args.input_data, s.upper())):
            s = s.upper()
        else:
            # Fix: the original `assert NotImplementedError` always passed
            # (it asserts the truthy class object); actually raise instead of
            # silently proceeding with an empty file list.
            raise NotImplementedError(f'split directory not found: {s}')
        print('')
        todo = list(Path(os.path.join(args.input_data, s)).rglob('*' + audio_extension))
        print(f'Preprocessing data in: {s}, {len(todo)} audio files found.')
        output_dir = os.path.join(args.output_path, args.name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        print('Extracting audio length...', flush=True)
        tr_x = Parallel(n_jobs=args.n_jobs)(delayed(extract_length)(str(file)) for file in tqdm(todo))
        # Re-express each path relative to the split and sort longest-first.
        sorted_todo = [os.path.join(s, str(todo[idx]).split(s + '/')[-1]) for idx in reversed(np.argsort(tr_x))]
        df = pd.DataFrame(data={'file_path': [fp for fp in sorted_todo], 'length': list(reversed(sorted(tr_x))), 'label': None})
        df.to_csv(os.path.join(output_dir, tr_set[i] + '.csv'))
    print('All done, saved at', output_dir, 'exit.')
|
def main():
    """Interactively pick dataset splits and generate per-split length csvs."""
    args = get_preprocess_args()
    lowered = args.input_data.lower()
    if 'librilight' in lowered:
        SETS = (['small', 'medium', 'large'] + ['small-splitted', 'medium-splitted', 'large-splitted'])
    elif 'librispeech' in lowered:
        SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
    elif 'timit' in lowered:
        SETS = ['TRAIN', 'TEST']
    else:
        raise NotImplementedError
    for idx, s in enumerate(SETS):
        print('\t', idx, ':', s)
    tr_set = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [SETS[int(t)] for t in tr_set.split(' ')]
    generate_length(args, tr_set, args.audio_extension)
|
def locate_txt(flac):
    """Return the transcript path for a LibriSpeech flac file.

    A file `<spk>-<chap>-<utt>.flac` maps to `<spk>-<chap>.trans.txt`
    in the same directory.
    """
    stem_tags = os.path.basename(flac).split('.')[0].split('-')
    speaker, chapter = stem_tags[0], stem_tags[1]
    return os.path.join(os.path.dirname(flac), f'{speaker}-{chapter}.trans.txt')
|
def get_preprocess_args():
    """CLI arguments for the LibriSpeech phone-alignment preprocessing."""
    parser = argparse.ArgumentParser(description='preprocess arguments for LibriSpeech dataset.')
    parser.add_argument('--data_path', default='./data/libri_alignment', type=str, help='Path to raw LibriSpeech alignment')
    parser.add_argument('--output_path', default='./data/libri_phone', type=str, help='Path to store output', required=False)
    return parser.parse_args()
|
def phone_preprocess(data_path, output_path, sets, unaligned):
    # Interactively selects alignment sets, builds a phone -> index mapping
    # over all selected sets, then converts each utterance's alignment .txt
    # into a framewise phone-index array saved as a .pkl file.
    #
    # Args:
    #   data_path: root directory of the raw alignment .txt files.
    #   output_path: root for phone2idx.pkl and per-utterance .pkl targets.
    #   sets: candidate set names presented to the user.
    #   unaligned: utterance ids to skip (no forced alignment available).
    print('Data sets :')
    for (idx, s) in enumerate(sets):
        print('\t', idx, ':', s)
    todo_sets = input('Please enter the index for preprocessing sets (seperate w/ space): ')
    sets = [sets[int(s)] for s in todo_sets.split(' ')]
    # First pass: collect the phone inventory over all selected sets so that
    # indices are consistent between splits.
    idx = 0
    phone2idx = {}
    for s in sets:
        print('')
        print('Computing', s, 'data...')
        for path in tqdm(list(Path(os.path.join(data_path, s)).rglob('*.txt'))):
            check_name = path.as_posix().split('/')[(- 1)].split('.')[0]
            if ((check_name not in unaligned) and (check_name != 'unaligned')):
                for line in open(path).readlines():
                    # Each alignment line ends with the phone label.
                    phone = line.strip('\n').split(' ')[(- 1)]
                    if (phone not in phone2idx):
                        phone2idx[phone] = idx
                        idx += 1
    print('Phone set:')
    print(phone2idx)
    print(len(phone2idx), 'distinct phones found in', sets)
    with open(os.path.join(output_path, 'phone2idx.pkl'), 'wb') as fp:
        pickle.dump(phone2idx, fp)
    # Second pass: expand every aligned (start, end, phone) interval into
    # framewise phone indices and pickle them per utterance.
    for s in sets:
        print('')
        print('Preprocessing', s, 'data...')
        todo = list(Path(os.path.join(data_path, s)).rglob('*.txt'))
        print(len(todo), 'audio files found in', s)
        if (not os.path.exists(os.path.join(output_path, s))):
            os.makedirs(os.path.join(output_path, s))
        print('Preprocessing phone alignments...', flush=True)
        for path in tqdm(todo):
            check_name = path.as_posix().split('/')[(- 1)].split('.')[0]
            if ((check_name not in unaligned) and (check_name != 'unaligned')):
                x = []
                file = open(path).readlines()
                for line in file:
                    # line format: "<start_sec> <end_sec> <phone>"
                    line = line.strip('\n').split(' ')
                    x += time_to_frame(start_time=float(line[0]), end_time=float(line[1]), phone=phone2idx[line[2]])
                x = np.asarray(x)
                # Mirror the input layout under output_path by swapping the
                # top-level folder name and the file extension.
                path_to_save = str(path).replace(data_path.split('/')[(- 1)], output_path.split('/')[(- 1)]).replace('txt', 'pkl')
                with open(path_to_save, 'wb') as fp:
                    pickle.dump(x, fp)
    print('Phone preprocessing complete!')
|
def time_to_frame(start_time, end_time, phone):
    """Expand one aligned phone interval (in seconds) into a list with one
    `phone` entry per STFT frame covered by the interval."""
    start_sample = int(start_time * sample_rate)
    end_sample = int(end_time * sample_rate)
    _, hop_length, win_length = _stft_parameters(sample_rate=sample_rate)
    half_window = win_length * 0.5
    # Shift both boundaries back by half a window, clamping at zero.
    start_sample = start_sample - half_window if start_sample >= half_window else 0
    end_sample = end_sample - half_window if end_sample >= half_window else 0
    # Number of hop boundaries crossed, with edge adjustments for samples
    # landing exactly on a hop boundary.
    frames = (end_sample // hop_length) - (start_sample // hop_length)
    frames += 1 if (start_sample % hop_length) == 0 else 0
    frames -= 1 if (end_sample % hop_length) == 0 else 0
    return [phone] * int(frames)
|
def main():
    """Entry point: build framewise phone targets from LibriSpeech forced
    alignments, after recording the list of unaligned utterances."""
    args = get_preprocess_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    unaligned_path = os.path.join(args.data_path, 'train-clean-360/unaligned.txt')
    try:
        # `with` closes the file (the original leaked the handle).
        with open(unaligned_path) as f:
            file = f.readlines()
    except OSError as err:
        # Fix: the original bare `except:` converted *any* error (including
        # bugs in the code below) into this message; only a missing or
        # unreadable file should, and the cause is chained for debugging.
        raise ValueError('Did not find unaligned.txt!') from err
    # First token of each line is the unaligned utterance id.
    unaligned = [str(line).split('\t')[0].split(' ')[0] for line in file]
    print('Unaligned list: ', unaligned)
    unaligned_pkl = [('train-clean-360/' + u) + '.npy' for u in unaligned]
    with open(os.path.join(args.output_path, 'unaligned.pkl'), 'wb') as fp:
        pickle.dump(unaligned_pkl, fp)
    sets = ['train-clean-360', 'test-clean']
    phone_preprocess(args.data_path, args.output_path, sets, unaligned)
|
def boolean_string(s):
    """Strict bool parser for argparse: only 'True'/'False' are accepted."""
    if s == 'True':
        return True
    if s == 'False':
        return False
    raise ValueError('Not a valid boolean string')
|
def get_preprocess_args():
    """CLI arguments for offline acoustic-feature extraction."""
    parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    add = parser.add_argument
    add('--output_path', default='./data/', type=str, help='Path to store output', required=False)
    add('--audio_extention', default='.flac', type=str, help='audio file type (.wav / .flac / .mp3 / etc)', required=False)
    add('--feature_type', default='fbank', type=str, help='Feature type ( mfcc / fbank / mel / linear )', required=False)
    add('--delta', default=False, type=boolean_string, help='Append Delta', required=False)
    add('--delta_delta', default=False, type=boolean_string, help='Append Delta Delta', required=False)
    add('--apply_cmvn', default=True, type=boolean_string, help='Apply CMVN on feature', required=False)
    add('--n_jobs', default=(- 1), type=int, help='Number of jobs used for feature extraction', required=False)
    add('--name', default='None', type=str, help='Name of the output directory', required=False)
    return parser.parse_args()
|
def acoustic_preprocess(args, tr_set, dim, audio_extention):
    """Extract acoustic features for each requested dataset split.

    For every split in ``tr_set``: find all audio files under
    ``args.data_root/<split>``, extract features in parallel (saving one
    ``.npy`` per utterance under the output directory), and write a CSV
    listing the files sorted by descending feature length.

    Args:
        args: parsed options (data_root, output_path, feature_type, delta,
            delta_delta, apply_cmvn, n_jobs, name).
        tr_set: list of split names (subdirectories of ``args.data_root``).
        dim: feature dimensionality, used only to name the output directory.
        audio_extention: audio file suffix to search for (e.g. '.flac').
    """
    for split_idx, split in enumerate(tr_set):
        print('')
        print('Preprocessing data in: ', split, end='')
        audio_files = list(Path(os.path.join(args.data_root, split)).rglob('*' + audio_extention))
        print(len(audio_files), 'audio files found.')

        # Output directory: either a user-supplied name, or an auto-generated
        # one encoding the feature type and dimension.
        if args.name == 'None':
            output_dir = os.path.join(args.output_path, '_'.join(['NewData', str(args.feature_type) + str(dim)]))
        else:
            output_dir = os.path.join(args.output_path, args.name)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        split_dir = os.path.join(output_dir, split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)

        print('Extracting acoustic feature...', flush=True)
        # extract_feature saves each utterance's features to <split_dir>/<utt>.npy
        # and returns the feature length, collected into `lengths`.
        lengths = Parallel(n_jobs=args.n_jobs)(
            delayed(extract_feature)(
                str(path),
                feature=args.feature_type,
                delta=args.delta,
                delta_delta=args.delta_delta,
                cmvn=args.apply_cmvn,
                save_feature=os.path.join(split_dir, str(path).split('/')[-1].replace(audio_extention, '')),
            )
            for path in tqdm(audio_files)
        )

        # Order file paths by descending feature length so the CSV matches the
        # descending-sorted `length` column row for row.
        by_length_desc = reversed(np.argsort(lengths))
        sorted_paths = [
            os.path.join(split, str(audio_files[j]).split('/')[-1].replace(audio_extention, '.npy'))
            for j in by_length_desc
        ]
        table = pd.DataFrame(data={
            'file_path': list(sorted_paths),
            'length': sorted(lengths, reverse=True),
            'label': None,
        })
        table.to_csv(os.path.join(output_dir, split + '.csv'))

    print('All done, saved at', output_dir, 'exit.')
|
def main():
    """Interactive entry point for the acoustic-feature preprocessing pipeline.

    Computes the output feature dimension from the chosen feature type and the
    delta options, asks the user which dataset splits to process, then runs
    ``acoustic_preprocess`` on the selection.
    """
    args = get_preprocess_args()

    # Each enabled delta stream appends another copy of the base features.
    n_streams = 1 + int(args.delta) + int(args.delta_delta)
    mel_dim = num_mels * n_streams
    mfcc_dim = num_mfcc * n_streams
    if args.feature_type == 'linear':
        dim = num_freq
    elif args.feature_type == 'mfcc':
        dim = mfcc_dim
    else:
        dim = mel_dim

    print('Delta: ', args.delta, '. Delta Delta: ', args.delta_delta, '. Cmvn: ', args.apply_cmvn)

    # Show the available splits and let the user pick by index.
    for split_idx, split_name in enumerate(SETS):
        print('\t', split_idx, ':', split_name)
    choice = input('Please enter the index of splits you wish to use preprocess. (seperate with space): ')
    tr_set = [SETS[int(token)] for token in choice.split(' ')]

    acoustic_preprocess(args, tr_set, dim, args.audio_extention)
|
# NOTE(review): the lines below are non-code residue (dataset-viewer UI text)
# that leaked into this file; commented out so the module remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.