code stringlengths 17 6.64M |
|---|
class EventBasedScore(SoundEventScore):
    """
    event-based scores - the ground truth and system output are compared at
    event instance level;

    See https://tut-arg.github.io/sed_eval/generated/sed_eval.sound_event.EventBasedMetrics.html # noqa: E501
    for params.
    """

    # sed_eval metrics class instantiated by the SoundEventScore base class.
    score_class = sed_eval.sound_event.EventBasedMetrics
|
class MeanAveragePrecision(ScoreFunction):
    """Macro-averaged Average Precision: AP is computed per class and then
    averaged with equal weight across the classes."""

    name = 'mAP'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        # Both inputs must be (num_samples, num_classes) matrices.
        assert predictions.ndim == 2
        assert targets.ndim == 2
        # Based on suggestions from Eduardo Fonseca: equal weighting is
        # assigned to each class regardless of its prior (macro averaging,
        # following Hershey et al. 2017; Gemmeke et al. 2017), so rare
        # classes matter as much as common ones.
        # NOTE: average_precision_score misbehaves when all ground truths of
        # a class are negative (scikit-learn issue #8245), which may come up
        # in small tasks with few samples.
        return average_precision_score(targets, predictions, average='macro')
|
class DPrime(ScoreFunction):
    """d-prime computed per class from ROC-AUC, then macro-averaged.

    Code adapted from code provided by Eduoard Fonseca.
    """

    name = 'd_prime'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        # Both inputs must be (num_samples, num_classes) matrices.
        assert predictions.ndim == 2
        assert targets.ndim == 2
        try:
            per_class_auc = roc_auc_score(targets, predictions, average=None)
            # d' = sqrt(2) * Phi^-1(AUC), then macro-average over classes.
            per_class_dprime = np.sqrt(2.0) * stats.norm().ppf(per_class_auc)
            return np.mean(per_class_dprime)
        except ValueError:
            # e.g. a class with only one label value present in `targets`.
            return np.nan
|
class AUCROC(ScoreFunction):
    """ROC-AUC in macro mode: computed per class, then averaged across the
    classes."""

    name = 'aucroc'

    def _compute(self, predictions: np.ndarray, targets: np.ndarray, **kwargs) -> float:
        # Both inputs must be (num_samples, num_classes) matrices.
        assert predictions.ndim == 2
        assert targets.ndim == 2
        try:
            return roc_auc_score(targets, predictions, average='macro')
        except ValueError:
            # e.g. a class with only one label value present in `targets`.
            return np.nan
|
class AutoregressiveReconstructionTask(Task):
    """
    Autoregressive feature-reconstruction pre-training task.

    Attributes:
        upstream (torch.nn.Module): The upstream encoder (transformers, rnn, etc) that outputs `hidden_states`
        predictor (torch.nn.Module): The pre-training predictor that takes `hidden_states` as input and maps to the task target
        loss (torch.nn Loss Functions): The reconstruction loss (torch.nn.L1Loss, torch.nn.MSELoss, etc)
    """

    def __init__(self, upstream: UpstreamExample, predictor: PredictorExample, loss: torch.nn.L1Loss, **kwargs):
        """
        The input feature does not necessary have to be the same as the target feature.

        Args:
            upstream (Encoder)
            predictor (Predictor)
            loss (reconstruction loss)

        feat_A -> upstream -> predictor -> feat_B
        loss(feat_A, feat_B)
        """
        super().__init__()
        self.upstream = upstream
        self.predictor = predictor
        # `loss` is the loss class itself (not an instance); instantiate here.
        self.loss = loss()

    def predict(self, x: torch.Tensor, label: torch.Tensor, x_len: torch.LongTensor):
        """
        Args:
            x (torch.Tensor): source_feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target_feat - (batch_size, timestamps, output_size)
            x_len (torch.LongTensor): (batch_size, ) original feature sequence
                lengths minus the value of `n_future`. NOTE(review): annotated
                `list` in the original, but `.tolist()` below shows a tensor
                is expected.

        Return:
            hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
            loss (torch.Tensor): scalar.
            prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        upstream_output: torch.Tensor = self.upstream(x, x_len.tolist())
        prediction: torch.Tensor = self.predictor(upstream_output).prediction
        reconstruction_loss = self.loss(prediction, label)
        return Output(loss=reconstruction_loss, hidden_states=upstream_output.hidden_states, prediction=prediction)

    def _general_forward(self, x: torch.Tensor, label: torch.Tensor, x_len: torch.LongTensor, unique_name: List[str]):
        # Shared body of train/valid/test steps; Output.slice(3) unpacks the
        # first three fields (loss, hidden_states, prediction).
        (loss, hidden_states, prediction) = self.predict(x, label, x_len).slice(3)
        logs = Logs()
        logs.add_hidden_state('hidden_states', hidden_states)
        logs.add_hidden_state('prediction', prediction)
        return Output(loss=loss, prediction=prediction, label=label, unique_name=unique_name, logs=logs)

    def _general_reduction(self, batch_results: list, on_epoch_end: bool = None):
        # Average the scalar losses over all cached batches.
        losses = []
        for batch_result in batch_results:
            losses.append(batch_result.loss)
        loss = (sum(losses) / len(losses)).item()
        logs = Logs()
        logs.add_scalar('loss', loss)
        return Output(logs=logs)

    def train_step(self, x: torch.Tensor, label: torch.Tensor, x_len: torch.LongTensor, unique_name: List[str], **kwargs):
        """
        Each forward step in the training loop

        Args:
            x (torch.Tensor): source_feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target_feat - (batch_size, timestamps, output_size)
            x_len: length of the original feature sequence minus `n_future`

        Return:
            hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
            loss (torch.Tensor): scalar.
            prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        return self._general_forward(x, label, x_len, unique_name)

    def train_reduction(self, batch_results: list, on_epoch_end: bool = False):
        """
        After several forward steps, outputs should be collected untouched (but detaching the Tensors)
        into a list and passed as batch_results. This function examine the collected items and compute
        metrics across these batches. This function might be called in the middle of an epoch for quick
        logging, or after exactly an epoch to know the epoch level performance.

        Args:
            batch_results (List[cacheable version of the output of self.train_step])
            on_epoch_end (bool):
                usually you should keep the same behavior between sub-epoch and epoch level
                this parameter is here in case you need specific postprocessing which must
                only be done right on the end of an epoch

        Return:
            logs (List[Log]):
                a list of content to log onto any logger
                each content should be in the Log class format
        """
        return self._general_reduction(batch_results, on_epoch_end)

    def valid_step(self, x: torch.Tensor, label: torch.Tensor, x_len: torch.LongTensor, unique_name: List[str], **kwargs):
        # Same computation as train_step.
        return self._general_forward(x, label, x_len, unique_name)

    def test_step(self, x: torch.Tensor, label: torch.Tensor, x_len: torch.LongTensor, unique_name: List[str], **kwargs):
        # Same computation as train_step.
        return self._general_forward(x, label, x_len, unique_name)

    def valid_reduction(self, batch_results: list, on_epoch_end: bool = True):
        return self._general_reduction(batch_results, on_epoch_end)

    def test_reduction(self, batch_results: list, on_epoch_end: bool = True):
        return self._general_reduction(batch_results, on_epoch_end)
|
class Task(torch.nn.Module):
    """Abstract base class for tasks dispatched by mode name
    (train / valid / test)."""

    def __init__(self) -> None:
        super().__init__()

    def get_state(self):
        """Extra (non-module) state to checkpoint; nothing by default."""
        return {}

    def set_state(self, state: dict):
        """Restore state produced by get_state; no-op by default."""
        pass

    def parse_cached_results(self, cached_results: List[dict]):
        """Merge a list of per-batch result dicts into one dict of flat lists.

        Every dict must have the same keys; tuple/list values are
        concatenated, scalars are appended.
        """
        reference_keys = sorted(cached_results[0].keys())
        merged = defaultdict(list)
        for record in cached_results:
            assert sorted(record.keys()) == reference_keys
            for key, value in record.items():
                if isinstance(value, (tuple, list)):
                    merged[key].extend(value)
                else:
                    merged[key].append(value)
        return dict(merged)

    @abc.abstractmethod
    def predict(self):
        raise NotImplementedError

    def forward(self, mode: str, *args, **kwargs):
        # Dispatch to train_step / valid_step / test_step.
        step_fn = getattr(self, f'{mode}_step')
        return step_fn(*args, **kwargs)

    def reduction(self, mode: str, *args, **kwargs):
        # Dispatch to train_reduction / valid_reduction / test_reduction.
        reduction_fn = getattr(self, f'{mode}_reduction')
        return reduction_fn(*args, **kwargs)

    @abc.abstractmethod
    def train_step(self):
        raise NotImplementedError

    @abc.abstractmethod
    def valid_step(self):
        raise NotImplementedError

    @abc.abstractmethod
    def test_step(self):
        raise NotImplementedError

    @abc.abstractmethod
    def train_reduction(self):
        raise NotImplementedError

    @abc.abstractmethod
    def valid_reduction(self):
        raise NotImplementedError

    @abc.abstractmethod
    def test_reduction(self):
        raise NotImplementedError
|
class FeatReconstructionTask(Task):
    """
    Feature reconstruction pre-training task (optionally masked).

    Attributes:
        upstream (torch.nn.Module): The upstream encoder (transformers, rnn, etc) that outputs `hidden_states`
        predictor (torch.nn.Module): The pre-training predictor that takes `hidden_states` as input and maps to the task target
        loss (torch.nn Loss Functions): The reconstruction loss (torch.nn.L1Loss, torch.nn.MSELoss, etc)
    """

    def __init__(self, upstream: UpstreamExample, predictor: PredictorExample, loss: torch.nn.L1Loss, loss_config: dict = None, **kwargs):
        """
        The input feature does not necessary have to be the same as the target feature.

        Args:
            upstream (Encoder)
            predictor (Projection NN)
            loss (reconstruction loss): loss class, instantiated here
            loss_config (dict): kwargs forwarded to the loss constructor

        feat_A -> upstream -> predictor -> feat_B
        loss(feat_A, feat_B)
        """
        super().__init__()
        self.upstream = upstream
        self.predictor = predictor
        # FIX: the original signature used a mutable default `loss_config={}`;
        # use None as the sentinel and substitute an empty dict here.
        self.loss = loss(**(loss_config or {}))

    def predict(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor = None, position_encoding: torch.Tensor = None, attention_mask: torch.LongTensor = None):
        """
        Args:
            x (torch.Tensor): source_feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target_feat - (batch_size, timestamps, output_size)
            label_mask (torch.BoolTensor): (batch_size, timestamps, output_size)
            position_encoding (torch.Tensor): (batch_size, timestamps, input_size)
            attention_mask (torch.LongTensor): (batch_size, timestamps)

        Return:
            hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
            loss (torch.Tensor): scalar.
            prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        if (position_encoding is None) and (attention_mask is None):
            upstream_output: torch.Tensor = self.upstream(x)
        else:
            upstream_output: torch.Tensor = self.upstream(x, position_encoding, attention_mask)
        prediction: torch.Tensor = self.predictor(upstream_output).prediction
        if label_mask is None:
            # Unmasked reconstruction: compare the full sequences.
            reconstruction_loss = self.loss(prediction, label)
        else:
            # Masked reconstruction: compare only the masked positions.
            assert label_mask.sum() > 0, 'Without any masking, loss might go NaN.'
            reconstruction_loss = self.loss(prediction.masked_select(label_mask), label.masked_select(label_mask))
        return Output(loss=reconstruction_loss, hidden_states=upstream_output.hidden_states, prediction=prediction)

    def _general_forward(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor = None, position_encoding: torch.Tensor = None, attention_mask: torch.LongTensor = None, unique_name: List[str] = None):
        # Shared body of train/valid/test steps.
        (loss, hidden_states, prediction) = self.predict(x, label, label_mask, position_encoding, attention_mask).slice(3)
        logs = Logs()
        logs.add_hidden_state('hidden_states', hidden_states)
        logs.add_hidden_state('prediction', prediction)
        return Output(loss=loss, prediction=prediction, label=label, unique_name=unique_name, logs=logs)

    def _general_reduction(self, batch_results: list, on_epoch_end: bool = None):
        # Average the scalar losses over all cached batches.
        losses = []
        for batch_result in batch_results:
            losses.append(batch_result.loss)
        loss = (sum(losses) / len(losses)).item()
        logs = Logs()
        logs.add_scalar('loss', loss)
        return Output(logs=logs)

    def train_step(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor = None, position_encoding: torch.Tensor = None, attention_mask: torch.LongTensor = None, unique_name: List[str] = None, **kwargs):
        """
        Each forward step in the training loop

        Args:
            x (torch.Tensor): source_feat - (batch_size, timestamps, input_size)
            label (torch.Tensor): target_feat - (batch_size, timestamps, output_size)
            label_mask (torch.BoolTensor): (batch_size, timestamps, output_size)
            position_encoding (torch.Tensor): (batch_size, timestamps, input_size)
            attention_mask (torch.LongTensor): (batch_size, timestamps)

        Return:
            hidden_states (torch.Tensor): (batch_size, timestamps, hidden_size)
            loss (torch.Tensor): scalar.
            prediction (torch.Tensor): (batch_size, timestamps, output_size)
        """
        return self._general_forward(x, label, label_mask, position_encoding, attention_mask, unique_name)

    def train_reduction(self, batch_results: list, on_epoch_end: bool = False):
        """
        After several forward steps, outputs should be collected untouched (but detaching the Tensors)
        into a list and passed as batch_results. This function examine the collected items and compute
        metrics across these batches. This function might be called in the middle of an epoch for quick
        logging, or after exactly an epoch to know the epoch level performance.

        Args:
            batch_results (List[cacheable version of the output of self.train_step])
            on_epoch_end (bool):
                usually you should keep the same behavior between sub-epoch and epoch level
                this parameter is here in case you need specific postprocessing which must
                only be done right on the end of an epoch

        Return:
            logs (List[Log]):
                a list of content to log onto any logger
                each content should be in the Log class format
        """
        return self._general_reduction(batch_results, on_epoch_end)

    def valid_step(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor = None, position_encoding: torch.Tensor = None, attention_mask: torch.LongTensor = None, unique_name: List[str] = None, **kwargs):
        # Same computation as train_step.
        return self._general_forward(x, label, label_mask, position_encoding, attention_mask, unique_name)

    def test_step(self, x: torch.Tensor, label: torch.Tensor, label_mask: torch.BoolTensor = None, position_encoding: torch.Tensor = None, attention_mask: torch.LongTensor = None, unique_name: List[str] = None, **kwargs):
        # Same computation as train_step.
        return self._general_forward(x, label, label_mask, position_encoding, attention_mask, unique_name)

    def valid_reduction(self, batch_results: list, on_epoch_end: bool = True):
        return self._general_reduction(batch_results, on_epoch_end)

    def test_reduction(self, batch_results: list, on_epoch_end: bool = True):
        return self._general_reduction(batch_results, on_epoch_end)
|
class OneHotToCrossEntropyLoss(torch.nn.Module):
    """Cross-entropy loss that accepts one-hot encoded targets by converting
    them to class indices first."""

    def __init__(self):
        super().__init__()
        self.loss = torch.nn.CrossEntropyLoss()

    def forward(self, y_hat: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # Each row of `y` must be a valid one-hot vector (rows sum to one).
        assert torch.all(y.sum(dim=1) == y.new_ones(y.shape[0]))
        class_indices = y.argmax(dim=1)
        return self.loss(y_hat, class_indices)
|
class ScenePredictionTask(Task):
    """Clip-level scene prediction: multiclass or multilabel classification."""

    def __init__(self, model: torch.nn.Module, category: CategoryEncoder, prediction_type: str, scores: List[str]):
        """
        Args:
            model: classifier; model(x, x_len) -> (logits, _)
            category: encoder mapping class indices <-> label strings
            prediction_type: 'multilabel' (sigmoid + BCEWithLogits) or
                'multiclass' (softmax + cross entropy on one-hot targets)
            scores: metric names, looked up in `available_scores`
        """
        super().__init__()
        self.model = model
        # Bidirectional mappings between label strings and class indices.
        self.label_to_idx = {str(category.decode(idx)): idx for idx in range(len(category))}
        self.idx_to_label = {idx: str(category.decode(idx)) for idx in range(len(category))}
        self.scores = [available_scores[score](label_to_idx=self.label_to_idx) for score in scores]
        if prediction_type == 'multilabel':
            self.activation: torch.nn.Module = torch.nn.Sigmoid()
            self.logit_loss = torch.nn.BCEWithLogitsLoss()
        elif prediction_type == 'multiclass':
            self.activation = torch.nn.Softmax(dim=-1)
            self.logit_loss = OneHotToCrossEntropyLoss()
        else:
            raise ValueError(f'Unknown prediction_type {prediction_type}')

    def predict(self, x, x_len):
        # Returns (post-activation probabilities, raw logits).
        (logits, _) = self.model(x, x_len)
        prediction = self.activation(logits)
        return (prediction, logits)

    def forward(self, _mode: str, x, x_len, y, labels, unique_name: str, _dump_dir: str = None):
        # One forward step; caches detached per-sample tensors for reduction().
        (y_pr, y_hat) = self.predict(x, x_len)
        loss = self.logit_loss(y_hat.float(), y.float())
        cacheable = dict(loss=loss.detach().cpu().item(), label=y.detach().cpu().unbind(dim=0), logit=y_hat.detach().cpu().unbind(dim=0), prediction=y_pr.detach().cpu().unbind(dim=0))
        return (loss, cacheable)

    def log_scores(self, score_args):
        """Logs the metric score value for each score defined for the model"""
        assert hasattr(self, 'scores'), 'Scores for the model should be defined'
        end_scores = {}
        for score in self.scores:
            score_ret = score(*score_args)
            validate_score_return_type(score_ret)
            if isinstance(score_ret, tuple):
                # A tuple of (subscore, value) pairs; the first pair's value
                # also serves as the score's headline number.
                end_scores[f'{score}'] = score_ret[0][1]
                for (subscore, value) in score_ret:
                    end_scores[f'{score}_{subscore}'] = value
            elif isinstance(score_ret, float):
                end_scores[f'{score}'] = score_ret
            else:
                raise ValueError(f'Return type {type(score_ret)} is unexpected. Return type of the score function should either be a tuple(tuple) or float.')
        return end_scores

    def reduction(self, _mode: str, cached_results: List[dict], _dump_dir: str = None):
        # Reassemble epoch-level tensors and recompute the loss on them.
        result = self.parse_cached_results(cached_results)
        target = torch.stack(result['label'], dim=0)
        prediction_logit = torch.stack(result['logit'], dim=0)
        prediction = torch.stack(result['prediction'], dim=0)
        loss = self.logit_loss(prediction_logit, target)
        logs = dict(loss=loss.detach().cpu().item())
        if _mode in ['valid', 'test']:
            # Metric scores are only computed for evaluation modes.
            logs.update(self.log_scores(score_args=(prediction.detach().cpu().numpy(), target.detach().cpu().numpy())))
        return logs
|
class SpeakerClassifier(torch.nn.Module):
    """Example speaker classifier producing a random clip-level output.

    Attributes:
        input_size: int
        output_size: int
    """

    def __init__(self, input_size=3, output_size=4):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._output_size

    def forward(self, x, x_len):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            output (torch.Tensor): (batch_size, output_size)
        """
        assert x.size(-1) == self.input_size
        output = torch.randn(x.size(0), self.output_size)
        # FIX: the original `assert output` raised RuntimeError (truth value
        # of a multi-element tensor is ambiguous) and never returned the
        # documented output; return it instead.
        return output
|
class SpeakerVerification(Task):
    """
    Speaker verification: classification-style training, trial scoring at test.

    model.output_size should match len(categories)

    Args:
        model (SpeakerClassifier):
            actual model or a callable config for the model
        category (CategoryEncoder):
            each key in the Dictionary is the final prediction content in str.
            use categories[key] to encode as numeric label
        test_trials (List[Tuple[int, str, str]]):
            each tuple in the list consists of (label, enroll_utt, test_utt)
        loss_type (str): softmax or amsoftmax
        loss_conf (dict): arguments for the loss_type class
    """

    def __init__(self, model: SpeakerClassifier, category: CategoryEncoder, test_trials: List[Tuple[int, str, str]] = None, loss_type: str = 'amsoftmax', loss_conf: dict = None):
        super().__init__()
        self.model = model
        self.category = category
        self.trials = test_trials
        if loss_type == 'amsoftmax':
            loss_cls = amsoftmax
        elif loss_type == 'softmax':
            loss_cls = softmax
        else:
            raise ValueError(f'Unsupported loss_type {loss_type}')
        # FIX: the declared default loss_conf=None crashed on `**loss_conf`
        # (TypeError); treat None as "no extra loss arguments".
        self.loss: torch.nn.Module = loss_cls(input_size=self.model.output_size, output_size=len(self.category), **(loss_conf or {}))
        assert self.loss.output_size == len(category)

    def get_state(self):
        # The margin-softmax loss holds trainable weights; checkpoint them.
        return {'loss_state': self.loss.state_dict()}

    def set_state(self, state: dict):
        self.loss.load_state_dict(state['loss_state'])

    def predict(self, x: torch.Tensor, x_len: torch.LongTensor):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            torch.Tensor: speaker embeddings, (batch_size, output_size)
        """
        spk_embeddings = self.model(x, x_len)
        return spk_embeddings

    def train_step(self, x: torch.Tensor, x_len: torch.LongTensor, class_id: torch.LongTensor, unique_name: List[str], _dump_dir: str = None):
        """One training forward: embeddings -> (loss, logits) via the
        margin-softmax head; caches per-sample predictions for reduction."""
        spk_embeddings = self.predict(x, x_len)
        (loss, logits) = self.loss(spk_embeddings, class_id)
        prediction = [index for index in logits.argmax(dim=-1).detach().cpu().tolist()]
        cacheable = dict(loss=loss.detach().cpu().item(), class_id=class_id.detach().cpu().tolist(), prediction=prediction, unique_name=unique_name)
        return (loss, cacheable)

    def train_reduction(self, cached_results: list, _dump_dir: str = None):
        """Aggregate cached training batches into mean loss and accuracy."""
        results = self.parse_cached_results(cached_results)
        acc = accuracy(results['prediction'], results['class_id'])
        loss = torch.FloatTensor(results['loss']).mean().item()
        return dict(loss=loss, accuracy=acc)

    def test_step(self, x: torch.Tensor, x_len: torch.LongTensor, unique_name: List[str], _dump_dir: str):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len: torch.LongTensor
            unique_name (List[str])

        Return:
            unique_name (List[str])
            output (torch.Tensor):
                speaker embeddings corresponding to unique_name
        """
        spk_embeddings = self.predict(x, x_len)
        cacheable = dict(unique_name=unique_name.tolist(), spk_embedding=spk_embeddings.detach().cpu().unbind(dim=0))
        return (None, cacheable)

    def test_reduction(self, cached_results: List[dict], _dump_dir: str):
        """Score all (enroll, test) trials by cosine similarity of their
        embeddings and report EER / minDCF."""
        results = self.parse_cached_results(cached_results)
        embeddings = {}
        for (name, emb) in zip(results['unique_name'], results['spk_embedding']):
            embeddings[name] = emb
        trials = self.trials
        scores = []
        labels = []
        for (label, enroll, test) in tqdm(trials, desc='Test Scoring', total=len(trials)):
            enroll_embd = embeddings[enroll]
            test_embd = embeddings[test]
            score = F.cosine_similarity(enroll_embd, test_embd, dim=0).item()
            scores.append(score)
            labels.append(label)
        (EER, EERthreshold) = compute_eer(labels, scores)
        (minDCF, minDCFthreshold) = compute_minDCF(labels, scores, p_target=0.01)
        return dict(EER=EER, EERthreshold=EERthreshold.item(), minDCF=minDCF, minDCF_threshold=minDCFthreshold)
|
class Speech2TextCTCExample(nn.Module):
    """An example speech-to-text task with CTC objective

    Args:
        input_size (int, optional): Input size. Defaults to 3.
        output_size (int, optional): Output size. Defaults to 4.
    """

    def __init__(self, input_size=3, output_size=4):
        super().__init__()
        self._input_size = input_size
        self._output_size = output_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._output_size

    def forward(self, x, x_len):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            y (torch.Tensor): (batch_size, timestamps, output_size)
            y_len (torch.LongTensor): (batch_size, )
        """
        assert x.size(-1) == self.input_size
        output = torch.randn(x.size(0), x.size(1), self.output_size)
        # FIX: the original `assert output, x_len` raised RuntimeError (truth
        # value of a multi-element tensor is ambiguous) and never returned
        # the documented (y, y_len); return them instead.
        return output, x_len
|
class Speech2TextCTCTask(Task):
    """Speech-to-text task with CTC objective

    Args:
        model (Speech2TextCTCExample)
        tokenizer (Tokenizer): Text tokenizer.
        decoder (Union[BeamDecoder, dict], optional):
            Beam decoder or decoder's config. Defaults to None.
        log_metrics (List[str], optional):
            Metrics to be logged. Defaults to ["cer", "wer"].
    """

    def __init__(self, model: torch.nn.Module, tokenizer: Tokenizer, decoder: Union[BeamDecoder, dict] = None, log_metrics: List[str] = None) -> None:
        super().__init__()
        self.model = model
        assert isinstance(tokenizer, Tokenizer)
        self.tokenizer = tokenizer
        # FIX: avoid a mutable (list) default argument; None stands for the
        # default metric set ['cer', 'wer'].
        self.log_metrics = log_metrics if log_metrics is not None else ['cer', 'wer']
        if BeamDecoder is None:
            # Beam decoding unavailable (flashlight not importable).
            decoder = None
        if isinstance(decoder, dict):
            decoder = BeamDecoder(**decoder)
            logger.info('Using flashlight decoder.')
        self.decoder = decoder
        # CTC blank symbol shares the tokenizer's pad index.
        self.criterion = nn.CTCLoss(blank=self.tokenizer.pad_idx, zero_infinity=True)

    def predict(self, x: torch.Tensor, x_len: torch.LongTensor):
        """
        Args:
            x (torch.Tensor): (batch_size, timestamps, input_size)
            x_len (torch.LongTensor): (batch_size, )

        Return:
            logits (torch.Tensor): (batch_size, timestamps, output_size)
            prediction (list): prediction strings
            valid_length (torch.LongTensor): (batch_size, )
        """
        (logits, x_len) = self.model(x, x_len)
        # Greedy CTC decode: argmax, collapse repeats, drop pad/eos tokens.
        predicted_tokens = torch.argmax(logits, dim=2).detach().cpu()
        filtered_tokens = [
            [token for token in pred_token.unique_consecutive().tolist()
             if (token != self.tokenizer.pad_idx) and (token != self.tokenizer.eos_idx)]
            for pred_token in predicted_tokens
        ]
        predictions = [self.tokenizer.decode(token_list) for token_list in filtered_tokens]
        return (logits, predictions, x_len)

    def forward(self, _mode: str, x: torch.Tensor, x_len: torch.LongTensor, labels: np.ndarray, class_ids: torch.LongTensor, unique_name: np.ndarray, beam_decode: bool = False, _dump_dir: str = None):
        """
        Each forward step in the training loop

        Args:
            _mode (str): train / valid / test
            x (torch.Tensor):
                Input waveform or acoustic features.
                (batch_size, timestamps, input_size)
            x_len (torch.LongTensor):
                Lengths of inputs.
                (batch_size, )
            labels (np.ndarray):
                Ground truth transcriptions (str).
                (batch_size, )
            class_ids (torch.LongTensor):
                Tokenized ground truth transcriptions.
            unique_name (np.ndarray):
                Unique names for each sample.
            beam_decode (bool): also run the beam decoder, if available.
        """
        (logits, prediction, x_len) = self.predict(x, x_len)
        log_probs = F.log_softmax(logits, dim=2)
        y = class_ids
        # Target lengths: number of non-pad tokens per utterance.
        y_len = torch.tensor([(ids != self.tokenizer.pad_idx).long().sum() for ids in class_ids], dtype=torch.long, device=logits.device)
        # nn.CTCLoss expects (timestamps, batch, vocab) log-probabilities.
        loss = self.criterion(log_probs.transpose(0, 1), y, x_len, y_len)
        hyps = None
        if beam_decode and (self.decoder is not None):
            hyps = self.decoder.decode(log_probs.detach())
        cacheable = dict(loss=loss.detach().cpu().item(), prediction=prediction, label=labels.tolist(), unique_name=unique_name.tolist(), hypotheses=hyps)
        return (loss, cacheable)

    def reduction(self, _mode: str, cached_results: List[dict], _dump_dir: str = None):
        """Aggregate cached batches into a loss plus the configured text
        metrics; optionally dump hypothesis/reference files."""
        results = self.parse_cached_results(cached_results)
        losses = results['loss']
        predictions = results['prediction']
        labels = results['label']
        unique_names = results['unique_name']
        if _dump_dir is not None:
            # Dump "<uid> <text>" lines for hypotheses and references.
            with (Path(_dump_dir) / 'hyp').open('w') as f:
                f.writelines([f'{uid} {p}\n' for (p, uid) in zip(predictions, unique_names)])
            with (Path(_dump_dir) / 'ref').open('w') as f:
                f.writelines([f'{uid} {p}\n' for (p, uid) in zip(labels, unique_names)])
        beam_hyps = None
        if results['hypotheses'][0] is not None:
            # Keep only the top beam hypothesis per utterance.
            beam_hyps = [' '.join(hyp[0].words) for hyp in results['hypotheses']]
        logs = {}
        logs['loss'] = float(np.mean(losses))
        if 'wer' in self.log_metrics:
            logs['wer'] = wer(predictions, labels)
        if 'cer' in self.log_metrics:
            logs['cer'] = cer(predictions, labels)
        if 'per' in self.log_metrics:
            logs['per'] = per(predictions, labels)
        if 'slot_type_f1' in self.log_metrics:
            logs['slot_type_f1'] = slot_type_f1(predictions, labels)
        if 'slot_value_cer' in self.log_metrics:
            logs['slot_value_cer'] = slot_value_cer(predictions, labels)
        if 'slot_value_wer' in self.log_metrics:
            logs['slot_value_wer'] = slot_value_wer(predictions, labels)
        if 'slot_edit_f1_full' in self.log_metrics:
            logs['slot_edit_f1_full'] = slot_edit_f1_full(predictions, labels)
        if 'slot_edit_f1_part' in self.log_metrics:
            logs['slot_edit_f1_part'] = slot_edit_f1_part(predictions, labels)
        if beam_hyps is not None:
            logs['wer_beam'] = wer(beam_hyps, labels)
            logs['char_beam'] = cer(beam_hyps, labels)
        return logs
|
class CMVN(nn.Module):
    """Global cepstral mean and variance normalization along one dimension."""

    __constants__ = ['mode', 'dim', 'eps']

    def __init__(self, mode='global', dim=2, eps=1e-10):
        super(CMVN, self).__init__()
        if mode != 'global':
            raise NotImplementedError('Only support global mean variance normalization.')
        self.mode = mode
        self.dim = dim
        self.eps = eps

    def forward(self, x):
        if self.mode == 'global':
            # Center, then scale by std; eps avoids division by zero.
            centered = x - x.mean(self.dim, keepdim=True)
            return centered / (self.eps + x.std(self.dim, keepdim=True))

    def extra_repr(self):
        return 'mode={}, dim={}, eps={}'.format(self.mode, self.dim, self.eps)
|
class FeatureExtractor(nn.Module):
    """Feature extractor, transforming file path to Mel spectrogram"""

    def __init__(self, mode='fbank', num_mel_bins=80, decode_wav=False, apply_cmvn=True, **kwargs):
        super(FeatureExtractor, self).__init__()
        assert mode == 'fbank', 'Only Mel-spectrogram implemented'
        self.mode = mode
        self.extract_fn = kaldi.fbank
        self.apply_cmvn = apply_cmvn
        if self.apply_cmvn:
            self.cmvn = CMVN()
        self.num_mel_bins = num_mel_bins
        # Extra kwargs are forwarded verbatim to the kaldi extraction call.
        self.kwargs = kwargs
        self.decode_wav = decode_wav
        if self.decode_wav:
            torchaudio.set_audio_backend('soundfile')

    def _load_file(self, filepath):
        """Load an audio file, using the backend chosen at construction."""
        loader = torchaudio.load_wav if self.decode_wav else torchaudio.load
        waveform, sample_rate = loader(filepath)
        return waveform, sample_rate

    def forward(self, waveform):
        feats = self.extract_fn(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=SAMPLE_RATE, window_type=WINDOW_TYPE, **self.kwargs)
        if self.apply_cmvn:
            # CMVN normalizes along dim=2, so lift (time, mel) to
            # (1, mel, time) first, then undo the reshaping.
            feats = feats.transpose(0, 1).unsqueeze(0)
            feats = self.cmvn(feats)
            feats = feats.squeeze(0).transpose(0, 1)
        return feats

    def extra_repr(self):
        return 'mode={}, num_mel_bins={}'.format(self.mode, self.num_mel_bins)

    def create_msg(self):
        """List msg for verbose function"""
        msg = 'Audio spec.| Audio feat. = {}\t\t| feat. dim = {}\t| CMVN = {}'.format(self.mode, self.num_mel_bins, self.apply_cmvn)
        return [msg]
|
def create_transform(audio_config):
    """Build a FeatureExtractor from an audio config dict (consumed in place).

    Returns (extractor, feat_dim).
    """
    feat_type = audio_config.pop('feat_type')
    feat_dim = audio_config.pop('feat_dim')
    decode_wav = audio_config.pop('decode_wav', False)
    apply_cmvn = audio_config.pop('cmvn', True)
    # Remaining keys are passed through to the extraction function.
    extractor = FeatureExtractor(feat_type, feat_dim, decode_wav, apply_cmvn, **audio_config)
    return extractor, feat_dim
|
class UpstreamExpert(UpstreamBase):
    """APC upstream wrapper: loads a pretrained APC checkpoint and exposes
    hidden states through UpstreamBase hooks."""

    def __init__(self, ckpt, **kwargs):
        super().__init__(**kwargs)
        ckpt = torch.load(ckpt, map_location='cpu')
        config = ckpt['config']
        # Build the waveform -> feature frontend from the checkpoint's own
        # audio config, so features match what the model was trained on.
        (self.preprocessor, feat_dim) = create_transform(config['data']['audio'])
        self.model = APC(feat_dim, **config['model']['paras'])
        self.model.load_state_dict(ckpt['model'])
        if (len(self.hooks) == 0):
            # Expose each RNN layer's padded output plus the model's second
            # return value as hidden states.
            self.add_hook('self.model.rnn_layers[1]', (lambda input, output: pad_packed_sequence(input[0], batch_first=True)[0]))
            self.add_hook('self.model.rnn_layers[2]', (lambda input, output: pad_packed_sequence(input[0], batch_first=True)[0]))
            self.add_hook('self.model', (lambda input, output: output[1]))

    def get_downsample_rates(self, key: str) -> int:
        # Samples of audio per feature frame; presumably a 10 ms hop at
        # 16 kHz — TODO confirm against the frontend config.
        return 160

    def forward(self, wavs):
        features = [self.preprocessor(wav.unsqueeze(0)) for wav in wavs]
        feat_lengths = [len(feat) for feat in features]
        features = pad_sequence(features, batch_first=True)
        feat_lengths = torch.LongTensor(feat_lengths)
        # No explicit return: hidden states appear to be collected by the
        # hooks registered in __init__. NOTE(review): confirm callers rely on
        # hooks rather than a return value.
        (predicted_BxLxM, features) = self.model(features, feat_lengths, testing=(not self.training))
|
def apc_local(ckpt, *args, **kwargs):
    """Build the APC upstream from a local checkpoint.

    ckpt (str): PATH
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def apc_url(ckpt, refresh=False, *args, **kwargs):
    """Build the APC upstream from a checkpoint URL.

    ckpt (str): URL
    refresh (bool): re-download even if a cached copy exists
    """
    local_path = _urls_to_filepaths(ckpt, refresh=refresh)
    return apc_local(local_path, *args, **kwargs)
|
def apc(refresh=False, *args, **kwargs):
    """The default APC model (the 360hr variant).

    refresh (bool): whether to download ckpt/config again if existed
    """
    return apc_360hr(*args, refresh=refresh, **kwargs)
|
def apc_360hr(refresh=False, *args, **kwargs):
    """The apc standard model on 360hr.

    refresh (bool): whether to download ckpt/config again if existed
    """
    ckpt_url = 'https://huggingface.co/leo19941227/apc_series/resolve/main/apc_360hr.ckpt'
    kwargs['ckpt'] = ckpt_url
    return apc_url(*args, refresh=refresh, **kwargs)
|
def apc_960hr(refresh=False, *args, **kwargs):
    """The apc standard model on 960hr.

    refresh (bool): whether to download ckpt/config again if existed
    """
    ckpt_url = 'https://huggingface.co/leo19941227/apc_series/resolve/main/apc_960hr.ckpt'
    kwargs['ckpt'] = ckpt_url
    return apc_url(*args, refresh=refresh, **kwargs)
|
class VQLayer(nn.Module):
    """Gumbel-softmax vector-quantization layer that follows an RNN layer."""

    def __init__(self, input_size, codebook_size, code_dim, gumbel_temperature):
        """
        Args:
            input_size: pre-quantized input feature size, usually the hidden
                size of the RNN.
            codebook_size: number of codes.
            code_dim: size of each code; if not the last layer, must equal
                the RNN hidden size.
            gumbel_temperature: temperature for gumbel-softmax.
        """
        super(VQLayer, self).__init__()
        self.codebook_size = codebook_size
        # Projects inputs to logits over the codebook entries.
        self.vq_logits = nn.Linear(input_size, codebook_size)
        self.gumbel_temperature = gumbel_temperature
        # The codebook: a bias-free linear map from one-hot codes to embeddings.
        self.codebook_CxE = nn.Linear(codebook_size, code_dim, bias=False)
        # Accumulated per-code usage counts; reported/reset by report_usg().
        self.token_usg = np.zeros(codebook_size)

    def forward(self, inputs_BxLxI, testing, lens=None):
        logits_BxLxC = self.vq_logits(inputs_BxLxI)
        if testing:
            # Inference: hard one-hot at the argmax code.
            hard_idx = logits_BxLxC.max(dim=-1)[1]
            onehot_BxLxC = torch.zeros_like(logits_BxLxC)
            onehot_BxLxC.scatter_(-1, hard_idx.unsqueeze(-1), 1)
        else:
            # Training: straight-through gumbel-softmax sample.
            onehot_BxLxC = gumbel_softmax(logits_BxLxC, tau=self.gumbel_temperature, hard=True, eps=EPS, dim=-1)
        self.token_usg += onehot_BxLxC.detach().cpu().reshape(-1, self.codebook_size).sum(dim=0).numpy()
        codes_BxLxE = self.codebook_CxE(onehot_BxLxC)
        return logits_BxLxC, codes_BxLxE

    def report_ppx(self):
        """Computes perplexity of distribution over codebook"""
        acc_usg = self.token_usg / sum(self.token_usg)
        entropy = sum(-acc_usg * np.log2(acc_usg + EPS))
        return 2 ** entropy

    def report_usg(self):
        """Computes usage each entry in codebook, then resets the counts"""
        acc_usg = self.token_usg / sum(self.token_usg)
        self.token_usg = np.zeros(self.codebook_size)
        return acc_usg
|
def ast(refresh: bool=False, window_secs: float=10.24, stride_secs: float=10.24, **kwds):
    """AST upstream pretrained on AudioSet (the 0.4593 mAP checkpoint)."""
    ckpt_url = 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
    kwds['ckpt'] = _urls_to_filepaths(ckpt_url, refresh=refresh)
    return _UpstreamExpert(window_secs=window_secs, stride_secs=stride_secs, **kwds)
|
def audio_albert_local(ckpt, *args, **kwargs):
    """Build the model from a local checkpoint.

    ckpt (str): path to the checkpoint file.
    feature_selection (int): -1 (default, the last layer) or an int in
        range(0, max_layer_num).
    """
    assert os.path.isfile(ckpt)
    return _UpstreamExpert(ckpt, *args, **kwargs)
|
def audio_albert_url(ckpt, refresh=False, *args, **kwargs):
    """Build the model from a URL; the checkpoint is cached locally first.

    ckpt (str): URL of the checkpoint.
    refresh (bool): whether to re-download even if cached.
    """
    local_ckpt = _urls_to_filepaths(ckpt, refresh=refresh)
    return audio_albert_local(local_ckpt, *args, **kwargs)
|
def audio_albert(refresh=False, *args, **kwargs):
    """Default AudioALBERT model (the 960hr variant).

    refresh (bool): whether to download ckpt/config again if existed.
    """
    return audio_albert_960hr(*args, refresh=refresh, **kwargs)
|
def audio_albert_960hr(refresh=False, *args, **kwargs):
    """AudioALBERT base model pretrained on 960hr.

    refresh (bool): whether to download ckpt/config again if existed.
    """
    return audio_albert_logMelBase_T_share_AdamW_b32_1m_960hr_drop1(*args, refresh=refresh, **kwargs)
|
def audio_albert_logMelBase_T_share_AdamW_b32_1m_960hr_drop1(refresh=False, *args, **kwargs):
    """AudioALBERT trained with:

    Feature: 80-dim log Mel
    Alteration: time
    Optimizer: AdamW
    Batch size: 32
    Total steps: 1M
    Unlabled Speech: 960hr
    """
    ckpt_url = 'https://huggingface.co/s3prl/audio_albert/resolve/main/audio_albert_logMelBase_T_share_AdamW_b32_1m_960hr_drop1/states-1000000.ckpt'
    kwargs['ckpt'] = ckpt_url
    return audio_albert_url(*args, refresh=refresh, **kwargs)
|
class UpstreamExpert(UpstreamBase):
    """
    Extract baseline features from waveforms via torchaudio.compliance.kaldi
    or the torchaudio preprocessor.
    Support: spectrogram, fbank, mfcc, mel, linear
    """

    def __init__(self, model_config, **kwargs):
        super().__init__(**kwargs)
        with open(model_config, 'r') as file:
            self.config = yaml.load(file, Loader=yaml.FullLoader)
        if 'kaldi' in self.config:
            # Kaldi-style pipeline reports its own frame shift in ms.
            self.extracter, self.output_dim, frame_shift = get_extracter(self.config)
            self.downsample_rate = round(frame_shift * SAMPLE_RATE / 1000)
        else:
            self.extracter, self.output_dim, _ = get_preprocessor(self.config, process_input_only=True)
            # hop_ms defaults to 10 ms when absent from the config.
            self.downsample_rate = round(self.config.get('hop_ms', 10) * SAMPLE_RATE / 1000)

    def get_downsample_rates(self, key: str) -> int:
        return self.downsample_rate

    def _extractor_forward(self, wavs):
        # The kaldi extracter operates on one utterance at a time.
        return [self.extracter(wav) for wav in wavs]

    def _preprocessor_forward(self, wavs):
        wav_lengths = [len(wav) for wav in wavs]
        batch = pad_sequence(wavs, batch_first=True).unsqueeze(1)
        feats = self.extracter(batch)[0]
        # Estimate frames-per-sample from the first utterance, then trim each
        # padded feature back to its own valid length.
        ratio = len(feats[0]) / wav_lengths[0]
        feat_lengths = [round(l * ratio) for l in wav_lengths]
        return [feat[:length] for feat, length in zip(feats, feat_lengths)]

    def forward(self, wavs):
        if 'kaldi' in self.config:
            feats = self._extractor_forward(wavs)
        else:
            feats = self._preprocessor_forward(wavs)
        padded_feats = pad_sequence(feats, batch_first=True)
        return {'last_hidden_state': padded_feats, 'hidden_states': [padded_feats]}
|
def get_extracter(config):
    """Build a kaldi-style feature pipeline (extract -> delta -> cmvn).

    Returns (extracter, output_dim, frame_shift_ms).
    """
    extracter = nn.Sequential(
        ExtractAudioFeature(**config.get('kaldi', {})),
        Delta(**config.get('delta', {})),
        CMVN(**config.get('cmvn', {})),
    )
    # Probe the output dimension by running example audio through the pipeline.
    output_dim = extracter(torch.randn(EXAMPLE_SEC * SAMPLE_RATE)).size(-1)
    return extracter, output_dim, extracter[0].frame_shift
|
class ExtractAudioFeature(nn.Module):
    """Thin wrapper around a torchaudio.compliance.kaldi feature function.

    feat_type (str): name of the kaldi-compliance function to call
        (e.g. "fbank", "spectrogram", "mfcc").
    kwargs: must contain a dict under the key `feat_type` holding the keyword
        arguments forwarded to that function.
    """

    def __init__(self, feat_type='fbank', **kwargs):
        super(ExtractAudioFeature, self).__init__()
        # Fix: use attribute lookup instead of eval() — same resolution, but a
        # config-provided string can no longer execute arbitrary code.
        self.extract_fn = getattr(torchaudio.compliance.kaldi, feat_type)
        self.kwargs = kwargs[feat_type]
        # The kaldi functions default to a 10 ms frame shift.
        self.frame_shift = self.kwargs.get('frame_shift', 10.0)

    def forward(self, waveform):
        # kaldi functions expect (channel, time); flatten to a single channel.
        x = self.extract_fn(waveform.view(1, -1), sample_frequency=SAMPLE_RATE, **self.kwargs)
        return x
|
class Delta(nn.Module):
    """Append delta (and higher-order delta) features along the last dim."""

    def __init__(self, order=2, **kwargs):
        super(Delta, self).__init__()
        self.order = order
        self.compute_delta = transforms.ComputeDeltas(**kwargs)

    def forward(self, x):
        feats = [x]
        for _ in range(self.order):
            # ComputeDeltas expects (..., freq, time); transpose in and out.
            previous = feats[-1].transpose(0, 1).unsqueeze(0)
            delta = self.compute_delta(previous)
            feats.append(delta.squeeze(0).transpose(0, 1))
        return torch.cat(feats, dim=-1)
|
class CMVN(nn.Module):
    """Per-utterance cepstral mean/variance normalization over time (dim 0)."""

    def __init__(self, use_cmvn, eps=1e-10):
        super(CMVN, self).__init__()
        self.eps = eps
        self.use_cmvn = use_cmvn

    def forward(self, x):
        if not self.use_cmvn:
            return x
        mean = x.mean(dim=0, keepdim=True)
        std = x.std(dim=0, keepdim=True)
        # eps guards against division by zero on constant features.
        return (x - mean) / (self.eps + std)
|
def baseline_local(model_config, *args, **kwargs):
    """Build the baseline feature extractor from a local yaml config.

    model_config: path to the yaml file describing the feature pipeline.
    """
    assert os.path.isfile(model_config)
    return _UpstreamExpert(model_config, *args, **kwargs)
|
def baseline(*args, **kwargs):
    """Default baseline feature: Fbank (Mel-scale spectrogram)."""
    return fbank(*args, **kwargs)
|
def spectrogram(*args, **kwargs):
    """Baseline feature - Linear-scale spectrogram."""
    config_path = os.path.join(os.path.dirname(__file__), 'spectrogram.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
|
def fbank(*args, **kwargs):
    """Baseline feature - Fbank, or Mel-scale spectrogram."""
    config_path = os.path.join(os.path.dirname(__file__), 'fbank.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
|
def fbank_no_cmvn(*args, **kwargs):
    """Baseline feature - Fbank without CMVN normalization."""
    config_path = os.path.join(os.path.dirname(__file__), 'fbank_no_cmvn.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
|
def mfcc(*args, **kwargs):
    """Baseline feature - MFCC."""
    config_path = os.path.join(os.path.dirname(__file__), 'mfcc.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
|
def mel(*args, **kwargs):
    """Baseline feature - Mel."""
    config_path = os.path.join(os.path.dirname(__file__), 'mel.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
|
def linear(*args, **kwargs):
    """Baseline feature - Linear."""
    config_path = os.path.join(os.path.dirname(__file__), 'linear.yaml')
    kwargs['model_config'] = config_path
    return baseline_local(*args, **kwargs)
|
def load_yaml_config(path_to_config):
    """Loads yaml configuration settings as an EasyDict object."""
    config_file = Path(path_to_config)
    assert config_file.is_file()
    with open(config_file) as stream:
        contents = yaml.safe_load(stream)
    return Namespace(**contents)
|
class PrecomputedNorm(nn.Module):
    """Normalization using a pre-computed mean/std.

    Args:
        stats: precomputed (mean, std) pair.
        axis: axis setting used when the statistics were calculated (kept for
            the repr only; forward applies a plain elementwise transform).
    """

    def __init__(self, stats, axis=[1, 2]):
        super().__init__()
        self.axis = axis
        self.mean, self.std = stats

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        return (X - self.mean) / self.std

    def __repr__(self):
        return (
            self.__class__.__name__
            + f'(mean={self.mean}, std={self.std}, axis={self.axis})'
        )
|
class NetworkCommonMixIn():
    """Common mixin for network definition."""

    def load_weight(self, weight_file, device):
        """Utility to load a weight file to a device."""
        state_dict = torch.load(weight_file, map_location=device)
        if 'state_dict' in state_dict:
            state_dict = state_dict['state_dict']
        # Keep only the `fc.*` / `features.*` entries, dropping whatever
        # prefix precedes them (e.g. "module." from DataParallel checkpoints).
        weights = {}
        for key, value in state_dict.items():
            match = re.search('(^fc\\.|\\.fc\\.|^features\\.|\\.features\\.)', key)
            if match is None:
                continue
            trimmed = key[match.start():]
            if trimmed[0] == '.':
                trimmed = trimmed[1:]
            weights[trimmed] = value
        self.load_state_dict(weights)
        self.eval()
        logging.info(f'Using audio embbeding network pretrained weight: {Path(weight_file).name}')
        return self

    def set_trainable(self, trainable=False):
        # Only flips parameters that are currently trainable.
        for p in self.parameters():
            if p.requires_grad:
                p.requires_grad = trainable
|
class AudioNTT2020Task6(nn.Module, NetworkCommonMixIn):
    """DCASE2020 Task6 NTT Solution Audio Embedding Network."""

    def __init__(self, n_mels, d):
        super().__init__()
        # Three identical conv stages, each halving both spatial dims.
        stages = []
        in_channels = 1
        for _ in range(3):
            stages += [
                nn.Conv2d(in_channels, 64, 3, stride=1, padding=1),
                nn.BatchNorm2d(64),
                nn.ReLU(),
                nn.MaxPool2d(2, stride=2),
            ]
            in_channels = 64
        self.features = nn.Sequential(*stages)
        self.fc = nn.Sequential(
            nn.Linear(64 * (n_mels // (2 ** 3)), d),
            nn.ReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(d, d),
            nn.ReLU(),
        )
        self.d = d

    def forward(self, x):
        # (B, 1, mel, time) -> conv -> (B, C, mel/8, time/8) — TODO confirm
        # the input axis order against callers.
        x = self.features(x)
        # Move channels last and flatten freq x channels per frame.
        x = x.permute(0, 3, 2, 1)
        B, T, D, C = x.shape
        x = x.reshape((B, T, C * D))
        return self.fc(x)
|
class AudioNTT2020(AudioNTT2020Task6):
    """BYOL-A General Purpose Representation Network.
    This is an extension of the DCASE 2020 Task 6 NTT Solution Audio Embedding Network.
    """

    def __init__(self, n_mels=64, d=512):
        super().__init__(n_mels=n_mels, d=d)

    def forward(self, x):
        frame_emb = super().forward(x)
        # Combine temporal max-pooling and mean-pooling into one clip embedding.
        pooled = torch.max(frame_emb, dim=1)[0] + frame_emb.mean(dim=1)
        assert pooled.shape[1] == self.d and pooled.ndim == 2
        return pooled
|
def byol_a_2048(refresh=False, **kwds):
    """BYOL-A pretrained weights with a 2048-d embedding (64x96 input)."""
    url = 'https://github.com/nttcslab/byol-a/raw/master/pretrained_weights/AudioNTT2020-BYOLA-64x96d2048.pth'
    ckpt = _urls_to_filepaths(url, refresh=refresh)
    return _UpstreamExpert(ckpt, DEFAULT_CONFIG_PATH, 2048, **kwds)
|
def byol_a_1024(refresh=False, **kwds):
    """BYOL-A pretrained weights with a 1024-d embedding (64x96 input)."""
    url = 'https://github.com/nttcslab/byol-a/raw/master/pretrained_weights/AudioNTT2020-BYOLA-64x96d1024.pth'
    ckpt = _urls_to_filepaths(url, refresh=refresh)
    return _UpstreamExpert(ckpt, DEFAULT_CONFIG_PATH, 1024, **kwds)
|
def byol_a_512(refresh=False, **kwds):
    """BYOL-A pretrained weights with a 512-d embedding (64x96 input)."""
    url = 'https://github.com/nttcslab/byol-a/raw/master/pretrained_weights/AudioNTT2020-BYOLA-64x96d512.pth'
    ckpt = _urls_to_filepaths(url, refresh=refresh)
    return _UpstreamExpert(ckpt, DEFAULT_CONFIG_PATH, 512, **kwds)
|
def default(val, def_val):
    """Return `val` unless it is None, in which case return `def_val`."""
    if val is None:
        return def_val
    return val
|
def flatten(t):
    """Collapse every dim after the batch dim: (B, ...) -> (B, prod(...))."""
    batch = t.shape[0]
    return t.reshape(batch, -1)
|
def singleton(cache_key):
    """Decorator factory: cache a method's result on `self` under `cache_key`.

    The first call computes and stores the value as an attribute; subsequent
    calls return the stored instance without re-running the function.
    """
    def inner_fn(fn):
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            cached = getattr(self, cache_key)
            if cached is None:
                cached = fn(self, *args, **kwargs)
                setattr(self, cache_key, cached)
            return cached
        return wrapper
    return inner_fn
|
def get_module_device(module):
    """Device of the module's first parameter (assumes one device for all)."""
    first_param = next(module.parameters())
    return first_param.device
|
def set_requires_grad(model, val):
    """Set requires_grad on every parameter of `model` to `val`."""
    for param in model.parameters():
        param.requires_grad = val
|
def loss_fn(x, y):
    """BYOL regression loss: 2 - 2 * cosine_similarity(x, y), per sample."""
    x_unit = F.normalize(x, dim=-1, p=2)
    y_unit = F.normalize(y, dim=-1, p=2)
    cosine = (x_unit * y_unit).sum(dim=-1)
    return 2 - 2 * cosine
|
class EMA():
    """Exponential moving average helper with decay factor `beta`."""

    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_average(self, old, new):
        """Blend `new` into `old`; adopt `new` directly when no old value exists."""
        if old is None:
            return new
        return old * self.beta + (1 - self.beta) * new
|
def update_moving_average(ema_updater, ma_model, current_model):
    """EMA-update each parameter of `ma_model` towards `current_model`."""
    for current_p, ma_p in zip(current_model.parameters(), ma_model.parameters()):
        ma_p.data = ema_updater.update_average(ma_p.data, current_p.data)
|
class MLP(nn.Module):
    """Projection head: Linear -> (optional BatchNorm) -> ReLU -> Linear."""

    def __init__(self, dim, projection_size, hidden_size=4096, use_bn=True):
        super().__init__()
        self.lin1 = nn.Linear(dim, hidden_size)
        self.lin2 = nn.Linear(hidden_size, projection_size)
        self.use_bn = use_bn
        # BatchNorm is always constructed so the parameter layout is stable,
        # but only applied when use_bn is True.
        self.bn = nn.BatchNorm1d(hidden_size)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.lin1(x)
        if self.use_bn:
            hidden = self.bn(hidden)
        return self.lin2(self.relu(hidden))
|
class NetWrapper(nn.Module):
    """Wraps a backbone `net` and exposes (projection, representation) pairs.

    The representation is taken from an intermediate layer of `net`, captured
    via a forward hook, so the backbone itself needs no modification.
    """

    def __init__(self, net, projection_size, projection_hidden_size, layer=(- 2)):
        super().__init__()
        self.net = net
        # Which layer to tap: an int index into net.children(), or the name of
        # a module as returned by net.named_modules().
        self.layer = layer
        # Created lazily by _get_projector (via the `singleton` decorator)
        # once the hidden size is known from a real forward pass.
        self.projector = None
        self.projection_size = projection_size
        self.projection_hidden_size = projection_hidden_size
        # Device -> flattened activation captured by the forward hook, so
        # parallel replicas don't clobber each other's activations.
        self.hidden = {}
        self.hook_registered = False

    def _find_layer(self):
        # Resolve self.layer to an actual module (by name or by child index);
        # returns None when it cannot be resolved.
        if (type(self.layer) == str):
            modules = dict([*self.net.named_modules()])
            return modules.get(self.layer, None)
        elif (type(self.layer) == int):
            children = [*self.net.children()]
            return children[self.layer]
        return None

    def _hook(self, _, input, output):
        # Store the flattened layer output keyed by the input's device.
        device = input[0].device
        self.hidden[device] = flatten(output)

    def _register_hook(self):
        layer = self._find_layer()
        assert (layer is not None), f'hidden layer ({self.layer}) not found'
        # NOTE(review): the returned handle is never stored, so the hook
        # cannot be removed later.
        handle = layer.register_forward_hook(self._hook)
        self.hook_registered = True

    @singleton('projector')
    def _get_projector(self, hidden):
        # Build the MLP projector sized to the captured hidden activation,
        # on the same device/dtype as that activation.
        (_, dim) = hidden.shape
        projector = MLP(dim, self.projection_size, self.projection_hidden_size)
        return projector.to(hidden)

    def get_representation(self, x):
        # layer == -1 means "use the net's final output directly" (no hook).
        if (self.layer == (- 1)):
            return self.net(x)
        if (not self.hook_registered):
            self._register_hook()
        self.hidden.clear()
        # Run the net only for its side effect: the hook fills self.hidden.
        _ = self.net(x)
        hidden = self.hidden[x.device]
        self.hidden.clear()
        # NOTE(review): this assert can never fire — the dict lookup above
        # would already have raised KeyError if the hook did not run.
        assert (hidden is not None), f'hidden layer {self.layer} never emitted an output'
        return hidden

    def forward(self, x, return_projection=True):
        representation = self.get_representation(x)
        if (not return_projection):
            return representation
        projector = self._get_projector(representation)
        projection = projector(representation)
        return (projection, representation)
|
class BYOL(nn.Module):
    """BYOL training module that is:
    - Decoupled augmentations.
    - Accepts two augmented inputs independently.

    Computes the symmetric BYOL loss between an online encoder/predictor and
    an (optionally EMA-updated) target encoder, given two augmented views.
    """

    def __init__(self, net, image_size, hidden_layer=(- 1), projection_size=256, projection_hidden_size=4096, moving_average_decay=0.99, use_momentum=True, channels=1):
        super().__init__()
        self.net = net
        self.online_encoder = NetWrapper(net, projection_size, projection_hidden_size, layer=hidden_layer)
        # When use_momentum is False, the online encoder doubles as the target.
        self.use_momentum = use_momentum
        # Created lazily by _get_target_encoder (singleton) on first use.
        self.target_encoder = None
        self.target_ema_updater = EMA(moving_average_decay)
        self.online_predictor = MLP(projection_size, projection_size, projection_hidden_size)
        device = get_module_device(net)
        self.to(device)
        # Dry-run one forward pass so lazily-created submodules (projector,
        # target encoder) exist before any optimizer collects parameters.
        with torch.no_grad():
            self.forward(torch.randn(2, channels, image_size[0], image_size[1]), torch.randn(2, channels, image_size[0], image_size[1]))

    @singleton('target_encoder')
    def _get_target_encoder(self):
        # The target is a frozen deep copy of the online encoder.
        target_encoder = copy.deepcopy(self.online_encoder)
        set_requires_grad(target_encoder, False)
        return target_encoder

    def reset_moving_average(self):
        # Drop the target so the next forward re-copies the online encoder.
        del self.target_encoder
        self.target_encoder = None

    def update_moving_average(self):
        assert self.use_momentum, 'you do not need to update the moving average, since you have turned off momentum for the target encoder'
        assert (self.target_encoder is not None), 'target encoder has not been created yet'
        update_moving_average(self.target_ema_updater, self.target_encoder, self.online_encoder)

    def forward(self, image_one, image_two, return_embedding=False, return_projection=True):
        # NOTE(review): return_embedding / return_projection are accepted but
        # unused in this implementation.
        # Online projections and predictions for both augmented views.
        (online_proj_one, _) = self.online_encoder(image_one)
        (online_proj_two, _) = self.online_encoder(image_two)
        online_pred_one = self.online_predictor(online_proj_one)
        online_pred_two = self.online_predictor(online_proj_two)
        # Target projections are computed without gradients.
        with torch.no_grad():
            target_encoder = (self._get_target_encoder() if self.use_momentum else self.online_encoder)
            (target_proj_one, _) = target_encoder(image_one)
            (target_proj_two, _) = target_encoder(image_two)
            target_proj_one.detach_()
            target_proj_two.detach_()
        # Symmetric loss: each online prediction regresses the *other* view's
        # target projection.
        loss_one = loss_fn(online_pred_one, target_proj_two.detach())
        loss_two = loss_fn(online_pred_two, target_proj_one.detach())
        loss = (loss_one + loss_two)
        return loss.mean()
|
def get_timestamp():
    """ex) Outputs 2104220830 — yymmddHHMM (note '%y' gives a 2-digit year)."""
    return datetime.datetime.now().strftime('%y%m%d%H%M')
|
def load_yaml_config(path_to_config):
    """Loads yaml configuration settings as an EasyDict object."""
    config_file = Path(path_to_config)
    assert config_file.is_file()
    with open(config_file) as stream:
        contents = yaml.safe_load(stream)
    cfg = Namespace(**contents)
    return cfg
|
def get_logger(name):
    """Return a named logger, configuring the root handler/format once."""
    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M',
        level=logging.DEBUG,
    )
    return logging.getLogger(name)
|
class MelSpectrogramLibrosa():
    """Mel spectrogram using librosa."""

    def __init__(self, fs=16000, n_fft=1024, shift=160, n_mels=64, fmin=60, fmax=7800):
        self.fs = fs
        self.n_fft = n_fft
        self.shift = shift
        self.n_mels = n_mels
        self.fmin = fmin
        self.fmax = fmax
        # Precompute the mel filterbank once.
        self.mfb = librosa.filters.mel(sr=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)

    def __call__(self, audio):
        spec = librosa.stft(np.array(audio), n_fft=self.n_fft, hop_length=self.shift)
        # Power spectrum with a tiny eps so downstream log() is safe.
        power = (np.abs(spec) ** 2) + np.finfo(float).eps
        return torch.tensor(np.matmul(self.mfb, power))
|
class WaveInLMSOutDataset(Dataset):
    """Wave in, log-mel spectrogram out, dataset class.

    Choosing librosa or torchaudio:
        librosa: Stable but slower.
        torchaudio: Faster but cannot reproduce the exact performance of pretrained weight,
        which might be caused by the difference with librosa. Librosa was used in the pretraining.

    Args:
        cfg: Configuration settings.
        audio_files: List of audio file pathnames.
        labels: List of labels corresponding to the audio files.
        transform: Transforms (augmentations), callable.
        use_librosa: True if using librosa for converting audio to log-mel spectrogram (LMS).
    """

    def __init__(self, cfg, audio_files, labels, transform, use_librosa=False):
        assert (labels is None) or (len(audio_files) == len(labels)), 'The number of audio files and labels has to be the same.'
        super().__init__()
        self.cfg = cfg
        self.files = audio_files
        self.labels = labels
        self.transform = transform
        # Fixed number of samples each item is padded/cropped to.
        self.unit_length = int(cfg.unit_sec * cfg.sample_rate)
        if use_librosa:
            self.to_melspecgram = MelSpectrogramLibrosa(
                fs=cfg.sample_rate, n_fft=cfg.n_fft, shift=cfg.hop_length,
                n_mels=cfg.n_mels, fmin=cfg.f_min, fmax=cfg.f_max)
        else:
            self.to_melspecgram = torchaudio.transforms.MelSpectrogram(
                sample_rate=cfg.sample_rate, n_fft=cfg.n_fft, win_length=cfg.win_length,
                hop_length=cfg.hop_length, n_mels=cfg.n_mels, f_min=cfg.f_min,
                f_max=cfg.f_max, power=2)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        # Load the waveform; surface unreadable files as FileNotFoundError.
        try:
            wav, sr = torchaudio.load(self.files[idx])
        except RuntimeError:
            print(self.files[idx])
            raise FileNotFoundError(self.files[idx])
        assert sr == self.cfg.sample_rate, f'Convert .wav files to {self.cfg.sample_rate} Hz. {self.files[idx]} has {sr} Hz.'
        assert wav.shape[0] == 1, f'Convert .wav files to single channel audio, {self.files[idx]} has {wav.shape[0]} channels.'
        wav = wav[0]
        # Zero-pad symmetrically up to one unit length when too short.
        length_adj = self.unit_length - len(wav)
        if length_adj > 0:
            half_adj = length_adj // 2
            wav = F.pad(wav, (half_adj, length_adj - half_adj))
        # Random-crop one unit length when too long.
        # BUG FIX: this previously recomputed `self.unit_length - len(wav)`,
        # which is never positive at this point, so `start` was always 0 and
        # long clips were silently always cropped from the beginning.
        length_adj = len(wav) - self.unit_length
        start = random.randint(0, length_adj) if length_adj > 0 else 0
        wav = wav[start:(start + self.unit_length)]
        # Log-mel spectrogram, eps keeps log() finite; shape (1, mel, time).
        lms = (self.to_melspecgram(wav) + torch.finfo().eps).log().unsqueeze(0)
        if self.transform:
            lms = self.transform(lms)
        if self.labels is not None:
            return lms, torch.tensor(self.labels[idx])
        return lms
|
class AudioNTT2020Task6(nn.Module, NetworkCommonMixIn):
    """DCASE2020 Task6 NTT Solution Audio Embedding Network."""

    def __init__(self, n_mels, d):
        super().__init__()
        def conv_stage(cin):
            # One conv stage: 3x3 conv -> BN -> ReLU -> 2x2 max-pool.
            return [
                nn.Conv2d(cin, 64, 3, stride=1, padding=1),
                nn.BatchNorm2d(64),
                nn.ReLU(),
                nn.MaxPool2d(2, stride=2),
            ]
        self.features = nn.Sequential(*conv_stage(1), *conv_stage(64), *conv_stage(64))
        self.fc = nn.Sequential(
            nn.Linear(64 * (n_mels // (2 ** 3)), d),
            nn.ReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(d, d),
            nn.ReLU(),
        )
        self.d = d

    def forward(self, x):
        x = self.features(x)
        # Channels-last, then flatten (freq x channels) per frame before fc.
        x = x.permute(0, 3, 2, 1)
        B, T, D, C = x.shape
        x = x.reshape((B, T, C * D))
        return self.fc(x)
|
class AudioNTT2020(AudioNTT2020Task6):
    """BYOL-A General Purpose Representation Network.

    This is an extension of the DCASE 2020 Task 6 NTT Solution Audio Embedding Network.
    """

    sample_rate = 16000
    embedding_size = 2048
    scene_embedding_size = embedding_size
    timestamp_embedding_size = embedding_size

    def __init__(self, n_mels=64, d=512):
        super().__init__(n_mels=n_mels, d=d)

    def forward(self, x):
        frames = super().forward(x)
        # Temporal mean- plus max-pooling over frame embeddings.
        clip = frames.mean(1) + frames.amax(1)
        assert clip.shape[1] == self.d and clip.ndim == 2
        return clip
|
def group_dict_by_key(cond, d):
    """Split dict `d` into (matching, non-matching) by predicate `cond` on keys."""
    matching = {key: d[key] for key in d.keys() if cond(key)}
    rest = {key: d[key] for key in d.keys() if not cond(key)}
    return (matching, rest)
|
def group_by_key_prefix_and_remove_prefix(prefix, d):
    """Pull out entries whose keys start with `prefix`, stripping the prefix.

    Returns (stripped_matching_dict, remaining_dict).
    """
    stripped = {}
    rest = {}
    for key, value in d.items():
        if key.startswith(prefix):
            stripped[key[len(prefix):]] = value
        else:
            rest[key] = value
    return (stripped, rest)
|
class LayerNorm(nn.Module):
    """Layer normalization, but done in channel dimension #1"""

    def __init__(self, dim, eps=1e-05):
        super().__init__()
        self.eps = eps
        # Learnable per-channel scale/shift, broadcast over (B, C, H, W).
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        mean = x.mean(dim=1, keepdim=True)
        # Biased (population) standard deviation over the channel dim.
        std = x.var(dim=1, unbiased=False, keepdim=True).sqrt()
        normalized = (x - mean) / (std + self.eps)
        return normalized * self.g + self.b
|
class PreNorm(nn.Module):
    """Pre-Normalization: apply channel LayerNorm, then the wrapped fn."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)
|
class FeedForward(nn.Module):
    """Convolutional projection in the transformer (1x1-conv MLP)."""

    def __init__(self, dim, mult=4, dropout=0.0):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Conv2d(hidden, dim, 1),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)
|
class DepthWiseConv2d(nn.Module):
    """Depthwise conv -> BatchNorm -> pointwise (1x1) conv."""

    def __init__(self, dim_in, dim_out, kernel_size, padding, stride, bias=True):
        super().__init__()
        depthwise = nn.Conv2d(dim_in, dim_in, kernel_size=kernel_size, padding=padding, groups=dim_in, stride=stride, bias=bias)
        pointwise = nn.Conv2d(dim_in, dim_out, kernel_size=1, bias=bias)
        self.net = nn.Sequential(depthwise, nn.BatchNorm2d(dim_in), pointwise)

    def forward(self, x):
        return self.net(x)
|
class Attention(nn.Module):
    """Custom Attention layer.

    Multi-head self-attention over 2D feature maps where Q/K/V come from
    depthwise-separable convolutions (CvT-style); the K/V projection may be
    strided to shrink the attended spatial grid.
    """

    def __init__(self, dim, proj_kernel, kv_proj_stride, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = (dim_head * heads)
        padding = (proj_kernel // 2)
        self.heads = heads
        # Scaled dot-product attention scale: 1/sqrt(dim_head).
        self.scale = (dim_head ** (- 0.5))
        self.attend = nn.Softmax(dim=(- 1))
        self.to_q = DepthWiseConv2d(dim, inner_dim, proj_kernel, padding=padding, stride=1, bias=False)
        # One conv produces both K and V (chunked along channels); its stride
        # subsamples the key/value spatial grid.
        self.to_kv = DepthWiseConv2d(dim, (inner_dim * 2), proj_kernel, padding=padding, stride=kv_proj_stride, bias=False)
        self.to_out = nn.Sequential(nn.Conv2d(inner_dim, dim, 1), nn.Dropout(dropout))

    def forward(self, x):
        # Imported locally so einops is only required when attention runs.
        # NOTE(review): `repeat` and `Rearrange` are imported but unused.
        from einops import rearrange, repeat
        from einops.layers.torch import Rearrange
        shape = x.shape
        (b, n, _, y, h) = (*shape, self.heads)
        (q, k, v) = (self.to_q(x), *self.to_kv(x).chunk(2, dim=1))
        # Fold the heads into the batch dim and flatten the spatial grid.
        (q, k, v) = map((lambda t: rearrange(t, 'b (h d) x y -> (b h) (x y) d', h=h)), (q, k, v))
        dots = (torch.einsum('b i d, b j d -> b i j', q, k) * self.scale)
        attn = self.attend(dots)
        out = torch.einsum('b i j, b j d -> b i d', attn, v)
        # Restore the (B, C, H, W) layout using the saved width `y`.
        out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h=h, y=y)
        return self.to_out(out)
|
class Transformer(nn.Module):
    """Custom Transformer layer."""

    def __init__(self, dim, proj_kernel, kv_proj_stride, depth, heads, dim_head=64, mlp_mult=4, dropout=0.0):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            attn = PreNorm(dim, Attention(dim, proj_kernel=proj_kernel, kv_proj_stride=kv_proj_stride, heads=heads, dim_head=dim_head, dropout=dropout))
            ff = PreNorm(dim, FeedForward(dim, mlp_mult, dropout=dropout))
            self.layers.append(nn.ModuleList([attn, ff]))

    def forward(self, x):
        # Residual attention followed by residual feed-forward, per depth.
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x
        return x
|
class CvT(nn.Module):
    """Convolutional Transformer module.

    Adapted for self-supervised training.

    Attributes
    ----------
    s{i}_emb_dim: int
        Embedding dimension at stage i
    s{i}_emb_kernel: int
        Convolutional kernel size at stage i
    s{i}_emb_stride: int
        Convolutional stride at stage i
    s{i}_kv_proj_stride: int
        Convolutional stride in the convolutional projection layers at stage i
    s{i}_heads: int
        Number of attention heads at stage i
    s{i}_depth: int
        Transformer depth at stage i
    s{i}_mlp_mult: int
        MLP ratio at stage i
    dropout: float
        Dropout ratio
    pool: str
        Output pooling strategy: 'mean', 'max', or 'mean+max'
    """

    sample_rate = 16000
    embedding_size = 2048
    scene_embedding_size = embedding_size
    timestamp_embedding_size = embedding_size

    def __init__(self, *, s1_emb_dim=64, s1_emb_kernel=7, s1_emb_stride=4, s1_proj_kernel=3, s1_kv_proj_stride=2, s1_heads=1, s1_depth=1, s1_mlp_mult=4, s2_emb_dim=192, s2_emb_kernel=3, s2_emb_stride=2, s2_proj_kernel=3, s2_kv_proj_stride=2, s2_heads=3, s2_depth=2, s2_mlp_mult=4, s3_emb_dim=384, s3_emb_kernel=3, s3_emb_stride=2, s3_proj_kernel=3, s3_kv_proj_stride=2, s3_heads=6, s3_depth=10, s3_mlp_mult=4, dropout=0.0, pool='mean'):
        super().__init__()
        # Snapshot all keyword args so each stage's settings can be peeled
        # off by their `s{i}_` prefix.
        kwargs = dict(locals())
        dim = 1
        layers = []
        for prefix in ('s1', 's2', 's3'):
            (config, kwargs) = group_by_key_prefix_and_remove_prefix(f'{prefix}_', kwargs)
            layers.append(nn.Sequential(nn.Conv2d(dim, config['emb_dim'], kernel_size=config['emb_kernel'], padding=(config['emb_kernel'] // 2), stride=config['emb_stride']), LayerNorm(config['emb_dim']), Transformer(dim=config['emb_dim'], proj_kernel=config['proj_kernel'], kv_proj_stride=config['kv_proj_stride'], depth=config['depth'], heads=config['heads'], mlp_mult=config['mlp_mult'], dropout=dropout)))
            dim = config['emb_dim']
        self.pool = pool
        assert (self.pool in ['mean', 'max', 'mean+max'])
        if (self.pool == 'mean'):
            self.pool_layers = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten())
        elif (self.pool == 'max'):
            # FIX: this branch previously assigned a stray 1-tuple
            # `(nn.Sequential(...),)` instead of the Sequential itself,
            # inconsistent with the other branches (the pooled Sequential was
            # then spliced in as one nested module rather than its layers).
            self.pool_layers = nn.Sequential(nn.AdaptiveMaxPool2d(1), nn.Flatten())
        else:
            # 'mean+max' pooling is applied in forward(), not as a layer.
            self.pool_layers = nn.Sequential(nn.Identity())
        self.layers = nn.Sequential(*layers, *self.pool_layers)

    def forward(self, x):
        x = self.layers(x)
        if (self.pool == 'mean+max'):
            # Channels-last, flatten per-frame, then temporal mean+max pooling.
            x = x.permute(0, 3, 2, 1)
            (B, T, D, C) = x.shape
            x = x.reshape((B, T, (C * D)))
            x = (x.mean(1) + x.amax(1))
        return x
|
def conv3x3(in_planes: int, out_planes: int, stride: int=1, groups: int=1, dilation: int=1, standardize_weights: bool=False) -> nn.Conv2d:
    """3x3 convolution with padding"""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
    if standardize_weights:
        return weight_norm(conv)
    return conv
|
def conv1x1(in_planes: int, out_planes: int, stride: int=1, standardize_weights: bool=False) -> nn.Conv2d:
    """1x1 convolution"""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
    if standardize_weights:
        return weight_norm(conv)
    return conv
|
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet v1)."""

    expansion: int = 1

    def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, groups: int=1, base_width: int=64, dilation: int=1, norm_layer: Optional[Callable[(..., nn.Module)]]=None, standardize_weights: bool=False) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        # conv1 carries the (possible) stride; conv2 keeps resolution.
        self.conv1 = conv3x3(inplanes, planes, stride, standardize_weights=standardize_weights)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, standardize_weights=standardize_weights)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        # Shortcut path, projected when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """Three-conv bottleneck residual block (1x1 reduce, 3x3, 1x1 expand)."""

    expansion: int = 4

    def __init__(self, inplanes: int, planes: int, stride: int=1, downsample: Optional[nn.Module]=None, groups: int=1, base_width: int=64, dilation: int=1, norm_layer: Optional[Callable[(..., nn.Module)]]=None, standardize_weights: bool=False) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt-style).
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width, standardize_weights=standardize_weights)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation, standardize_weights=standardize_weights)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion, standardize_weights=standardize_weights)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        # Shortcut path, projected when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
|
class ResNetish(nn.Module):
sample_rate = 16000
embedding_size = 2048
scene_embedding_size = embedding_size
timestamp_embedding_size = embedding_size
def __init__(self, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], num_classes: int=1000, zero_init_residual: bool=False, groups: int=1, width_per_group: int=64, replace_stride_with_dilation: Optional[List[bool]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=None, standardize_weights: bool=False) -> None:
super(ResNetish, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=1, padding=3, bias=False)
self.conv1 = (weight_norm(conv1) if standardize_weights else conv1)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], standardize_weights=standardize_weights)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0], standardize_weights=standardize_weights)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1], standardize_weights=standardize_weights)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2], standardize_weights=standardize_weights)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block: Type[Union[(BasicBlock, Bottleneck)]], planes: int, blocks: int, stride: int=1, dilate: bool=False, standardize_weights: bool=False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride, standardize_weights), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
    """Run the convolutional backbone and pool over the time axis.

    The final feature map is flattened per time step, then summarized by
    the sum of mean- and max-pooling across time.
    """
    out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
    for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
        out = stage(out)
    # Reorder so the last spatial axis becomes the "time" axis:
    # (B, C, D, T) -> (B, T, D, C).
    out = out.permute(0, 3, 2, 1)
    batch, steps, depth, chans = out.shape
    # Flatten the per-step (depth, channel) plane into one feature vector.
    out = out.reshape((batch, steps, chans * depth))
    # Mean + max pooling over time.
    return out.mean(1) + out.amax(1)
def forward(self, x: Tensor) -> Tensor:
    """Compute clip-level features; thin wrapper over `_forward_impl`."""
    features = self._forward_impl(x)
    return features
|
def _resnetish(arch: str, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], pretrained: bool, progress: bool, **kwargs: Any) -> ResNetish:
    """Instantiate a ResNetish backbone with the given block type and depths.

    NOTE(review): `arch`, `pretrained` and `progress` are currently unused --
    no pretrained weights are ever downloaded here; confirm that is intended.
    """
    return ResNetish(block, layers, **kwargs)
|
def resnetish10(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNetish:
    """ResNet-10 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Fix: the arch label previously said 'resnetish18' although this builder
    # creates the 10-layer variant ([1, 1, 1, 1] stages).
    return _resnetish('resnetish10', BasicBlock, [1, 1, 1, 1], pretrained, progress, **kwargs)
|
def resnetish18(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNetish:
    """ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_sizes = [2, 2, 2, 2]
    return _resnetish('resnetish18', BasicBlock, stage_sizes, pretrained, progress, **kwargs)
|
def resnetish34(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNetish:
    """ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Adapted for Audio from
    `"CNN architectures for large-scale audio classification" <https://arxiv.org/abs/1609.09430>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_sizes = [3, 4, 6, 3]
    return _resnetish('resnetish34', BasicBlock, stage_sizes, pretrained, progress, **kwargs)
|
def resnetish50(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNetish:
    """ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
    Adapted for Audio from
    `"CNN architectures for large-scale audio classification" <https://arxiv.org/abs/1609.09430>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Consistency fix: every sibling builder uses the 'resnetishNN' naming
    # scheme; this one previously passed 'resnet50'.
    return _resnetish('resnetish50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
|
class Lambda(nn.Module):
    """[NOT USED] Wrap an arbitrary callable as an ``nn.Module`` layer,
    mimicking tensorflow-style Lambda layers."""

    def __init__(self, function):
        super().__init__()
        # The wrapped callable; applied verbatim on every forward pass.
        self.function = function

    def forward(self, x: Tensor) -> Tensor:
        return self.function(x)
|
class NetworkCommonMixIn():
    """Common mixin for network definitions: checkpoint loading and
    (un)freezing of parameters."""
    def load_weight(self, weight_file, device):
        """Load a checkpoint onto `device`, keeping only 'fc.'/'features.' keys.

        Keys are trimmed so they start at the matched 'fc.'/'features.'
        component, then loaded into this module; the module is switched to
        eval mode and returned for chaining.
        """
        state_dict = torch.load(weight_file, map_location=device)
        # Some checkpoints nest the weights under a 'state_dict' entry.
        if ('state_dict' in state_dict):
            state_dict = state_dict['state_dict']
        weights = {}
        for k in state_dict:
            # Keep only keys that contain an 'fc.' or 'features.' component
            # (either at the start or after a module prefix).
            m = re.search('(^fc\\.|\\.fc\\.|^features\\.|\\.features\\.)', k)
            if (m is None):
                continue
            # Drop any wrapper prefix (e.g. 'module.') before the match.
            new_k = k[m.start():]
            # If the match began at a '.', strip that leading dot too.
            new_k = (new_k[1:] if (new_k[0] == '.') else new_k)
            weights[new_k] = state_dict[k]
        self.load_state_dict(weights)
        self.eval()
        logging.info(f'Using audio embbeding network pretrained weight: {Path(weight_file).name}')
        return self
    def set_trainable(self, trainable=False):
        # Freeze (or re-enable) gradient tracking for parameters.
        # NOTE(review): only parameters that currently require grad are
        # touched, so freezing is one-way through this helper -- a later
        # set_trainable(True) cannot re-enable them. Confirm intended.
        for p in self.parameters():
            if p.requires_grad:
                p.requires_grad = trainable
|
class UpstreamExpert(nn.Module):
    """s3prl-style upstream wrapper around a SERAB/BYOL-S model.

    Pads a batch of variable-length waveforms and exposes the framewise
    embeddings produced by ``serab.get_timestamp_embeddings``.
    """
    def __init__(self, ckpt: str=None, model_name: str=None, window_secs: float=1, hop_secs: float=0.05, model_config: str=None):
        super().__init__()
        self.model = serab.load_model(ckpt, model_name)
        # Window/hop are stored in milliseconds, the unit expected by
        # serab.get_timestamp_embeddings.
        self.frame_duration = (window_secs * 1000)
        self.hop_size = (hop_secs * 1000)
        self.model_config = model_config
    def get_downsample_rates(self, key: str=None) -> int:
        # Hop size converted from milliseconds to samples.
        # NOTE(review): assumes SAMPLE_RATE is the module-level input sample
        # rate constant -- verify it matches the rate of the incoming wavs.
        return int(((self.hop_size / 1000) * SAMPLE_RATE))
    def forward(self, wavs: List[Tensor]) -> Dict[(str, Union[(Tensor, List[Tensor])])]:
        # Zero-pad the list of 1-D waveforms into a rectangular batch.
        padded_wavs = pad_sequence(wavs, batch_first=True)
        (embeddings, timestamps) = serab.get_timestamp_embeddings(padded_wavs, self.model, self.frame_duration, self.hop_size)
        # Timestamps are discarded; s3prl consumers only read hidden_states.
        return {'hidden_states': [embeddings]}
|
def byol_s_default(refresh: bool=False, **kwds):
    """BYOL-S upstream with the default (AudioNTT) encoder; downloads the
    checkpoint if needed and forwards remaining kwargs to the expert."""
    ckpt_url = 'https://github.com/GasserElbanna/serab-byols/raw/main/checkpoints/default2048_BYOLAs64x96-2105311814-e100-bs256-lr0003-rs42.pth'
    kwds.update(model_name='default', ckpt=_urls_to_filepaths(ckpt_url, refresh=refresh))
    return _UpstreamExpert(**kwds)
|
def byol_s_cvt(refresh: bool=False, **kwds):
    """BYOL-S upstream with the CvT encoder; downloads the checkpoint if
    needed and forwards remaining kwargs to the expert."""
    ckpt_url = 'https://github.com/GasserElbanna/serab-byols/raw/main/checkpoints/cvt_s1-d1-e64_s2-d1-e256_s3-d1-e512_BYOLAs64x96-osandbyolaloss6373-e100-bs256-lr0003-rs42.pth'
    kwds.update(model_name='cvt', ckpt=_urls_to_filepaths(ckpt_url, refresh=refresh))
    return _UpstreamExpert(**kwds)
|
def byol_s_resnetish34(refresh: bool=False, **kwds):
    """BYOL-S upstream with the resnetish34 encoder; downloads the checkpoint
    if needed and forwards remaining kwargs to the expert."""
    ckpt_url = 'https://github.com/GasserElbanna/serab-byols/raw/main/checkpoints/resnetish34_BYOLAs64x96-2105271915-e100-bs256-lr0003-rs42.pth'
    kwds.update(model_name='resnetish34', ckpt=_urls_to_filepaths(ckpt_url, refresh=refresh))
    return _UpstreamExpert(**kwds)
|
def get_model(model_name: str='', cfg=None) -> torch.nn.Module:
    """Define the model object.

    Parameters
    ----------
    model_name: str, one of 'default', 'resnetish34', 'clstm', 'cvt'.
    cfg: config namespace (attribute access, e.g. from load_yaml_config);
        only required for 'default' and 'cvt'.

    Returns
    -------
    torch.nn.Module object

    Raises
    ------
    ValueError: if `model_name` is not one of the known models.
    """
    # Fix: `cfg` previously defaulted to a mutable `{}` (shared-default
    # pitfall); `None` behaves identically for all callers, since the
    # branches that touch cfg use attribute access a plain dict lacks anyway.
    if (model_name == 'default'):
        model = AudioNTT2020(n_mels=cfg.n_mels, d=cfg.feature_d)
    elif (model_name == 'resnetish34'):
        model = resnetish34()
    elif (model_name == 'clstm'):
        model = CLSTM()
    elif (model_name == 'cvt'):
        (s1_depth, s2_depth, s3_depth) = cfg.depths
        (s1_emb_dim, s2_emb_dim, s3_emb_dim) = cfg.embed_dims
        (s1_mlp_mult, s2_mlp_mult, s3_mlp_mult) = cfg.mlp_mults
        model = CvT(s1_emb_dim=s1_emb_dim, s1_depth=s1_depth, s1_mlp_mult=s1_mlp_mult, s2_emb_dim=s2_emb_dim, s2_depth=s2_depth, s2_mlp_mult=s2_mlp_mult, s3_emb_dim=s3_emb_dim, s3_depth=s3_depth, s3_mlp_mult=s3_mlp_mult, pool=cfg.cvt_pool)
    else:
        raise ValueError('Model not found.')
    return model
|
def load_model(model_file_path: str='', model_name: str='default', cfg_path: str=None) -> torch.nn.Module:
    """Load a pre-trained DL model with its weights.

    Parameters
    ----------
    model_file_path: str, path of the pretrained checkpoint
    model_name: str, name of the pretrained model
    cfg_path: str, path of the yaml config; falls back to the package-local
        config.yaml when empty/None

    Returns
    -------
    torch.nn.Module loaded with the pre-trained weights
    """
    # `or` (not `is None`) so an empty string also falls back to the default.
    cfg = load_yaml_config(cfg_path or (Path(__file__).parent / 'config.yaml'))
    weights = torch.load(model_file_path)
    model = get_model(model_name, cfg)
    model.load_state_dict(weights)
    return model
|
def get_timestamp_embeddings(audio_list: List, model: torch.nn.Module, frame_duration: float=TIMESTAMP_FRAME_DUR, hop_size: float=TIMESTAMP_HOP_SIZE, cfg_path: str=None) -> Tuple[(Tensor, Tensor)]:
    '\n    This function returns embeddings at regular intervals centered at timestamps. Both\n    the embeddings and corresponding timestamps (in milliseconds) are returned.\n    Args:\n        audio_list: List of torch tensor audios.\n        model: Loaded model.\n        frame_duration: Frame (segement) duration in milliseconds\n        hop_size: Hop size in milliseconds.\n            NOTE: Not required by the HEAR API. We add this optional parameter\n            to improve the efficiency of scene embedding.\n        cfg_path: str, the path for yaml file including parameters value\n    Returns:\n        - Tensor: embeddings, A float32 Tensor with shape (n_sounds, n_timestamps,\n            model.timestamp_embedding_size).\n        - Tensor: timestamps, Centered timestamps in milliseconds corresponding\n            to each embedding in the output. Shape: (n_sounds, n_timestamps).\n    '
    cfg_path = (cfg_path or (Path(__file__).parent / 'config.yaml'))
    cfg = load_yaml_config(cfg_path)
    # Mel front-end created on the same device as the input audio.
    to_melspec = MelSpectrogram(sample_rate=cfg.sample_rate, n_fft=cfg.n_fft, win_length=cfg.win_length, hop_length=cfg.hop_length, n_mels=cfg.n_mels, f_min=cfg.f_min, f_max=cfg.f_max).to(audio_list[0].device)
    model = model.to(audio_list[0].device)
    # Slice the audio into overlapping frames with centered timestamps.
    # NOTE(review): frame_size is passed in samples (as a float) while
    # hop_size is forwarded in milliseconds -- confirm frame_audio expects
    # this mix of units.
    (frames, timestamps) = frame_audio(audio_list, frame_size=((frame_duration / 1000) * cfg.sample_rate), hop_size=hop_size, sample_rate=cfg.sample_rate)
    (audio_batches, num_frames, _) = frames.shape
    # Merge (batch, frame) so every frame goes through the model at once.
    frames = frames.flatten(end_dim=1)
    # Log-mel spectrogram; eps keeps log() finite on silent frames.
    melspec_frames = (to_melspec(frames) + torch.finfo(torch.float).eps).log()
    # Normalize with statistics computed from this batch of frames.
    normalizer = PrecomputedNorm(compute_timestamp_stats(melspec_frames))
    # unsqueeze(1) adds the single channel dim expected by the encoder.
    melspec_frames = normalizer(melspec_frames).unsqueeze(1)
    embeddings = model(melspec_frames)
    # Restore the (n_sounds, n_timestamps, feature) layout.
    embeddings = embeddings.reshape(audio_batches, num_frames, (- 1))
    return (embeddings, timestamps)
|
def get_scene_embeddings(audio_list: List, model: torch.nn.Module, cfg_path: str=None) -> Tensor:
    '\n    This function returns a single embedding for each audio clip. In this baseline\n    implementation we simply summarize the temporal embeddings from\n    get_timestamp_embeddings() using torch.mean().\n    Args:\n        audio_list: list of torch tensor audios (audios should be resampled to 16kHz).\n        model: Loaded model.\n        cfg_path:\n    Returns:\n        - embeddings, A float32 Tensor with shape\n            (n_sounds, model.scene_embedding_size).\n    '
    cfg_path = (cfg_path or (Path(__file__).parent / 'config.yaml'))
    device = audio_list[0].device
    cfg = load_yaml_config(cfg_path)
    # Mel front-end created on the same device as the input audio.
    to_melspec = MelSpectrogram(sample_rate=cfg.sample_rate, n_fft=cfg.n_fft, win_length=cfg.win_length, hop_length=cfg.hop_length, n_mels=cfg.n_mels, f_min=cfg.f_min, f_max=cfg.f_max).to(device)
    # Normalization statistics computed over the whole input list (scene-level),
    # unlike get_timestamp_embeddings which normalizes per frame batch.
    stats = compute_scene_stats(audio_list, to_melspec)
    normalizer = PrecomputedNorm(stats)
    model = model.to(device)
    embeddings = generate_byols_embeddings(model, audio_list, to_melspec, normalizer, device)
    return embeddings
|
def get_default_cpc_config():
    """Return an argparse.Namespace holding the default CPC hyper-parameters.

    Builds an empty parser, registers the default CPC arguments on it, and
    parses an empty argv so only the declared defaults are materialized.
    """
    empty_parser = argparse.ArgumentParser()
    return set_default_cpc_config(empty_parser).parse_args([])
|
def set_default_cpc_config(parser):
    """Register the default CPC architecture/training arguments on `parser`.

    Returns the same parser, so callers can chain parse_args on the result.
    """
    group = parser.add_argument_group('Architecture configuration', description="The arguments defining the model's architecture.")
    # --- model dimensions and CPC prediction horizon ---
    group.add_argument('--hiddenEncoder', type=int, default=256, help='Hidden dimension of the encoder network.')
    group.add_argument('--hiddenGar', type=int, default=256, help='Hidden dimension of the auto-regressive network')
    group.add_argument('--nPredicts', type=int, default=12, help='Number of steps to predict.')
    group.add_argument('--negativeSamplingExt', type=int, default=128, help='Number of negative samples to take.')
    # --- optimizer / schedule ---
    group.add_argument('--learningRate', type=float, default=0.0002)
    group.add_argument('--schedulerStep', type=int, default=(- 1), help='Step of the learning rate scheduler: at each step the learning rate is divided by 2. Default: no scheduler.')
    group.add_argument('--schedulerRamp', type=int, default=None, help='Enable a warm up phase for the learning rate: adds a linear ramp of the given size.')
    group.add_argument('--beta1', type=float, default=0.9, help='Value of beta1 for the Adam optimizer')
    group.add_argument('--beta2', type=float, default=0.999, help='Value of beta2 for the Adam optimizer')
    group.add_argument('--epsilon', type=float, default=1e-08, help='Value of epsilon for the Adam optimizer')
    # --- training data / loop ---
    group.add_argument('--sizeWindow', type=int, default=20480, help='Number of frames to consider at each batch.')
    group.add_argument('--nEpoch', type=int, default=200, help='Number of epoch to run')
    group.add_argument('--samplingType', type=str, default='samespeaker', choices=['samespeaker', 'uniform', 'samesequence', 'sequential'], help='How to sample the negative examples in the CPC loss.')
    group.add_argument('--nLevelsPhone', type=int, default=1, help='(Supervised mode only). Number of layers in the phone classification network.')
    # --- architecture variants ---
    group.add_argument('--cpc_mode', type=str, default=None, choices=['reverse', 'none'], help='Some variations on CPC.')
    group.add_argument('--encoder_type', type=str, choices=['cpc', 'mfcc', 'lfb'], default='cpc', help='Replace the encoder network by mfcc features or learned filter banks')
    group.add_argument('--normMode', type=str, default='layerNorm', choices=['instanceNorm', 'ID', 'layerNorm', 'batchNorm'], help='Type of normalization to use in the encoder network (default is layerNorm).')
    group.add_argument('--onEncoder', action='store_true', help="(Supervised mode only) Perform the classification on the encoder's output.")
    group.add_argument('--random_seed', type=int, default=None, help='Set a specific random seed.')
    group.add_argument('--speakerEmbedding', type=int, default=0, help='(Depreciated) Feed the prediction network with speaker embeddings along with the usual sequence.')
    group.add_argument('--arMode', default='LSTM', choices=['GRU', 'LSTM', 'RNN', 'no_ar', 'transformer'], help='Architecture to use for the auto-regressive network (default is lstm).')
    group.add_argument('--nLevelsGRU', type=int, default=1, help='Number of layers in the autoregressive network.')
    group.add_argument('--rnnMode', type=str, default='transformer', choices=['transformer', 'RNN', 'LSTM', 'linear', 'ffd', 'conv4', 'conv8', 'conv12'], help='Architecture to use for the prediction network')
    group.add_argument('--dropout', action='store_true', help='Add a dropout layer at the output of the prediction network.')
    group.add_argument('--abspos', action='store_true', help='If the prediction network is a transformer, active to use absolute coordinates.')
    return parser
|
class UpstreamExpert(UpstreamBase):
def __init__(self, ckpt, **kwargs):
super().__init__(**kwargs)
locArgs = get_default_cpc_config()
checkpoint = torch.load(ckpt, map_location='cpu')
loadArgs(locArgs, argparse.Namespace(**checkpoint['config']))
encoderNet = getEncoder(locArgs)
arNet = getAR(locArgs)
self.model = cpcmodel(encoderNet, arNet)
self.model.load_state_dict(checkpoint['weights'], strict=False)
if (len(self.hooks) == 0):
self.add_hook('self.model.gEncoder', (lambda input, output: output.transpose(1, 2)))
self.add_hook('self.model.gAR', (lambda input, output: output))
def get_downsample_rates(self, key: str) -> int:
return 160
def forward(self, wavs):
padded_wav = pad_sequence(wavs, batch_first=True)
features = self.model(padded_wav.unsqueeze(1), None)[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.