| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/tacotron2_loss.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
from typing import Any, Dict, List
from functools import lru_cache
from dataclasses import dataclass, field
import torch
from omegaconf import II
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import lengths_to_mask
import torch.nn.functional as F
logger = logging.getLogger(__name__)
@dataclass
class Tacotron2CriterionConfig(FairseqDataclass):
bce_pos_weight: float = field(
default=1.0,
metadata={"help": "weight of positive examples for BCE loss"},
)
n_frames_per_step: int = field(
default=0,
metadata={"help": "Number of frames per decoding step"},
)
use_guided_attention_loss: bool = field(
default=False,
metadata={"help": "use guided attention loss"},
)
guided_attention_loss_sigma: float = field(
default=0.4,
metadata={"help": "weight of positive examples for BCE loss"},
)
ctc_weight: float = field(default=0.0, metadata={"help": "weight for CTC loss"})
sentence_avg: bool = II("optimization.sentence_avg")
class GuidedAttentionLoss(torch.nn.Module):
"""
Efficiently Trainable Text-to-Speech System Based on Deep Convolutional
Networks with Guided Attention (https://arxiv.org/abs/1710.08969)
"""
def __init__(self, sigma):
super().__init__()
self.sigma = sigma
@staticmethod
@lru_cache(maxsize=8)
def _get_weight(s_len, t_len, sigma):
grid_x, grid_y = torch.meshgrid(torch.arange(t_len), torch.arange(s_len))
grid_x = grid_x.to(s_len.device)
grid_y = grid_y.to(s_len.device)
w = (grid_y.float() / s_len - grid_x.float() / t_len) ** 2
return 1.0 - torch.exp(-w / (2 * (sigma**2)))
def _get_weights(self, src_lens, tgt_lens):
bsz, max_s_len, max_t_len = len(src_lens), max(src_lens), max(tgt_lens)
weights = torch.zeros((bsz, max_t_len, max_s_len))
for i, (s_len, t_len) in enumerate(zip(src_lens, tgt_lens)):
weights[i, :t_len, :s_len] = self._get_weight(s_len, t_len, self.sigma)
return weights
@staticmethod
def _get_masks(src_lens, tgt_lens):
in_masks = lengths_to_mask(src_lens)
out_masks = lengths_to_mask(tgt_lens)
return out_masks.unsqueeze(2) & in_masks.unsqueeze(1)
def forward(self, attn, src_lens, tgt_lens, reduction="mean"):
weights = self._get_weights(src_lens, tgt_lens).to(attn.device)
masks = self._get_masks(src_lens, tgt_lens).to(attn.device)
loss = (weights * attn.transpose(1, 2)).masked_select(masks)
loss = torch.sum(loss) if reduction == "sum" else torch.mean(loss)
return loss
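# A minimal, self-contained usage sketch of GuidedAttentionLoss; the shapes
# below are illustrative assumptions (attn laid out as B x T_src x T_tgt, to
# match the transpose(1, 2) in forward() above).
def _demo_guided_attention_loss():  # pragma: no cover
    loss_fn = GuidedAttentionLoss(sigma=0.4)
    attn = torch.softmax(torch.randn(2, 10, 20), dim=1)  # B x T_src x T_tgt
    src_lens = torch.tensor([10, 8])
    tgt_lens = torch.tensor([20, 15])
    # Attention mass far from the source/target diagonal is penalized;
    # near-diagonal mass costs almost nothing.
    return loss_fn(attn, src_lens, tgt_lens)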
@register_criterion("tacotron2", dataclass=Tacotron2CriterionConfig)
class Tacotron2Criterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
n_frames_per_step,
use_guided_attention_loss,
guided_attention_loss_sigma,
bce_pos_weight,
ctc_weight,
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.n_frames_per_step = n_frames_per_step
self.bce_pos_weight = bce_pos_weight
self.guided_attn = None
if use_guided_attention_loss:
self.guided_attn = GuidedAttentionLoss(guided_attention_loss_sigma)
self.ctc_weight = ctc_weight
def forward(self, model, sample, reduction="mean"):
bsz, max_len, _ = sample["target"].size()
feat_tgt = sample["target"]
feat_len = sample["target_lengths"].view(bsz, 1).expand(-1, max_len)
eos_tgt = torch.arange(max_len).to(sample["target"].device)
eos_tgt = eos_tgt.view(1, max_len).expand(bsz, -1)
eos_tgt = (eos_tgt == (feat_len - 1)).float()
src_tokens = sample["net_input"]["src_tokens"]
src_lens = sample["net_input"]["src_lengths"]
tgt_lens = sample["target_lengths"]
feat_out, eos_out, extra = model(
src_tokens=src_tokens,
src_lengths=src_lens,
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
incremental_state=None,
target_lengths=tgt_lens,
speaker=sample["speaker"],
)
l1_loss, mse_loss, eos_loss = self.compute_loss(
extra["feature_out"],
feat_out,
eos_out,
feat_tgt,
eos_tgt,
tgt_lens,
reduction,
)
attn_loss = torch.tensor(0.0).type_as(l1_loss)
if self.guided_attn is not None:
attn_loss = self.guided_attn(extra["attn"], src_lens, tgt_lens, reduction)
ctc_loss = torch.tensor(0.0).type_as(l1_loss)
if self.ctc_weight > 0.0:
net_output = (feat_out, eos_out, extra)
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.transpose(0, 1) # T x B x C
src_mask = lengths_to_mask(src_lens)
src_tokens_flat = src_tokens.masked_select(src_mask)
ctc_loss = (
F.ctc_loss(
lprobs,
src_tokens_flat,
tgt_lens,
src_lens,
reduction=reduction,
zero_infinity=True,
)
* self.ctc_weight
)
loss = l1_loss + mse_loss + eos_loss + attn_loss + ctc_loss
sample_size = sample["nsentences"] if self.sentence_avg else sample["ntokens"]
logging_output = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"l1_loss": utils.item(l1_loss.data),
"mse_loss": utils.item(mse_loss.data),
"eos_loss": utils.item(eos_loss.data),
"attn_loss": utils.item(attn_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
}
return loss, sample_size, logging_output
def compute_loss(
self,
feat_out,
feat_out_post,
eos_out,
feat_tgt,
eos_tgt,
tgt_lens,
reduction="mean",
):
mask = lengths_to_mask(tgt_lens)
_eos_out = eos_out[mask].squeeze()
_eos_tgt = eos_tgt[mask]
_feat_tgt = feat_tgt[mask]
_feat_out = feat_out[mask]
_feat_out_post = feat_out_post[mask]
l1_loss = F.l1_loss(_feat_out, _feat_tgt, reduction=reduction) + F.l1_loss(
_feat_out_post, _feat_tgt, reduction=reduction
)
mse_loss = F.mse_loss(_feat_out, _feat_tgt, reduction=reduction) + F.mse_loss(
_feat_out_post, _feat_tgt, reduction=reduction
)
eos_loss = F.binary_cross_entropy_with_logits(
_eos_out,
_eos_tgt,
pos_weight=torch.tensor(self.bce_pos_weight),
reduction=reduction,
)
return l1_loss, mse_loss, eos_loss
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
ns = [log.get("sample_size", 0) for log in logging_outputs]
ntot = sum(ns)
ws = [n / (ntot + 1e-8) for n in ns]
for key in ["loss", "l1_loss", "mse_loss", "eos_loss", "attn_loss", "ctc_loss"]:
vals = [log.get(key, 0) for log in logging_outputs]
val = sum(val * w for val, w in zip(vals, ws))
metrics.log_scalar(key, val, ntot, round=3)
metrics.log_scalar("sample_size", ntot, len(logging_outputs))
# inference metrics
if "targ_frames" not in logging_outputs[0]:
return
n = sum(log.get("targ_frames", 0) for log in logging_outputs)
for key, new_key in [
("mcd_loss", "mcd_loss"),
("pred_frames", "pred_ratio"),
("nins", "ins_rate"),
("ndel", "del_rate"),
]:
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(new_key, val / n, n, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
return False
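# A hedged, self-contained sketch of the masking pattern compute_loss() relies
# on: frames beyond each utterance's length are dropped before the L1/MSE
# terms are computed. Lengths and the 80-bin mel dimension are assumptions.
def _demo_masked_feature_loss():  # pragma: no cover
    tgt_lens = torch.tensor([4, 2])  # B = 2, hypothetical lengths
    # equivalent of lengths_to_mask(tgt_lens): a B x T boolean mask
    mask = torch.arange(4).unsqueeze(0) < tgt_lens.unsqueeze(1)
    feat_out = torch.randn(2, 4, 80)  # B x T x n_mels
    feat_tgt = torch.randn(2, 4, 80)
    return F.l1_loss(feat_out[mask], feat_tgt[mask], reduction="mean")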
| 8,535
| 35.478632
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/speech_text_pretraining_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import math
import numpy as np
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.logging.meters import safe_round
from fairseq.utils import buffered_arange, index_put, is_xla_tensor
@dataclass
class SpeechTextPretrainingCriterionConfig(FairseqDataclass):
text_masked_language_modeling_weight: float = field(
default=1.0,
metadata={"help": "the weight of text masked language modeling loss"},
)
speech_masked_language_modeling_weight: float = field(
default=1.0,
metadata={"help": "the weight of speech masked language modeling loss"},
)
speech_text_matching_weight: float = field(
default=1.0, metadata={"help": "the weight of speech text matching loss"}
)
asr_ce_loss_weight: float = field(
default=1.0, metadata={"help": "the weight of asr cross entropy loss"}
)
asr_quantity_loss_weight: float = field(
default=1.0, metadata={"help": "the weight of asr quantity loss"}
)
asr_ctc_loss_weight: float = field(
default=1.0, metadata={"help": "the weight of asr ctc loss"}
)
translation_language_modeling_weight: float = field(
default=1.0,
metadata={"help": "the weight of translation language modeling loss"},
)
tts_loss_weight: float = field(
default=1.0, metadata={"help": "the weight of text-to-speech loss"}
)
infonce_weight: float = field(
default=1.0, metadata={"help": "the weight of audio contrastive loss"}
)
prob_ppl_weight: float = field(
default=1.0, metadata={"help": "the weight of probability perplexity"}
)
feat_pen_weight: float = field(
default=1.0, metadata={"help": "the weight of feature penalty"}
)
available_losses: Optional[List[str]] = field(
default=None, metadata={"help": "the list of all available losses"}
)
mode: int = field(
default=1,
metadata={"help": "the training mode used for different data structure"},
)
@register_criterion(
"speech_text_pretraining_criterion", dataclass=SpeechTextPretrainingCriterionConfig
)
class SpeechTextPretrainingCriterion(FairseqCriterion):
def __init__(self, cfg: SpeechTextPretrainingCriterionConfig, task):
super().__init__(task)
## All losses weight configuration
# 1. Losses for unpaired samples
self._infonce_weight = cfg.infonce_weight
self._prob_ppl_weight = cfg.prob_ppl_weight
self._feat_pen_weight = cfg.feat_pen_weight
self._text_masked_language_modeling_weight = (
cfg.text_masked_language_modeling_weight
)
self._speech_masked_language_modeling_weight = (
cfg.speech_masked_language_modeling_weight
)
# 2. Losses for paired samples
self._speech_text_matching_weight = cfg.speech_text_matching_weight
self._asr_ce_loss_weight = cfg.asr_ce_loss_weight
self._asr_ctc_loss_weight = cfg.asr_ctc_loss_weight
self._asr_quantity_loss_weight = cfg.asr_quantity_loss_weight
self._translation_language_modeling_weight = (
cfg.translation_language_modeling_weight
)
self._tts_loss_weight = cfg.tts_loss_weight
## All available losses
self._available_losses = cfg.available_losses
# Other settings
self.default_dict = task.default_dictionary
self.pad_ids = self.default_dict.pad()
# Data config
self._mode = cfg.mode
# This criterion does not support xla; set the flag in __init__ so that
# logging_outputs_can_be_summed() is valid even before the first forward().
self.xla = False
@staticmethod
def get_probs_from_logits(logits, log_probs=False):
"""Get normalized probabilities (or log probs) from logits."""
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_text_masked_language_modeling_loss(self, net_output):
text_mlm_logits = net_output["text_mlm_logits"]
text_mlm_targets = net_output["text_mlm_targets"]
text_mlm_num_tokens = text_mlm_targets.numel()
text_mlm_logprobs = self.get_probs_from_logits(text_mlm_logits, log_probs=True)
text_mlm_logprobs = text_mlm_logprobs.view(-1, text_mlm_logprobs.size(-1))
text_mlm_targets = text_mlm_targets.contiguous().view(
-1
) # flatten targets tensor B x T_mask
text_mlm_loss = F.nll_loss(
text_mlm_logprobs,
text_mlm_targets.long(),
ignore_index=self.pad_ids,
reduction="sum",
) # CE loss is the summation of all tokens, without any form of averaging
if text_mlm_targets.numel() == 0 and text_mlm_logprobs.numel() == 0:
text_mlm_loss = torch.tensor(0.0).cuda()
if text_mlm_loss != text_mlm_loss: # Handling nan loss error
print("text_mlm_logits: ", text_mlm_logits)
print(torch.isnan(text_mlm_logits).sum())
print("text_mlm_targets: ", text_mlm_targets)
print("text_mlm_targets: ", text_mlm_targets.size())
print(torch.isnan(text_mlm_logprobs).sum())
print("text_mlm_logprobs: ", text_mlm_logprobs)
print("text_mlm_logprobs: ", text_mlm_logprobs.size())
raise ValueError("loss equals nan errors.")
return text_mlm_loss, text_mlm_num_tokens
def get_speech_masked_language_modeling_loss(self, net_output):
num_spec_samples = net_output["num_spec_samples"]
spec_mlm_targets = net_output["spec_mlm_targets"]
spec_mlm_logits = net_output["spec_mlm_logits"]
spec_mlm_num_tokens = spec_mlm_targets.numel()
spec_mlm_logprobs = self.get_probs_from_logits(
spec_mlm_logits, log_probs=True
)
spec_mlm_logprobs = spec_mlm_logprobs.view(
-1, spec_mlm_logprobs.size(-1)
) # (B x T_mask) x V
spec_mlm_targets = spec_mlm_targets.contiguous().view(
-1
) # flatten targets tensor B x T_mask
spec_mlm_loss = F.nll_loss(
spec_mlm_logprobs,
spec_mlm_targets.long(),
reduction="sum",
) # CE loss is the summation of all tokens, without any form of averaging
return spec_mlm_loss, spec_mlm_num_tokens
def get_translation_language_modeling_loss(self, net_output):
num_pair_samples = net_output["num_pair_samples"]
paired_text_tlm_logits = net_output["paired_text_tlm_logits"]
paired_text_tlm_targets = net_output["paired_text_tlm_targets"]
paired_spec_tlm_logits = net_output["paired_spec_tlm_logits"]
paired_spec_tlm_targets = net_output["paired_spec_tlm_targets"]
paired_text_tlm_logprobs = self.get_probs_from_logits(
paired_text_tlm_logits, log_probs=True
)
paired_spec_tlm_logprobs = self.get_probs_from_logits(
paired_spec_tlm_logits, log_probs=True
)
paired_text_tlm_logprobs = paired_text_tlm_logprobs.view(
-1, paired_text_tlm_logprobs.size(-1)
)
paired_text_tlm_targets = paired_text_tlm_targets.contiguous().view(
-1
) # flatten targets tensor
text_tlm_loss = F.nll_loss(
paired_text_tlm_logprobs,
paired_text_tlm_targets.long(),
ignore_index=self.pad_ids,
reduction="sum",
) # CE loss is the summation of all tokens, without any form of averaging
paired_spec_tlm_logprobs = paired_spec_tlm_logprobs.view(
-1, paired_spec_tlm_logprobs.size(-1)
)
paired_spec_tlm_targets = paired_spec_tlm_targets.contiguous().view(
-1
) # flatten targets tensor
spec_tlm_loss = F.nll_loss(
paired_spec_tlm_logprobs, paired_spec_tlm_targets.long(), reduction="sum"
) # CE loss is the summation of all tokens, without any form of averaging
tlm_loss = text_tlm_loss + spec_tlm_loss
tlm_num_tokens = (
paired_text_tlm_targets.numel() + paired_spec_tlm_targets.numel()
)
return tlm_loss, tlm_num_tokens
def get_speech_text_matching_loss(self, net_output):
num_pair_samples = net_output["num_pair_samples"]
stm_logits = net_output["stm_logits"]
stm_labels = net_output["stm_labels"]
stm_loss = F.binary_cross_entropy_with_logits(
stm_logits, stm_labels, reduction="sum"
)
return stm_loss
def get_asr_losses(self, net_output):
    # Stub: fail loudly rather than return None, which would crash later
    # when the losses are scaled by their weights in forward().
    raise NotImplementedError("ASR losses are not implemented yet")
def get_tts_loss(self, net_output):
    # Stub: see get_asr_losses().
    raise NotImplementedError("the TTS loss is not implemented yet")
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample."""
# Forward the whole model
net_output = model(sample, mode=self._mode)
losses = dict()
assert (
    self._available_losses
), "please ensure there is at least one loss in available_losses"
text_mlm_num_tokens = None
spec_mlm_num_tokens = None
tlm_num_tokens = None
# Calculate all losses
if "infonce_loss" in self._available_losses:
infonce_logits = model.get_infonce_logits(net_output).float()
infonce_targets = model.get_infonce_targets(net_output)
infonce_loss = F.cross_entropy(
infonce_logits, infonce_targets, reduction="sum"
)
# Get number of samples in speech track
spec_mlm_num_tokens = infonce_targets.numel()
# Other relevant losses
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
for k, value in extra_losses.items():
if k == "prob_perplexity":
p = value.float() * spec_mlm_num_tokens * self._prob_ppl_weight
losses["prob_perplexity_loss"] = p
elif k == "features_pen":
p = value.float() * spec_mlm_num_tokens * self._feat_pen_weight
losses["feature_pen_loss"] = p
else:
raise NotImplementedError("Unsupported options.")
losses["infonce_loss"] = self._infonce_weight * infonce_loss
# Calculate Accuracy and Correct number
assert infonce_logits is not None
with torch.no_grad():
if infonce_logits.numel() == 0:
corr = 0
count = 0
else:
assert infonce_logits.dim() > 1, infonce_logits.shape
# The positive candidate sits at index 0 of the logits, so a row is
# counted as correct when argmax == 0; rows where argmax and argmin
# coincide (all logits equal) are subtracted as ties. The xla branch
# copied from wav2vec_criterion referenced an undefined `mi` (mask
# indices) and is dropped because this criterion never runs on xla.
max_ = infonce_logits.argmax(-1) == 0
min_ = infonce_logits.argmin(-1) == 0
both = max_ & min_
corr = max_.long().sum().item() - both.long().sum().item()
count = float(max_.numel())
if "text_mlm_loss" in self._available_losses:
(
text_mlm_loss,
text_mlm_num_tokens,
) = self.get_text_masked_language_modeling_loss(net_output)
losses["text_mlm_loss"] = (
self._text_masked_language_modeling_weight * text_mlm_loss
)
if "spec_mlm_loss" in self._available_losses:
(
spec_mlm_loss,
spec_mlm_num_tokens,
) = self.get_speech_masked_language_modeling_loss(net_output)
losses["spec_mlm_loss"] = (
self._speech_masked_language_modeling_weight * spec_mlm_loss
)
if "tlm_loss" in self._available_losses:
tlm_loss, tlm_num_tokens = self.get_translation_language_modeling_loss(
net_output
)
losses["tlm_loss"] = self._translation_language_modeling_weight * tlm_loss
if "stm_loss" in self._available_losses:
stm_loss = self.get_speech_text_matching_loss(net_output)
losses["stm_loss"] = self._speech_text_matching_weight * stm_loss
if "tts_loss" in self._available_losses:
tts_loss = self.get_tts_loss(net_output)
losses["tts_loss"] = self._tts_loss_weight * tts_loss
if "asr_loss" in self._available_losses:
asr_ce_loss, asr_ctc_loss, asr_quantity_loss = None, None, None
losses["asr_ce_loss"] = self._asr_ce_loss_weight * asr_ce_loss
losses["asr_ctc_loss"] = self._asr_ctc_loss_weight * asr_ctc_loss
losses["asr_quantity_loss"] = (
self._asr_quantity_loss_weight * asr_quantity_loss
)
# All sample size
sample_size = sample["data_labels"].size(0)
nsentences = sample_size
ntokens = sample_size
# Total losses values
loss = torch.tensor(0.0).cuda()
for loss_value in losses.values():
loss += loss_value
logging_output = {
"loss": loss.item(),
"sample_size": sample_size,
"nsentences": nsentences,
"ntokens": ntokens,
}
if "infonce_loss" in self._available_losses:
logging_output["infonce_correct"] = corr
logging_output["infonce_count"] = count
# Collect the number of samples for each data class
if "num_pair_samples" in net_output.keys():
num_pair_samples = net_output["num_pair_samples"]
logging_output["num_pair_samples"] = num_pair_samples
if "num_spec_samples" in net_output.keys():
num_spec_samples = net_output["num_spec_samples"]
logging_output["num_spec_samples"] = num_spec_samples
if "num_text_samples" in net_output.keys():
num_text_samples = net_output["num_text_samples"]
logging_output["num_text_samples"] = num_text_samples
if "tlm_loss" in self._available_losses:
logging_output["tlm_num_tokens"] = tlm_num_tokens
if (
"spec_mlm_loss" in self._available_losses
or "infonce_loss" in self._available_losses
):
logging_output["spec_mlm_num_tokens"] = spec_mlm_num_tokens
if "text_mlm_loss" in self._available_losses:
logging_output["text_mlm_num_tokens"] = text_mlm_num_tokens
if len(losses.keys()) >= 1:
for i, key in enumerate(losses.keys()):
logging_output[key] = losses[key].item()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
# Collect the total loss_summation over these many steps
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
num_spec_samples = utils.item(
sum(log.get("num_spec_samples", 0) for log in logging_outputs)
)
num_text_samples = utils.item(
sum(log.get("num_text_samples", 0) for log in logging_outputs)
)
num_pair_samples = utils.item(
sum(log.get("num_pair_samples", 0) for log in logging_outputs)
)
spec_mlm_num_tokens = utils.item(
sum(log.get("spec_mlm_num_tokens", 0) for log in logging_outputs)
)
text_mlm_num_tokens = utils.item(
sum(log.get("text_mlm_num_tokens", 0) for log in logging_outputs)
)
tlm_num_tokens = utils.item(
sum(log.get("tlm_num_tokens", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
# metrics.log_scalar(
# "loss", loss_sum / math.log(2) / len(logging_outputs), round=3
# )
metrics.log_scalar(
"loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3
)
metrics.log_scalar("spec_mlm_num_tokens", spec_mlm_num_tokens)
metrics.log_scalar("text_mlm_num_tokens", text_mlm_num_tokens)
metrics.log_scalar("tlm_num_tokens", tlm_num_tokens)
metrics.log_scalar("num_spec_samples", num_spec_samples)
metrics.log_scalar("num_text_samples", num_text_samples)
metrics.log_scalar("num_pair_samples", num_pair_samples)
metrics.log_scalar("sample_size", sample_size)
metrics.log_scalar("nsentences", nsentences)
metrics.log_scalar("ntokens", ntokens)
builtin_keys = {
"loss",
"nsentences",
"sample_size",
"num_spec_samples",
"num_text_samples",
"num_pair_samples",
"spec_mlm_num_tokens",
"text_mlm_num_tokens",
"tlm_num_tokens",
}
# infonce relevant information if necessary
if "infonce_loss" in logging_outputs[0].keys():
infonce_correct = sum(
log.get("infonce_correct", 0) for log in logging_outputs
)
metrics.log_scalar("infonce_correct", infonce_correct)
infonce_total = sum(log.get("infonce_count", 0) for log in logging_outputs)
metrics.log_scalar("infonce_total", infonce_total)
if infonce_total > 0:
metrics.log_derived(
"infonce_accuracy",
lambda meters: safe_round(
meters["infonce_correct"].sum / meters["infonce_total"].sum, 5
)
if meters["infonce_total"].sum > 0
else float("nan"),
)
for key in logging_outputs[0].keys():
if key not in builtin_keys:
val = sum(log.get(key, 0) for log in logging_outputs)
if "loss" in key: # Handling loss
val = utils.item(val)
if val != val: # Handling nan loss errors
for i in range(len(logging_outputs)):
single_value = logging_outputs[i][key]
print(single_value)
raise ValueError("nan appears at %s" % key)
# speech loss part
if key == "infonce_loss":
metrics.log_scalar(
key,
val / spec_mlm_num_tokens / math.log(2),
spec_mlm_num_tokens,
round=6,
)
if key == "prob_perplexity_loss":
metrics.log_scalar(
key,
val / spec_mlm_num_tokens / math.log(2),
spec_mlm_num_tokens,
round=6,
)
if key == "feature_pen_loss":
metrics.log_scalar(
key,
val / spec_mlm_num_tokens / math.log(2),
spec_mlm_num_tokens,
round=6,
)
if key == "spec_mlm_loss":
metrics.log_scalar(
key,
val / spec_mlm_num_tokens / math.log(2),
spec_mlm_num_tokens,
round=6,
)
# text loss part
if key == "text_mlm_loss":
if text_mlm_num_tokens == 0:
text_mlm_num_tokens = 1
metrics.log_scalar(
key,
val / text_mlm_num_tokens / math.log(2),
text_mlm_num_tokens,
round=6,
)
# pair loss part
if key == "tlm_loss":
metrics.log_scalar(
key,
val / tlm_num_tokens / math.log(2),
tlm_num_tokens,
round=6,
)
if key == "stm_loss":
metrics.log_scalar(
key,
val / num_pair_samples / math.log(2),
num_pair_samples,
round=6,
)
def logging_outputs_can_be_summed(self) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
# XXX: Gather based reduction not implemented for xla yet.
# So we fall to sum based reduction for xla.
return self.xla
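# A hedged, self-contained sketch of the masked-LM loss pattern used by the
# get_*_language_modeling_loss methods above: log-softmax the logits, flatten
# (B x T x V) to (B*T) x V, and sum the NLL over non-pad targets. The pad
# index and vocabulary size below are assumptions.
def _demo_flattened_mlm_nll():  # pragma: no cover
    pad_idx, vocab = 1, 100
    logits = torch.randn(2, 5, vocab)  # B x T_mask x V
    targets = torch.randint(2, vocab, (2, 5))
    targets[1, 3:] = pad_idx  # padded positions contribute nothing
    logprobs = F.log_softmax(logits.float(), dim=-1).view(-1, vocab)
    return F.nll_loss(
        logprobs, targets.view(-1), ignore_index=pad_idx, reduction="sum"
    )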
| 21,721
| 39.225926
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/nar_cif.py
|
# @Time : 2021/7/14
# @Author : Minglun Han
# @File : nar_cif.py
import sys
import math
import editdistance
import numpy as np
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
@dataclass
class NarCifCriterionConfig(FairseqDataclass):
zero_infinity: bool = field(default=False)
sentence_avg: bool = II("optimization.sentence_avg")
post_process: str = field(default="letter")
# Cif loss settings
ce_loss_lambda: float = field(default=1.0)
apply_quantity_loss: bool = field(
default=True, metadata={"help": "apply quantity loss"}
)
apply_ctc_loss: bool = field(default=True, metadata={"help": "apply ctc loss"})
quantity_loss_lambda: float = field(
default=1.0, metadata={"help": "the interpolation weight of quantity loss"}
)
ctc_loss_lambda: float = field(
default=0.3, metadata={"help": "the interpolation weight of ctc loss"}
)
use_ctxt_cif_outputs: bool = field(default=False)
@register_criterion("nar_cif", dataclass=NarCifCriterionConfig)
class NarCifCriterion(FairseqCriterion):
def __init__(self, cfg: NarCifCriterionConfig, task: FairseqTask):
super().__init__(task)
# Register default special tokens
self.blank_idx = (
task.target_dictionary.index("<ctc_blank>")
if "<ctc_blank>" in task.target_dictionary.indices
else task.target_dictionary.bos()
)
self.pad_idx = task.target_dictionary.pad() # 1
self.eos_idx = task.target_dictionary.eos() # 2
self.bos_idx = task.target_dictionary.bos() # 0
# Loss settings
self.ce_loss_lambda = cfg.ce_loss_lambda
self.apply_quantity_loss = cfg.apply_quantity_loss
self.apply_ctc_loss = cfg.apply_ctc_loss
self.quantity_loss_lambda = cfg.quantity_loss_lambda
self.ctc_loss_lambda = cfg.ctc_loss_lambda
self.use_ctxt_cif_outputs = cfg.use_ctxt_cif_outputs
# other settings
self.post_process = cfg.post_process
self.zero_infinity = cfg.zero_infinity
self.sentence_avg = cfg.sentence_avg
def get_loss(self, model, sample, net_output, reduce=True):
# Get model outputs
encoder_out = net_output["encoder_out"] # B x T x C
quantity_out = net_output["quantity_out"] # B
cif_out = net_output["cif_out"] # B x T x C
ctxt_cif_out = net_output["ctxt_cif_out"] # B x T x C
cif_out_padding_mask = net_output["cif_out_padding_mask"] # B x T
decoder_out = net_output["decoder_out"][
0
] # Get final decoder outputs (logits for cross-entropy loss)
# Collect src_lengths for the calculation of ctc loss
non_padding_mask = ~net_output["padding_mask"]
input_lengths = non_padding_mask.int().sum(-1)
# Collect targets and target_length for ctc loss and ce loss
target_lengths = sample["target_lengths"] # targets length w/o eos
target_with_eos = sample["target"] # this target has <eos> at the end
target_with_eos_lengths = target_lengths + 1 # targets length w/ eos
# Adjust targets: move the eos token from the last location to the end of valid location
batch_size = target_with_eos.size(0)
target_with_eos_non_padding_mask = (
(target_with_eos != self.eos_idx) & (target_with_eos != self.pad_idx)
).int() # B x T
add_eos_idx = (
((target_with_eos * target_with_eos_non_padding_mask) != 0)
.int()
.sum(dim=-1)
.unsqueeze(dim=-1)
) # B x 1
add_one_hot_tensor = (
torch.zeros(batch_size, target_with_eos_non_padding_mask.size(1))
.int()
.cuda()
.scatter_(1, add_eos_idx, 1)
* self.eos_idx
)
adjusted_target_with_eos = torch.where(
(
(target_with_eos.int() * target_with_eos_non_padding_mask)
+ add_one_hot_tensor
)
== 0,
torch.ones_like(target_with_eos).int().cuda() * self.pad_idx,
(target_with_eos.int() * target_with_eos_non_padding_mask)
+ add_one_hot_tensor,
)
# target_with_eos: [[20,56,7,8,1,1,1,1,2], ..., [60,6,7,349,34,1,1,1,2]]
# adjusted_target_with_eos: [[20,56,7,8,2,1,1,1,1], ..., [60,6,7,349,34,2,1,1,1]]
# Calculate the ctc loss on encoder outputs
ctc_loss = torch.tensor(0.0)
if self.apply_ctc_loss:
pad_mask = adjusted_target_with_eos != self.pad_idx
targets_flat = adjusted_target_with_eos.masked_select(pad_mask)
ctc_lprobs = model.get_probs_from_logits(
encoder_out, log_probs=True
).contiguous() # (B, T, V) from the encoder
target_lengths_for_ctc_loss = target_with_eos_lengths
with torch.backends.cudnn.flags(enabled=False):
ctc_loss = F.ctc_loss(
ctc_lprobs.transpose(0, 1), # T x B x v
targets_flat,
input_lengths,
target_lengths_for_ctc_loss,
blank=self.blank_idx,
reduction="sum",
zero_infinity=self.zero_infinity,
)
# Calculate the quantity loss
qtt_loss = torch.tensor(0.0)
if self.apply_quantity_loss:
target_lengths_for_qtt_loss = (
target_with_eos_lengths # Lengths after adding eos token, [B]
)
qtt_loss = torch.abs(quantity_out - target_lengths_for_qtt_loss).sum()
# Calculate the cross-entropy loss
cif_max_len = cif_out_padding_mask.size(1) # Get max length of cif outputs
target_max_length = target_with_eos_lengths.max() # Get max length of targets
min_len = min(
cif_max_len, target_max_length
) # Obtain the minimum length of cif length and target length
ce_logprobs = model.get_probs_from_logits(
decoder_out, log_probs=True
).contiguous() # B x T x C
truncated_target = adjusted_target_with_eos[
:, :min_len
] # Truncate target to min_len, B x T
truncated_ce_logprobs = ce_logprobs[
:, :min_len, :
] # Truncate ce probs to min_len, B x T x C
# Truncate the targets to the minimum of the original target length and the
# CIF output length, because the CIF firing mechanism may occasionally lose
# the <eos> position.
truncated_ce_logprobs = truncated_ce_logprobs.view(
-1, truncated_ce_logprobs.size(-1)
)
truncated_target = truncated_target.contiguous().view(
-1
) # flatten targets tensor
ce_loss = F.nll_loss(
truncated_ce_logprobs,
truncated_target.long(),
ignore_index=self.pad_idx,
reduction="sum" if reduce else "none",
)
# Calculate the total loss
loss = (
self.ce_loss_lambda * ce_loss
+ self.quantity_loss_lambda * qtt_loss
+ self.ctc_loss_lambda * ctc_loss
)
# Collect the number of tokens in current batch
ntokens = (
sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item()
)
ntokens_with_eos = target_with_eos_lengths.sum().item()
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ce_loss": utils.item(ce_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
"quantity_loss": utils.item(qtt_loss.data),
"ntokens": ntokens,
"ntokens_with_eos": ntokens_with_eos,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
# Evaluate on valid sets
if not model.training:
with torch.no_grad():
lprobs_t = ce_logprobs.float().contiguous().cpu()
cif_lengths = cif_out_padding_mask.int().sum(dim=-1) # B x T
c_err = 0
c_len = 0
w_errs = 0
w_len = 0
wv_errs = 0
for lp, t, inp_l in zip(
lprobs_t, adjusted_target_with_eos, cif_lengths
):
lp = lp[:inp_l].unsqueeze(0)
# Process targets
p = (t != self.task.target_dictionary.pad()) & (
t != self.task.target_dictionary.eos()
)
targ = t[p]
targ_units = self.task.target_dictionary.string(targ)
targ_units_arr = targ.tolist()
# print(targ_units_arr)
# Handle log probabilities without elements
if min(lp.shape) == 0:
toks = targ
else:
toks = lp.argmax(dim=-1)
pred_units_arr = toks[
(toks != self.blank_idx)
& (toks != self.pad_idx)
& (toks != self.eos_idx)
].tolist()
# print(pred_units_arr)
# Calculate character error
c_err += editdistance.eval(pred_units_arr, targ_units_arr)
c_len += len(targ_units_arr)
targ_words = post_process(targ_units, self.post_process).split()
# print("targ_words: ", targ_words)
pred_units = self.task.target_dictionary.string(pred_units_arr)
# print("pred_units: ", pred_units)
pred_words_raw = post_process(pred_units, self.post_process).split()
# print("pred_words_raw: ", pred_words_raw)
# Calculate word error
dist = editdistance.eval(pred_words_raw, targ_words)
w_errs += dist
wv_errs += dist
w_len += len(targ_words)
logging_output["wv_errors"] = wv_errs
logging_output["w_errors"] = w_errs
logging_output["w_total"] = w_len
logging_output["c_errors"] = c_err
logging_output["c_total"] = c_len
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True):
net_output = model(
target_lengths_with_eos=sample["target_lengths"] + 1, **sample["net_input"]
)
loss, sample_size, logging_output = self.get_loss(
    model, sample, net_output, reduce=reduce
)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ce_loss_sum = utils.item(sum(log.get("ce_loss", 0) for log in logging_outputs))
ctc_loss_sum = utils.item(
sum(log.get("ctc_loss", 0) for log in logging_outputs)
)
quantity_loss_sum = utils.item(
sum(log.get("quantity_loss", 0) for log in logging_outputs)
)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
ntokens_with_eos = utils.item(
sum(log.get("ntokens_with_eos", 0) for log in logging_outputs)
)
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"ce_loss", ce_loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"ctc_loss", ctc_loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"quantity_loss",
quantity_loss_sum / sample_size / math.log(2),
sample_size,
round=5,
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("ntokens_with_eos", ntokens_with_eos)
metrics.log_scalar("nsentences", nsentences)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("_c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("_c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("_w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("_wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("_w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
)
if meters["_c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
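# A hedged, standalone sketch of the eos-relocation trick in get_loss(): a
# one-hot tensor built with scatter_ moves <eos> from the final slot to the
# first padding slot. Indices follow the pad/eos convention above, and the
# sample values mirror the comment inside get_loss().
def _demo_move_eos_into_place():  # pragma: no cover
    pad_idx, eos_idx = 1, 2
    target = torch.tensor([[20, 56, 7, 8, 1, 1, 1, 1, 2]])
    non_pad = ((target != eos_idx) & (target != pad_idx)).int()
    eos_pos = ((target * non_pad) != 0).int().sum(dim=-1, keepdim=True)
    one_hot = torch.zeros_like(target).scatter_(1, eos_pos, 1) * eos_idx
    merged = target * non_pad + one_hot
    # -> [[20, 56, 7, 8, 2, 1, 1, 1, 1]]
    return torch.where(merged == 0, torch.full_like(target, pad_idx), merged)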
| 14,961
| 39.005348
| 96
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/fairseq_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from typing import Any, Dict, List
from fairseq import metrics, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
from torch.nn.modules.loss import _Loss
class FairseqCriterion(_Loss):
def __init__(self, task):
super().__init__()
self.task = task
if hasattr(task, "target_dictionary"):
tgt_dict = task.target_dictionary
self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100
@classmethod
def add_args(cls, parser):
"""Add criterion-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@classmethod
def build_criterion(cls, cfg: FairseqDataclass, task):
"""Construct a criterion from command-line args."""
# arguments in the __init__.
init_args = {}
for p in inspect.signature(cls).parameters.values():
if (
p.kind == p.POSITIONAL_ONLY
or p.kind == p.VAR_POSITIONAL
or p.kind == p.VAR_KEYWORD
):
# we haven't implemented inference for these argument types,
# but PRs welcome :)
raise NotImplementedError("{} not supported".format(p.kind))
assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY}
if p.name == "task":
init_args["task"] = task
elif p.name == "cfg":
init_args["cfg"] = cfg
elif hasattr(cfg, p.name):
init_args[p.name] = getattr(cfg, p.name)
elif p.default != p.empty:
pass # we'll use the default value
else:
raise NotImplementedError(
"Unable to infer Criterion arguments, please implement "
"{}.build_criterion".format(cls.__name__)
)
return cls(**init_args)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
raise NotImplementedError
@staticmethod
def aggregate_logging_outputs(
logging_outputs: List[Dict[str, Any]]
) -> Dict[str, Any]:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
raise NotImplementedError
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
"""Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"Criterions should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs)
for k, v in agg_logging_outputs.items():
if k in {"nsentences", "ntokens", "sample_size"}:
continue
metrics.log_scalar(k, v)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return False
class LegacyFairseqCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(task=task)
self.args = args
utils.deprecation_warning(
"Criterions should take explicit arguments instead of an "
"argparse.Namespace object, please update your criterion by "
"extending FairseqCriterion instead of LegacyFairseqCriterion."
)
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
return cls(args, task)
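# A hedged sketch of the introspection build_criterion() performs: each
# constructor parameter is filled by name from the config object. The toy
# classes below are hypothetical, not part of fairseq.
def _demo_build_criterion_wiring():  # pragma: no cover
    from dataclasses import dataclass

    @dataclass
    class _ToyCfg:
        label_smoothing: float = 0.1

    class _ToyCriterion:
        def __init__(self, task, label_smoothing):
            self.task, self.label_smoothing = task, label_smoothing

    cfg, task = _ToyCfg(), object()
    init_args = {}
    for p in inspect.signature(_ToyCriterion).parameters.values():
        if p.name == "task":
            init_args["task"] = task
        elif hasattr(cfg, p.name):
            init_args[p.name] = getattr(cfg, p.name)
    return _ToyCriterion(**init_args)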
| 4,424
| 35.570248
| 79
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/nat_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from torch import Tensor
from dataclasses import dataclass, field
@dataclass
class LabelSmoothedDualImitationCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
@register_criterion("nat_loss", dataclass=LabelSmoothedDualImitationCriterionConfig)
class LabelSmoothedDualImitationCriterion(FairseqCriterion):
def __init__(self, task, label_smoothing):
super().__init__(task)
self.label_smoothing = label_smoothing
def _compute_loss(
self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0
):
"""
outputs: batch x len x d_model
targets: batch x len
masks: batch x len
policy_logprob: if there is some policy
depends on the likelihood score as rewards.
"""
def mean_ds(x: Tensor, dim=None) -> Tensor:
return (
x.float().mean().type_as(x)
if dim is None
else x.float().mean(dim).type_as(x)
)
if masks is not None:
outputs, targets = outputs[masks], targets[masks]
if masks is not None and not masks.any():
nll_loss = torch.tensor(0)
loss = nll_loss
else:
logits = F.log_softmax(outputs, dim=-1)
if targets.dim() == 1:
losses = F.nll_loss(logits, targets.to(logits.device), reduction="none")
else: # soft-labels
losses = F.kl_div(logits, targets.to(logits.device), reduction="none")
losses = losses.sum(-1)
nll_loss = mean_ds(losses)
if label_smoothing > 0:
loss = (
nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing
)
else:
loss = nll_loss
loss = loss * factor
return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor}
def _custom_loss(self, loss, name="loss", factor=1.0):
return {"name": name, "loss": loss, "factor": factor}
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
nsentences, ntokens = sample["nsentences"], sample["ntokens"]
# B x T
src_tokens, src_lengths = (
sample["net_input"]["src_tokens"],
sample["net_input"]["src_lengths"],
)
tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"]
outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens)
losses, nll_loss = [], []
for obj in outputs:
if outputs[obj].get("loss", None) is None:
_losses = self._compute_loss(
outputs[obj].get("out"),
outputs[obj].get("tgt"),
outputs[obj].get("mask", None),
outputs[obj].get("ls", 0.0),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
)
else:
_losses = self._custom_loss(
outputs[obj].get("loss"),
name=obj + "-loss",
factor=outputs[obj].get("factor", 1.0),
)
losses += [_losses]
if outputs[obj].get("nll_loss", False):
nll_loss += [_losses.get("nll_loss", 0.0)]
loss = sum(l["loss"] for l in losses)
nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0)
# NOTE:
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
for l in losses:
logging_output[l["name"]] = (
utils.item(l["loss"].data / l["factor"])
if reduce
else l[["loss"]].data / l["factor"]
)
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs))
metrics.log_scalar(
"loss", loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
for key in logging_outputs[0]:
if key[-5:] == "-loss":
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(
key[:-5],
val / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
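# A hedged, standalone sketch of the label-smoothing formula implemented in
# _compute_loss(): loss = (1 - eps) * nll - eps * mean(log_softmax(outputs)).
# Batch and vocabulary sizes are illustrative assumptions.
def _demo_label_smoothed_nll(eps: float = 0.1):  # pragma: no cover
    outputs = torch.randn(8, 50)  # (batch * len) x vocab
    targets = torch.randint(0, 50, (8,))
    logits = F.log_softmax(outputs, dim=-1)
    nll_loss = F.nll_loss(logits, targets, reduction="none").float().mean()
    return nll_loss * (1 - eps) - logits.float().mean() * eps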
| 6,355
| 34.116022
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig,
)
try:
from simuleval.metrics.latency import (
AverageLagging,
AverageProportion,
DifferentiableAverageLagging,
)
LATENCY_METRICS = {
"average_lagging": AverageLagging,
"average_proportion": AverageProportion,
"differentiable_average_lagging": DifferentiableAverageLagging,
}
except ImportError:
LATENCY_METRICS = None
@dataclass
class LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig(
LabelSmoothedCrossEntropyCriterionConfig
):
latency_avg_weight: float = field(
default=0.0,
metadata={"help": "weight fot average latency loss."},
)
latency_var_weight: float = field(
default=0.0,
metadata={"help": "weight fot variance latency loss."},
)
latency_avg_type: str = field(
default="differentiable_average_lagging",
metadata={"help": "latency type for average loss"},
)
latency_var_type: str = field(
default="variance_delay",
metadata={"help": "latency typ for variance loss"},
)
latency_gather_method: str = field(
default="weighted_average",
metadata={"help": "method to gather latency loss for all heads"},
)
latency_update_after: int = field(
default=0,
metadata={"help": "Add latency loss after certain steps"},
)
@register_criterion(
"latency_augmented_label_smoothed_cross_entropy",
dataclass=LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig,
)
class LatencyAugmentedLabelSmoothedCrossEntropyCriterion(
LabelSmoothedCrossEntropyCriterion
):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size,
report_accuracy,
latency_avg_weight,
latency_var_weight,
latency_avg_type,
latency_var_type,
latency_gather_method,
latency_update_after,
):
super().__init__(
task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy
)
assert LATENCY_METRICS is not None, "Please make sure SimulEval is installed."
self.latency_avg_weight = latency_avg_weight
self.latency_var_weight = latency_var_weight
self.latency_avg_type = latency_avg_type
self.latency_var_type = latency_var_type
self.latency_gather_method = latency_gather_method
self.latency_update_after = latency_update_after
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
# 1. Compute cross entropy loss
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
# 2. Compute cross latency loss
latency_loss, expected_latency, expected_delays_var = self.compute_latency_loss(
model, sample, net_output
)
if self.latency_update_after > 0:
num_updates = getattr(model.decoder, "num_updates", None)
assert (
num_updates is not None
), "model.decoder doesn't have attribute 'num_updates'"
if num_updates <= self.latency_update_after:
latency_loss = 0
loss += latency_loss
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"latency": expected_latency,
"delays_var": expected_delays_var,
"latency_loss": latency_loss,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def compute_latency_loss(self, model, sample, net_output):
assert (
net_output[-1].encoder_padding_mask is None
or not net_output[-1].encoder_padding_mask[:, 0].any()
), "Only right padding on source is supported."
# 1. Obtain the expected alignment
alpha_list = [item["alpha"] for item in net_output[1].attn_list]
num_layers = len(alpha_list)
bsz, num_heads, tgt_len, src_len = alpha_list[0].size()
# bsz * num_layers * num_heads, tgt_len, src_len
alpha_all = torch.cat(alpha_list, dim=1).view(-1, tgt_len, src_len)
# 2 compute expected delays
# bsz * num_heads * num_layers, tgt_len, src_len for MMA
steps = (
torch.arange(1, 1 + src_len)
.unsqueeze(0)
.unsqueeze(1)
.expand_as(alpha_all)
.type_as(alpha_all)
)
expected_delays = torch.sum(steps * alpha_all, dim=-1)
target_padding_mask = (
model.get_targets(sample, net_output)
.eq(self.padding_idx)
.unsqueeze(1)
.expand(bsz, num_layers * num_heads, tgt_len)
.contiguous()
.view(-1, tgt_len)
)
src_lengths = (
sample["net_input"]["src_lengths"]
.unsqueeze(1)
.expand(bsz, num_layers * num_heads)
.contiguous()
.view(-1)
)
expected_latency = LATENCY_METRICS[self.latency_avg_type](
expected_delays, src_lengths, None, target_padding_mask=target_padding_mask
)
# 2.1 average expected latency of heads
# bsz, num_layers * num_heads
expected_latency = expected_latency.view(bsz, -1)
if self.latency_gather_method == "average":
# bsz, averaged over num_layers * num_heads to match the other branches
expected_latency = expected_latency.mean(dim=1)
elif self.latency_gather_method == "weighted_average":
weights = torch.nn.functional.softmax(expected_latency, dim=1)
expected_latency = torch.sum(expected_latency * weights, dim=1)
elif self.latency_gather_method == "max":
expected_latency = expected_latency.max(dim=1)[0]
else:
raise NotImplementedError
expected_latency = expected_latency.sum()
avg_loss = self.latency_avg_weight * expected_latency
# 2.2 variance of expected delays
expected_delays_var = (
expected_delays.view(bsz, -1, tgt_len).var(dim=1).mean(dim=1)
)
expected_delays_var = expected_delays_var.sum()
var_loss = self.latency_var_weight * expected_delays_var
# 3. Final loss
latency_loss = avg_loss + var_loss
return latency_loss, expected_latency, expected_delays_var
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
super().reduce_metrics(logging_outputs)
latency = sum(log.get("latency", 0) for log in logging_outputs)
delays_var = sum(log.get("delays_var", 0) for log in logging_outputs)
latency_loss = sum(log.get("latency_loss", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("latency", latency.float() / nsentences, nsentences, round=3)
metrics.log_scalar("delays_var", delays_var / nsentences, nsentences, round=3)
metrics.log_scalar(
"latency_loss", latency_loss / nsentences, nsentences, round=3
)
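# A hedged, standalone sketch of the expected-delay computation inside
# compute_latency_loss(): each target step's delay is the attention-weighted
# average source position. Sizes are illustrative assumptions.
def _demo_expected_delays():  # pragma: no cover
    tgt_len, src_len = 3, 5
    alpha = torch.softmax(torch.randn(1, tgt_len, src_len), dim=-1)
    steps = torch.arange(1, 1 + src_len).view(1, 1, src_len).type_as(alpha)
    return torch.sum(steps * alpha, dim=-1)  # 1 x tgt_len expected delays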
| 7,969
| 35.063348
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/model_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass, field
from typing import Dict, List
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
logger = logging.getLogger(__name__)
@dataclass
class ModelCriterionConfig(FairseqDataclass):
loss_weights: Dict[str, float] = field(
default_factory=dict,
metadata={"help": "weights for the loss terms"},
)
log_keys: List[str] = field(
default_factory=list,
metadata={"help": "additional output keys to log"},
)
@register_criterion("model", dataclass=ModelCriterionConfig)
class ModelCriterion(FairseqCriterion):
"""
This criterion relies on the model to supply losses.
The losses should be a dictionary of name -> scalar returned by
the model either by including it in the net_output dict or by
implementing a get_losses(net_output, sample) method. The final loss is
a scaled sum of all losses according to weights in loss_weights.
If no weights are provided, then all losses are scaled by 1.0.
The losses will be automatically logged. Additional keys from
net_output dict can be logged via the log_keys parameter.
"""
def __init__(self, task, loss_weights=None, log_keys=None):
super().__init__(task)
self.loss_weights = loss_weights
self.log_keys = log_keys
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
sample_size = net_output["sample_size"]
scaled_losses = {}
if hasattr(model, "get_losses"):
losses = model.get_losses(net_output, sample)
elif isinstance(net_output, dict) and "losses" in net_output:
losses = net_output["losses"]
else:
raise Exception("Could not retrieve losses")
for lk, p in losses.items():
try:
coef = 1.0 if len(self.loss_weights) == 0 else self.loss_weights[lk]
except KeyError:
logger.error(
f"weight for loss {lk} is not in loss_weights ({self.loss_weights})"
)
raise
if coef != 0 and p is not None:
scaled_losses[lk] = coef * p.float()
loss = sum(scaled_losses.values())
if reduce and loss.numel() > 1:
loss = loss.sum()
logging_output = {
"loss": loss.data,
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
"_world_size": 1,
}
for lk in self.log_keys:
if lk in net_output and net_output[lk] is not None:
logging_output[lk] = float(net_output[lk])
if len(scaled_losses) > 1:
for lk, l in scaled_losses.items():
logging_output[f"loss_{lk}"] = l.item()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar("loss", loss_sum / sample_size, sample_size, round=3)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
builtin_keys = {
"loss",
"ntokens",
"nsentences",
"sample_size",
"_world_size",
}
world_size = utils.item(
sum(log.get("_world_size", 0) for log in logging_outputs)
)
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs)
if k.startswith("loss_"):
metrics.log_scalar(k, val / sample_size, sample_size, round=3)
else:
metrics.log_scalar(k, val / world_size, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
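# A hedged sketch of the contract ModelCriterion expects from the model: the
# net_output carries a "losses" dict, and each term is scaled by loss_weights
# (1.0 when no weight is configured). Names and values are hypothetical.
def _demo_model_supplied_losses():  # pragma: no cover
    import torch

    net_output = {"losses": {"mlm": torch.tensor(1.5), "ctc": torch.tensor(0.7)}}
    loss_weights = {"mlm": 1.0, "ctc": 0.5}
    scaled = {
        name: (loss_weights.get(name, 1.0) if loss_weights else 1.0) * p.float()
        for name, p in net_output["losses"].items()
        if p is not None
    }
    return sum(scaled.values())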
| 4,827
| 33.733813
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/wav2vec_criterion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.logging.meters import safe_round
from fairseq.utils import is_xla_tensor
@dataclass
class Wav2VecCriterionConfig(FairseqDataclass):
infonce: bool = field(
default=False,
metadata={
"help": "if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)"
},
)
loss_weights: Optional[List[float]] = field(
default=None,
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
@register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig)
class Wav2vecCriterion(FairseqCriterion):
def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
super().__init__(task)
self.infonce = infonce
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
logits = model.get_logits(net_output).float()
target = model.get_targets(sample, net_output)
self.xla = is_xla_tensor(logits)
# XXX: handle weights on xla.
weights = None
if hasattr(model, "get_target_weights") and not self.infonce:
weights = model.get_target_weights(target, net_output)
if torch.is_tensor(weights):
weights = weights.float()
losses = []
reduction = "none" if ((not reduce) or self.xla) else "sum"
if self.infonce:
    loss = F.cross_entropy(logits, target, reduction=reduction)
else:
loss = F.binary_cross_entropy_with_logits(
logits, target.float(), weights, reduction=reduction
)
if self.xla:
# tpu-comment: since dynamic shapes lead to recompilations on xla,
# we don't shrink tensors using mask_indices.
# Instead, we use mask indices to adjust loss.
mi = (
sample["net_input"]["mask_indices"]
.transpose(0, 1) # logits are transposed in `model.get_logits`
.reshape(logits.size(0))
)
loss = (loss * mi).sum() if reduce else (loss * mi)
if "sample_size" in sample:
sample_size = sample["sample_size"]
elif "mask_indices" in sample["net_input"]:
sample_size = sample["net_input"]["mask_indices"].sum()
else:
# print(target.size())
# print(target.numel())
sample_size = target.numel() if self.infonce else target.long().sum().item()
losses.append(loss.detach().clone())
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses = model.get_extra_losses(net_output)
# print(extra_losses)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(
self.loss_weights
), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, coef in zip(extra_losses, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
losses.append(p)
logging_output = {
"loss": loss.item() if (reduce and not self.xla) else loss.detach(),
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
for lk in self.log_keys:
# Only store "logits" and "target" for computing MAP and MAUC
# during validation
if lk == "logits":
if not self.training:
logging_output["logits"] = logits.cpu().numpy()
elif lk == "target":
if not self.training:
# If the targets have been mixed with the predictions of
# teacher models, find the original targets
if hasattr(model, "get_original_targets"):
original_target = model.get_original_targets(sample, net_output)
else:
original_target = target
logging_output["target"] = original_target.cpu().numpy()
elif lk in net_output:
value = net_output[lk]
if not is_xla_tensor(value):
value = float(value)
logging_output[lk] = value
if len(losses) > 1:
for i, l in enumerate(losses):
logging_output[f"loss_{i}"] = l.item() if not self.xla else l.detach()
if self.infonce:
with torch.no_grad():
if logits.numel() == 0:
corr = 0
count = 0
else:
assert logits.dim() > 1, logits.shape
                    # positions where the positive (index 0) has the highest /
                    # lowest score; ties (max == min) are excluded from "correct"
                    is_max = logits.argmax(-1) == 0
                    is_min = logits.argmin(-1) == 0
                    if is_xla_tensor(logits):
                        is_max, is_min = is_max * mi, is_min * mi
                        both = is_max & is_min
                        corr = is_max.long().sum() - both.long().sum()
                        count = mi.sum()
                    else:
                        both = is_max & is_min
                        corr = is_max.long().sum().item() - both.long().sum().item()
                        count = float(is_max.numel())
logging_output["correct"] = corr
logging_output["count"] = count
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
correct = sum(log.get("correct", 0) for log in logging_outputs)
metrics.log_scalar("_correct", correct)
total = sum(log.get("count", 0) for log in logging_outputs)
metrics.log_scalar("_total", total)
if total > 0:
metrics.log_derived(
"accuracy",
lambda meters: safe_round(
meters["_correct"].sum / meters["_total"].sum, 5
)
if meters["_total"].sum > 0
else float("nan"),
)
builtin_keys = {
"loss",
"ntokens",
"nsentences",
"sample_size",
"correct",
"count",
}
for k in logging_outputs[0]:
if k not in builtin_keys:
val = sum(log.get(k, 0) for log in logging_outputs)
if k.startswith("loss"):
metrics.log_scalar(
k, val / (sample_size or 1) / math.log(2), sample_size, round=3
)
else:
metrics.log_scalar(k, val / len(logging_outputs), round=3)
# FIXME: revert when gather based xla reduction is implemented
# @staticmethod
# def logging_outputs_can_be_summed() -> bool:
def logging_outputs_can_be_summed(self) -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
# XXX: Gather based reduction not implemented for xla yet.
# So we fall to sum based reduction for xla.
return self.xla
| 9,178
| 37.567227
| 100
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/legacy_masked_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
def compute_cross_entropy_loss(logits, targets, ignore_index=-100):
"""
Function to compute the cross entropy loss. The default value of
ignore_index is the same as the default value for F.cross_entropy in
pytorch.
"""
assert logits.size(0) == targets.size(
-1
), "Logits and Targets tensor shapes don't match up"
loss = F.nll_loss(
F.log_softmax(logits, -1, dtype=torch.float32),
targets,
reduction="sum",
ignore_index=ignore_index,
)
return loss
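# Hypothetical usage sketch (added for this writeup, not part of the original
# file) showing the shape contract of compute_cross_entropy_loss: logits are
# flattened to (N*T, C) and targets to (N*T,), with -100 marking ignored
# positions.
def _compute_cross_entropy_loss_example():
    bsz, seq_len, vocab = 2, 3, 7
    logits = torch.randn(bsz * seq_len, vocab)
    targets = torch.randint(0, vocab, (bsz * seq_len,))
    targets[0] = -100  # this position contributes no loss (default ignore_index)
    return compute_cross_entropy_loss(logits, targets)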
@register_criterion("legacy_masked_lm_loss")
class LegacyMaskedLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
This optionally also computes the next sentence prediction (NSP) loss and
adds it to the overall loss based on the specified args. There are three
cases to consider:
1) Generic MLM training without NSP loss. In this case sentence_targets
and sentence_logits are both None.
2) BERT training without NSP loss. In this case sentence_targets is
not None but sentence_logits is None and we should not be computing
a sentence level loss.
3) BERT training with NSP loss. In this case both sentence_targets and
sentence_logits are not None and we should be computing a sentence
level loss. The weight of the sentence level loss is specified as
an argument.
"""
def __init__(self, task, masked_lm_only, nsp_loss_weight):
super().__init__(task)
self.masked_lm_only = masked_lm_only
self.nsp_loss_weight = nsp_loss_weight
@staticmethod
def add_args(parser):
"""Args for MaskedLM Loss"""
# Default for masked_lm_only is False so as to not break BERT training
parser.add_argument(
"--masked-lm-only",
default=False,
action="store_true",
help="compute MLM loss only",
)
parser.add_argument(
"--nsp-loss-weight",
default=1.0,
type=float,
help="weight for next sentence prediction" " loss (default 1)",
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
lm_logits, output_metadata = model(**sample["net_input"])
# reshape lm_logits from (N,T,C) to (N*T,C)
lm_logits = lm_logits.view(-1, lm_logits.size(-1))
lm_targets = sample["lm_target"].view(-1)
lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx)
# compute the number of tokens for which loss is computed. This is used
# to normalize the loss
ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel()
loss = lm_loss / ntokens
nsentences = sample["nsentences"]
# nsentences = 0
# Compute sentence loss if masked_lm_only is False
sentence_loss = None
if not self.masked_lm_only:
sentence_logits = output_metadata["sentence_logits"]
sentence_targets = sample["sentence_target"].view(-1)
# This needs to be recomputed due to some differences between
# TokenBlock and BlockPair dataset. This can be resolved with a
# refactor of BERTModel which we will do in the future.
# TODO: Remove this after refactor of BERTModel
nsentences = sentence_targets.size(0)
# Check for logits being none which can happen when remove_heads
# is set to true in the BERT model. Ideally we should set
# masked_lm_only to true in this case, but that requires some
# refactor in the BERT model.
if sentence_logits is not None:
sentence_loss = compute_cross_entropy_loss(
sentence_logits, sentence_targets
)
loss += self.nsp_loss_weight * (sentence_loss / nsentences)
# NOTE: as we are summing up per token mlm loss and per sentence nsp loss
# we don't need to use sample_size as denominator for the gradient
# here sample_size is just used for logging
sample_size = 1
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"lm_loss": utils.item(lm_loss.data) if reduce else lm_loss.data,
# sentence loss is not always computed
"sentence_loss": (
(utils.item(sentence_loss.data) if reduce else sentence_loss.data)
if sentence_loss is not None
else 0.0
),
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
lm_loss_sum = sum(log.get("lm_loss", 0) for log in logging_outputs)
sentence_loss_sum = sum(log.get("sentence_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_loss = sum(log.get("loss", 0) for log in logging_outputs)
metrics.log_scalar(
"loss",
agg_loss / sample_size / math.log(2) if sample_size > 0 else 0.0,
sample_size,
round=3,
)
metrics.log_scalar(
"lm_loss",
lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0,
ntokens,
round=3,
)
metrics.log_scalar(
"sentence_loss",
sentence_loss_sum / nsentences / math.log(2) if nsentences > 0 else 0.0,
nsentences,
round=3,
)
metrics.log_scalar(
"nll_loss",
lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0,
ntokens,
round=3,
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| 7,006
| 38.365169
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from .label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
LabelSmoothedCrossEntropyCriterionConfig,
)
from dataclasses import dataclass, field
@dataclass
class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig(
LabelSmoothedCrossEntropyCriterionConfig
):
alignment_lambda: float = field(
default=0.05, metadata={"help": "weight for the alignment loss"}
)
@register_criterion(
"label_smoothed_cross_entropy_with_alignment",
dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig,
)
class LabelSmoothedCrossEntropyCriterionWithAlignment(
LabelSmoothedCrossEntropyCriterion
):
def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda):
super().__init__(task, sentence_avg, label_smoothing)
self.alignment_lambda = alignment_lambda
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
alignment_loss = None
        # Compute alignment loss only for the training set and non-dummy batches.
if "alignments" in sample and sample["alignments"] is not None:
alignment_loss = self.compute_alignment_loss(sample, net_output)
if alignment_loss is not None:
logging_output["alignment_loss"] = utils.item(alignment_loss.data)
loss += self.alignment_lambda * alignment_loss
return loss, sample_size, logging_output
def compute_alignment_loss(self, sample, net_output):
attn_prob = net_output[1]["attn"][0]
bsz, tgt_sz, src_sz = attn_prob.shape
attn = attn_prob.view(bsz * tgt_sz, src_sz)
align = sample["alignments"]
align_weights = sample["align_weights"].float()
if len(align) > 0:
# Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to
# the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing.
loss = -(
(attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
* align_weights[:, None]
).sum()
else:
return None
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss_sum = utils.item(
sum(log.get("nll_loss", 0) for log in logging_outputs)
)
alignment_loss_sum = utils.item(
sum(log.get("alignment_loss", 0) for log in logging_outputs)
)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_scalar(
"alignment_loss",
alignment_loss_sum / sample_size / math.log(2),
sample_size,
round=3,
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
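# A toy sketch (added for illustration; values are hypothetical, not part of
# the original file) of the indexing used in compute_alignment_loss above:
# `align` holds (src, tgt) index pairs into the flattened
# (bsz * tgt_sz) x src_sz attention matrix, and each aligned pair contributes
# -log(attn) scaled by its weight.
def _alignment_loss_example():
    import torch

    attn = torch.full((6, 5), 0.2)  # (bsz * tgt_sz) x src_sz, uniform attention
    align = torch.tensor([[0, 1], [2, 3]])  # (src, tgt) index pairs
    align_weights = torch.ones(2)
    loss = -(
        (attn[align[:, 1][:, None], align[:, 0][:, None]]).log()
        * align_weights[:, None]
    ).sum()
    return loss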
| 4,748
| 35.251908
| 114
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/adaptive_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.constants import DDP_BACKEND_CHOICES
from omegaconf import II
@dataclass
class AdaptiveLossConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
ddp_backend: DDP_BACKEND_CHOICES = II("distributed_training.ddp_backend")
@register_criterion("adaptive_loss", dataclass=AdaptiveLossConfig)
class AdaptiveLoss(FairseqCriterion):
"""This is an implementation of the loss function accompanying the adaptive softmax approximation for
    graphics processing units (GPUs), described in the paper "Efficient softmax approximation for GPUs"
(http://arxiv.org/abs/1609.04309)."""
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
@classmethod
def build_criterion(cls, cfg: AdaptiveLossConfig, task):
if cfg.ddp_backend in {"c10d", "pytorch_ddp"}:
raise Exception(
"AdaptiveLoss is not compatible with the PyTorch "
"version of DistributedDataParallel. Please use "
"`--ddp-backend=legacy_ddp` instead."
)
return cls(task, cfg.sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model.decoder, "adaptive_softmax")
and model.decoder.adaptive_softmax is not None
)
adaptive_softmax = model.decoder.adaptive_softmax
net_output = model(**sample["net_input"])
orig_target = model.get_targets(sample, net_output)
nsentences = orig_target.size(0)
orig_target = orig_target.view(-1)
bsz = orig_target.size(0)
logits, target = adaptive_softmax(net_output[0], orig_target)
assert len(target) == len(logits)
loss = net_output[0].new(1 if reduce else bsz).zero_()
for i in range(len(target)):
if target[i] is not None:
assert target[i].min() >= 0 and target[i].max() <= logits[i].size(1)
loss += F.cross_entropy(
logits[i],
target[i],
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
orig = utils.strip_pad(orig_target, self.padding_idx)
ntokens = orig.numel()
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": loss.data,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
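# Toy sketch (hypothetical shapes, not part of the original file) of the
# cluster-wise accumulation in forward above: the adaptive softmax yields one
# (logits, target) pair per cluster, and the criterion sums a plain cross
# entropy over the non-empty clusters.
def _adaptive_loss_example():
    import torch

    logits = [torch.randn(4, 8), torch.randn(2, 5)]  # head and one tail cluster
    target = [torch.randint(0, 8, (4,)), torch.randint(0, 5, (2,))]
    loss = logits[0].new_zeros(1)
    for lg, t in zip(logits, target):
        loss += F.cross_entropy(lg, t, reduction="sum")
    return loss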
| 4,558
| 35.766129
| 105
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/masked_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
import math
from omegaconf import II
import torch
from fairseq import metrics, modules, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
@dataclass
class MaskedLmConfig(FairseqDataclass):
tpu: bool = II("common.tpu")
@register_criterion("masked_lm", dataclass=MaskedLmConfig)
class MaskedLmLoss(FairseqCriterion):
"""
Implementation for the loss used in masked language model (MLM) training.
"""
def __init__(self, cfg: MaskedLmConfig, task):
super().__init__(task)
self.tpu = cfg.tpu
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
masked_tokens = sample["target"].ne(self.padding_idx)
sample_size = masked_tokens.int().sum()
        # Rare: when no tokens are masked, fall back to projecting all tokens.
# We use torch.where to avoid device-to-host transfers,
# except on CPU where torch.where is not well supported
# (see github.com/pytorch/pytorch/issues/26247).
if self.tpu:
masked_tokens = None # always project all tokens on TPU
elif masked_tokens.device == torch.device("cpu"):
if not masked_tokens.any():
masked_tokens = None
else:
masked_tokens = torch.where(
masked_tokens.any(),
masked_tokens,
masked_tokens.new([True]),
)
logits = model(**sample["net_input"], masked_tokens=masked_tokens)[0]
targets = model.get_targets(sample, [logits])
if masked_tokens is not None:
targets = targets[masked_tokens]
loss = modules.cross_entropy(
logits.view(-1, logits.size(-1)),
targets.view(-1),
reduction="sum",
ignore_index=self.padding_idx,
)
logging_output = {
"loss": loss if self.tpu else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
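# Minimal sketch (hypothetical mask, not part of the original file) of the
# torch.where fallback in forward above: when no token is masked, the mask is
# replaced by an all-True tensor via broadcasting, so the projection stays
# non-empty without a device-to-host sync from masked_tokens.any().
def _masked_tokens_fallback_example():
    masked_tokens = torch.zeros(2, 4, dtype=torch.bool)  # nothing masked
    return torch.where(
        masked_tokens.any(), masked_tokens, masked_tokens.new([True])
    )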
| 3,402
| 33.373737
| 79
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/fastspeech2_loss.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from typing import List, Dict, Any
from dataclasses import dataclass, field
import sys
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import lengths_to_mask
from fairseq.models.fairseq_model import FairseqEncoderModel
@dataclass
class FastSpeech2CriterionConfig(FairseqDataclass):
ctc_weight: float = field(default=0.0, metadata={"help": "weight for CTC loss"})
@register_criterion("fastspeech2", dataclass=FastSpeech2CriterionConfig)
class FastSpeech2Loss(FairseqCriterion):
def __init__(self, task, ctc_weight):
super().__init__(task)
self.ctc_weight = ctc_weight
# def forward(self, model: FairseqEncoderModel, sample, reduction="sum"):
def forward(self, model: FairseqEncoderModel, sample, reduction="mean"):
src_tokens = sample["net_input"]["src_tokens"]
src_lens = sample["net_input"]["src_lengths"]
tgt_lens = sample["target_lengths"]
_feat_out, _feat_out_post, _, log_dur_out, pitch_out, energy_out = model(
src_tokens=src_tokens,
src_lengths=src_lens,
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
incremental_state=None,
target_lengths=tgt_lens,
speaker=sample["speaker"],
durations=sample["durations"],
pitches=sample["pitches"],
energies=sample["energies"],
)
src_mask = lengths_to_mask(sample["net_input"]["src_lengths"])
tgt_mask = lengths_to_mask(sample["target_lengths"])
feat_out, feat = _feat_out[tgt_mask], sample["target"][tgt_mask]
l1_loss = F.l1_loss(feat_out, feat, reduction=reduction)
if _feat_out_post is not None:
l1_loss += F.l1_loss(_feat_out_post[tgt_mask], feat, reduction=reduction)
pitch_loss = torch.tensor(0.0).type_as(l1_loss)
energy_loss = torch.tensor(0.0).type_as(l1_loss)
if pitch_out is not None and energy_out is not None:
pitches, energies = sample["pitches"], sample["energies"]
pitch_out, pitches = pitch_out[src_mask], pitches[src_mask]
energy_out, energies = energy_out[src_mask], energies[src_mask]
pitch_loss = F.mse_loss(pitch_out, pitches, reduction=reduction)
energy_loss = F.mse_loss(energy_out, energies, reduction=reduction)
log_dur_out = log_dur_out[src_mask]
dur = sample["durations"].float()
dur = dur.half() if log_dur_out.type().endswith(".HalfTensor") else dur
log_dur = torch.log(dur + 1)[src_mask]
dur_loss = F.mse_loss(log_dur_out, log_dur, reduction=reduction)
ctc_loss = torch.tensor(0.0).type_as(l1_loss)
if self.ctc_weight > 0.0:
lprobs = model.get_normalized_probs((_feat_out,), log_probs=True)
lprobs = lprobs.transpose(0, 1) # T x B x C
src_mask = lengths_to_mask(src_lens)
src_tokens_flat = src_tokens.masked_select(src_mask)
ctc_loss = (
F.ctc_loss(
lprobs,
src_tokens_flat,
tgt_lens,
src_lens,
reduction=reduction,
zero_infinity=True,
)
* self.ctc_weight
)
loss = l1_loss + dur_loss + pitch_loss + energy_loss + ctc_loss
sample_size = sample["nsentences"]
logging_output = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"l1_loss": utils.item(l1_loss.data),
"dur_loss": utils.item(dur_loss.data),
"pitch_loss": utils.item(pitch_loss.data),
"energy_loss": utils.item(energy_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
}
return loss, sample_size, logging_output
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
ns = [log.get("sample_size", 0) for log in logging_outputs]
ntot = sum(ns)
ws = [n / (ntot + 1e-8) for n in ns]
for key in [
"loss",
"l1_loss",
"dur_loss",
"pitch_loss",
"energy_loss",
"ctc_loss",
]:
vals = [log.get(key, 0) for log in logging_outputs]
val = sum(val * w for val, w in zip(vals, ws))
metrics.log_scalar(key, val, ntot, round=3)
metrics.log_scalar("sample_size", ntot, len(logging_outputs))
# inference metrics
if "targ_frames" not in logging_outputs[0]:
return
n = sum(log.get("targ_frames", 0) for log in logging_outputs)
for key, new_key in [
("mcd_loss", "mcd_loss"),
("pred_frames", "pred_ratio"),
("nins", "ins_rate"),
("ndel", "del_rate"),
]:
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(new_key, val / n, n, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
return False
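# Hypothetical toy sketch (added for this writeup, not part of the original
# file) of the duration loss above: the predictor regresses log(duration + 1),
# so the ground-truth durations are log-compressed before the MSE.
def _duration_loss_example():
    log_dur_out = torch.zeros(3)  # predicted log-durations
    dur = torch.tensor([0.0, 1.0, 3.0])  # ground-truth frame counts
    return F.mse_loss(log_dur_out, torch.log(dur + 1), reduction="mean")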
| 5,604
| 39.035714
| 85
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/ctc.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
@dataclass
class CtcCriterionConfig(FairseqDataclass):
zero_infinity: bool = field(
default=False,
metadata={"help": "zero inf loss when source length <= target length"},
)
sentence_avg: bool = II("optimization.sentence_avg")
post_process: str = field(
default="letter",
metadata={
"help": "how to post process predictions into words. can be letter, "
"wordpiece, BPE symbols, etc. "
"See fairseq.data.data_utils.post_process() for full list of options"
},
)
wer_kenlm_model: Optional[str] = field(
default=None,
metadata={
"help": "if this is provided, use kenlm to compute wer (along with other wer_* args)"
},
)
wer_lexicon: Optional[str] = field(
default=None,
metadata={"help": "lexicon to use with wer_kenlm_model"},
)
wer_lm_weight: float = field(
default=2.0,
metadata={"help": "lm weight to use with wer_kenlm_model"},
)
wer_word_score: float = field(
default=-1.0,
metadata={"help": "lm word score to use with wer_kenlm_model"},
)
wer_args: Optional[str] = field(
default=None,
metadata={
"help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)"
},
)
@register_criterion("ctc", dataclass=CtcCriterionConfig)
class CtcCriterion(FairseqCriterion):
def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask):
super().__init__(task)
self.blank_idx = (
task.target_dictionary.index(task.blank_symbol)
if hasattr(task, "blank_symbol")
else 0
)
self.pad_idx = task.target_dictionary.pad()
self.eos_idx = task.target_dictionary.eos()
self.post_process = cfg.post_process
if cfg.wer_args is not None:
(
cfg.wer_kenlm_model,
cfg.wer_lexicon,
cfg.wer_lm_weight,
cfg.wer_word_score,
) = eval(cfg.wer_args)
if cfg.wer_kenlm_model is not None:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
dec_args = Namespace()
dec_args.nbest = 1
dec_args.criterion = "ctc"
dec_args.kenlm_model = cfg.wer_kenlm_model
dec_args.lexicon = cfg.wer_lexicon
dec_args.beam = 50
dec_args.beam_size_token = min(50, len(task.target_dictionary))
dec_args.beam_threshold = min(50, len(task.target_dictionary))
dec_args.lm_weight = cfg.wer_lm_weight
dec_args.word_score = cfg.wer_word_score
dec_args.unk_weight = -math.inf
dec_args.sil_weight = 0
self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
else:
self.w2l_decoder = None
self.zero_infinity = cfg.zero_infinity
self.sentence_avg = cfg.sentence_avg
def forward(self, model, sample, reduce=True):
net_output = model(**sample["net_input"])
lprobs = model.get_normalized_probs(
net_output, log_probs=True
).contiguous() # (T, B, C) from the encoder
if "src_lengths" in sample["net_input"]:
input_lengths = sample["net_input"]["src_lengths"]
else:
if net_output["padding_mask"] is not None:
non_padding_mask = ~net_output["padding_mask"]
input_lengths = non_padding_mask.long().sum(-1)
else:
input_lengths = lprobs.new_full(
(lprobs.size(1),), lprobs.size(0), dtype=torch.long
)
pad_mask = (sample["target"] != self.pad_idx) & (
sample["target"] != self.eos_idx
)
targets_flat = sample["target"].masked_select(pad_mask)
if "target_lengths" in sample:
target_lengths = sample["target_lengths"]
else:
target_lengths = pad_mask.sum(-1)
with torch.backends.cudnn.flags(enabled=False):
loss = F.ctc_loss(
lprobs,
targets_flat,
input_lengths,
target_lengths,
blank=self.blank_idx,
reduction="sum",
zero_infinity=self.zero_infinity,
)
ntokens = (
sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item()
)
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": ntokens,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
if not model.training:
import editdistance
with torch.no_grad():
lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
c_err = 0
c_len = 0
w_errs = 0
w_len = 0
wv_errs = 0
for lp, t, inp_l in zip(
lprobs_t,
sample["target_label"]
if "target_label" in sample
else sample["target"],
input_lengths,
):
lp = lp[:inp_l].unsqueeze(0)
decoded = None
if self.w2l_decoder is not None:
decoded = self.w2l_decoder.decode(lp)
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
p = (t != self.task.target_dictionary.pad()) & (
t != self.task.target_dictionary.eos()
)
targ = t[p]
targ_units = self.task.target_dictionary.string(targ)
targ_units_arr = targ.tolist()
toks = lp.argmax(dim=-1).unique_consecutive()
pred_units_arr = toks[toks != self.blank_idx].tolist()
c_err += editdistance.eval(pred_units_arr, targ_units_arr)
c_len += len(targ_units_arr)
targ_words = post_process(targ_units, self.post_process).split()
pred_units = self.task.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units, self.post_process).split()
if decoded is not None and "words" in decoded:
pred_words = decoded["words"]
w_errs += editdistance.eval(pred_words, targ_words)
wv_errs += editdistance.eval(pred_words_raw, targ_words)
else:
dist = editdistance.eval(pred_words_raw, targ_words)
w_errs += dist
wv_errs += dist
w_len += len(targ_words)
logging_output["wv_errors"] = wv_errs
logging_output["w_errors"] = w_errs
logging_output["w_total"] = w_len
logging_output["c_errors"] = c_err
logging_output["c_total"] = c_len
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("_c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("_c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("_w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("_wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("_w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
)
if meters["_c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
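# Minimal hypothetical sketch (added for this writeup, not part of the
# original file) of the F.ctc_loss call in forward above: lprobs is (T, B, C)
# log-probabilities, targets are flattened across the batch, and the blank
# index must not appear in the targets.
def _ctc_loss_example():
    T, B, C = 10, 2, 6
    lprobs = F.log_softmax(torch.randn(T, B, C), dim=-1)
    input_lengths = torch.full((B,), T, dtype=torch.long)
    target_lengths = torch.tensor([4, 3])
    targets_flat = torch.randint(1, C, (int(target_lengths.sum()),))
    return F.ctc_loss(
        lprobs,
        targets_flat,
        input_lengths,
        target_lengths,
        blank=0,
        reduction="sum",
        zero_infinity=True,
    )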
| 11,065
| 36.385135
| 104
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/cross_entropy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import math
import torch
import logging
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class CrossEntropyCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
@register_criterion("cross_entropy", dataclass=CrossEntropyCriterionConfig)
class CrossEntropyCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
# Check: inspect LM loading process and LM model
# logging.info(" Checking language model ...... ")
# model.eval()
# # dummy_inputs = torch.tensor(
# # [[2,38,817,72,220,80,594,168,
# # 29,19,17,42,146,518,436]]
# # ).cuda() # For validation
# dummy_inputs = torch.tensor(
# [[2, 320, 1018, 1090, 553]]
# ).cuda() # For training
# dummy_lm_logits, _ = model(src_tokens=dummy_inputs)
# dummy_preds = dummy_lm_logits.max(-1).indices
# dummy_logprobs = utils.log_softmax(
# dummy_lm_logits.float(), dim=-1)
# dummy_nll_loss = F.nll_loss(
# dummy_logprobs[0], dummy_inputs[0],
# ignore_index=self.padding_idx, reduction="mean")
# logging.info(f"dummy_inputs: {dummy_inputs[0, 1:]}")
# logging.info(f"dummy_preds: {dummy_preds[0]}")
# logging.info(f"dummy_nll_loss: {dummy_nll_loss}")
# logging.info(f"Language model inspection is done.")
net_output = model(**sample["net_input"])
loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
# Check: little test for single sample
# tokens = torch.tensor(
# [[ 2, 38, 817, 72, 220, 80, 594, 168, 29, 19, 17, 42,146, 518, 436]]).cuda()
# model.eval()
# output, _ = model(src_tokens=tokens)
# print(output.max(-1))
# sys.exit(0)
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1)
loss = F.nll_loss(
lprobs,
target,
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
return loss, loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
            # math.log2(2) == 1.0, so this logs the unconverted loss in nats
            metrics.log_scalar(
                "normal_nll_loss", loss_sum / ntokens / math.log2(2), ntokens, round=3
            )
metrics.log_derived(
"ppl",
lambda meters: utils.get_perplexity(
meters["nll_loss"].avg, base=math.e
),
)
metrics.log_derived(
"normal_ppl",
lambda meters: utils.get_perplexity(
meters["normal_nll_loss"].avg, base=math.e
),
)
else:
metrics.log_derived(
"ppl",
lambda meters: utils.get_perplexity(meters["loss"].avg, base=math.e),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
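# Hypothetical sketch (added for this writeup, not part of the original file)
# of the NLL core inside compute_loss above: log-probs flattened to (B*T, V)
# against flattened targets, with pad positions ignored.
def _nll_core_example():
    lprobs = F.log_softmax(torch.randn(2, 3, 5), dim=-1).view(-1, 5)
    target = torch.tensor([4, 1, 0, 2, 1, 3])  # index 1 is the pad symbol here
    return F.nll_loss(lprobs, target, ignore_index=1, reduction="sum")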
| 5,192
| 35.829787
| 112
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import importlib
import os
from fairseq import registry
from fairseq.criterions.fairseq_criterion import ( # noqa
FairseqCriterion,
LegacyFairseqCriterion,
)
from omegaconf import DictConfig
(
build_criterion_,
register_criterion,
CRITERION_REGISTRY,
CRITERION_DATACLASS_REGISTRY,
) = registry.setup_registry(
"--criterion", base_class=FairseqCriterion, default="cross_entropy"
)
def build_criterion(cfg: DictConfig, task):
return build_criterion_(cfg, task)
# automatically import any Python files in the criterions/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fairseq.criterions." + file_name)
| 997
| 25.972973
| 71
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/sentence_prediction.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
@dataclass
class SentencePredictionConfig(FairseqDataclass):
classification_head_name: str = field(
default="sentence_classification_head",
metadata={"help": "name of the classification head to use"},
)
regression_target: bool = field(
default=False,
)
@register_criterion("sentence_prediction", dataclass=SentencePredictionConfig)
class SentencePredictionCriterion(FairseqCriterion):
def __init__(self, cfg: SentencePredictionConfig, task):
super().__init__(task)
self.classification_head_name = cfg.classification_head_name
self.regression_target = cfg.regression_target
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
)
targets = model.get_targets(sample, [logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = F.nll_loss(lprobs, targets, reduction="sum")
else:
logits = logits.view(-1).float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = logits.argmax(dim=1)
logging_output["ncorrect"] = (preds == targets).sum()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
metrics.log_scalar(
"accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
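# Toy sketch (hypothetical tensors, not part of the original file) of the two
# target modes in forward above: NLL over a log-softmax for classification,
# MSE on the raw single-output logits for regression.
def _sentence_prediction_loss_example(regression_target: bool = False):
    if not regression_target:
        logits = torch.randn(4, 3)  # 4 sentences, 3 classes
        targets = torch.randint(0, 3, (4,))
        lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
        return F.nll_loss(lprobs, targets, reduction="sum")
    logits = torch.randn(4, 1)  # regression head has a single output
    targets = torch.randn(4)
    return F.mse_loss(logits.view(-1).float(), targets, reduction="sum")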
| 3,927
| 36.409524
| 96
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/cif_correction_loss.py
|
# @Time : 2021/7/14
# @Author : Minglun Han
# @File : cif_correction_loss.py
"""""
Update:
By 2022/0805
1. Add monitoring for negative correction loss and positive correction loss (done);
2. Freeze the batch normalization parameters for Conformer structure (done);
3. Remove redundant codes (done);
""" ""
import sys
import math
import editdistance
import numpy as np
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
import sklearn
from sklearn.metrics import auc
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
np.set_printoptions(threshold=100000)
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
"""
:param lprobs: log probabilities with shape B x T x V
:param target: targets with shape B x T
    :param epsilon: label smoothing rate
    :param ignore_index: padding index
    :param reduce: whether to sum the loss over all positions
    :return: smoothed cross-entropy loss
"""
# Calculate losses
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1) # B x T x 1
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
# Reduce losses
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
# Get final smoothed cross-entropy loss
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
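# Hypothetical usage sketch (added for this writeup, not part of the original
# file) of label_smoothed_nll_loss above on a toy batch; pad index 1 follows
# the fairseq dictionary convention.
def _label_smoothed_nll_loss_example():
    bsz, tgt_len, vocab = 2, 4, 10
    lprobs = F.log_softmax(torch.randn(bsz, tgt_len, vocab), dim=-1)
    target = torch.randint(0, vocab, (bsz, tgt_len))
    return label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=1)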
@dataclass
class CifCriterionConfig(FairseqDataclass):
# General settings
zero_infinity: bool = field(
default=False,
metadata={"help": "zero inf loss when source length <= target length"},
)
sentence_avg: bool = II("optimization.sentence_avg")
post_process: str = field(
default="char",
metadata={
"help": "how to post process predictions into words. can be letter, "
"wordpiece, BPE symbols, etc. "
"See fairseq.data.data_utils.post_process() for full list of options"
},
)
# Settings of cif losses
    no_quantity_loss: bool = field(
        default=False, metadata={"help": "if set, disable the quantity loss"}
    )
    no_ctc_loss: bool = field(default=False, metadata={"help": "if set, disable the ctc loss"})
apply_align_loss: bool = field(default=False, metadata={"help": "apply align loss"})
quantity_loss_lambda: float = field(
default=1.0, metadata={"help": "the interpolation weight of quantity loss"}
)
ctc_loss_lambda: float = field(
default=0.25, metadata={"help": "the interpolation weight of ctc loss"}
)
align_loss_lambda: float = field(
default=1.0,
metadata={"help": "the interpolation weight of ctc-constrained alignment loss"},
)
apply_label_smoothing: bool = field(
default=False,
metadata={"help": "apply label smoothing over cross entropy loss"},
)
label_smoothing_type: str = field(
default="uniform", metadata={"help": "specify the label smoothing type"}
)
label_smoothing_rate: float = field(
default=0.1, metadata={"help": "the rate of label smoothing"}
)
no_eos_label: bool = field(default=False)
apply_focal_loss: bool = field(default=False)
focal_loss_gamma: float = field(default=2.0)
no_comb_loss_sum: bool = field(default=False)
# uncertainty estimation loss (UE loss) settings
ue_loss_lambda: float = field(default=1.0)
apply_ue_focal_loss: bool = field(default=False)
ue_focal_scaling_weight: float = field(default=1.0)
ue_focal_loss_gamma: float = field(default=2.0)
apply_ue_cb_loss: bool = field(default=False)
ue_cb_loss_beta: float = field(default=0.90)
reverse_uem_labels: bool = field(default=False)
use_pred_softmax_prob_as_conf: bool = field(default=False)
# correction loss (Correction loss) settings
corr_loss_lambda: float = field(default=1.0)
apply_corr_focal_loss: bool = field(default=False)
corr_focal_scaling_weight: float = field(default=1.0)
corr_focal_loss_gamma: float = field(default=2.0)
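# Illustrative sketch (hypothetical values, not part of the original file) of
# the focal scaling used for the UE loss configured above:
# weight = w * (1 - p)**gamma for positive labels and w * p**gamma otherwise,
# so confidently-correct positions are down-weighted.
def _ue_focal_weight_example(gamma: float = 2.0, scaling_weight: float = 1.0):
    probs = torch.tensor([0.9, 0.6, 0.2])  # sigmoid outputs of the UE head
    labels = torch.tensor([1, 1, 0])
    w = torch.where(labels == 1, 1 - probs, probs)
    return scaling_weight * w**gamma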
def eval_nce(labels, probs, eps=1e-8):
labels = torch.tensor(labels).float()
probs = torch.tensor(probs).float()
def stable_log(tensor):
return torch.log(torch.clip(tensor, min=eps, max=1.0))
total_sample_num = labels.size()[0]
correct_sample_num = labels[labels == 1].size()[0]
cross_entropy = (
-1
* (
torch.dot(labels, stable_log(probs))
+ torch.dot((1 - labels), stable_log(1 - probs))
)
/ total_sample_num
)
p_c = torch.tensor(float(correct_sample_num) / float(total_sample_num))
entropy = (
-1
* (
correct_sample_num * torch.log(p_c)
+ (total_sample_num - correct_sample_num) * torch.log(1 - p_c)
)
/ total_sample_num
)
normalized_ce = (entropy - cross_entropy) / entropy
return normalized_ce
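# Hypothetical quick check (added for this writeup, not part of the original
# file) for eval_nce: confidence scores that agree with the 0/1 labels give a
# positive normalized cross entropy, approaching 1 as the confidences become
# perfect.
def _eval_nce_example():
    labels = [1, 1, 0, 1, 0]
    probs = [0.9, 0.8, 0.1, 0.95, 0.2]
    return eval_nce(labels, probs)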
@register_criterion("cif_correction_loss", dataclass=CifCriterionConfig)
class CifCorrectionLoss(FairseqCriterion):
def __init__(self, cfg: CifCriterionConfig, task: FairseqTask):
super().__init__(task)
self.blank_idx = (
task.target_dictionary.index("<ctc_blank>")
if "<ctc_blank>" in task.target_dictionary.indices
else task.target_dictionary.bos()
)
self.pad_idx = task.target_dictionary.pad() # 1
self.eos_idx = task.target_dictionary.eos() # 2
self.bos_idx = task.target_dictionary.bos() # 0
self.no_correction_idx = len(task.target_dictionary)
self.post_process = cfg.post_process
self.zero_infinity = cfg.zero_infinity
self.sentence_avg = cfg.sentence_avg
# Register losses settings
self.apply_quantity_loss = not cfg.no_quantity_loss
self.apply_ctc_loss = not cfg.no_ctc_loss
self.apply_align_loss = cfg.apply_align_loss
self.quantity_loss_lambda = cfg.quantity_loss_lambda
self.ctc_loss_lambda = cfg.ctc_loss_lambda
self.align_loss_lambda = cfg.align_loss_lambda
# Register label smoothing settings
self.label_smoothing_type = cfg.label_smoothing_type
self.label_smoothing_rate = cfg.label_smoothing_rate
self.apply_label_smoothing = cfg.apply_label_smoothing
self.apply_focal_loss = cfg.apply_focal_loss
self.focal_loss_gamma = cfg.focal_loss_gamma
self.no_eos_label = cfg.no_eos_label
self.no_comb_loss_sum = cfg.no_comb_loss_sum
# Register correction loss settings
self.ue_loss_lambda = cfg.ue_loss_lambda
self.apply_ue_focal_loss = cfg.apply_ue_focal_loss
self.ue_focal_scaling_weight = cfg.ue_focal_scaling_weight
self.ue_focal_loss_gamma = cfg.ue_focal_loss_gamma
self.apply_ue_cb_loss = cfg.apply_ue_cb_loss
self.ue_cb_loss_beta = cfg.ue_cb_loss_beta
self.reverse_uem_labels = cfg.reverse_uem_labels
self.use_pred_softmax_prob_as_conf = cfg.use_pred_softmax_prob_as_conf
self.corr_loss_lambda = cfg.corr_loss_lambda
self.apply_corr_focal_loss = cfg.apply_corr_focal_loss
self.corr_focal_scaling_weight = cfg.corr_focal_scaling_weight
self.corr_focal_loss_gamma = cfg.corr_focal_loss_gamma
def get_loss(self, model, sample, net_output, reduce=True):
# Get model outputs
cif_out_padding_mask = net_output["cif_out_padding_mask"] # B x T
# Correction training needed values
# corr_cif_outputs = net_output["corr_cif_outputs"] # B x T x C
corr_cif_padding_mask = net_output["corr_cif_padding_mask"] # B x T
uem_logits = net_output["uem_logits"] # B x T
uem_labels = net_output["uem_labels"] # B x (1 + K) x T
        cordec_logits = net_output[
            "cordec_logits"
        ]  # B x T x (V + 1); the extra token is the <no-cor> mark
cordec_labels = net_output["cordec_labels"] # B x (1 + K) x T
uem_padding_mask = net_output["uem_padding_mask"].bool() # B x T
aligned_targets = net_output["aligned_targets"] # B x T
aligned_preds = net_output["aligned_preds"]
        decoder_logits = net_output["asr_decoder_logits"]  # B x T x V
if self.reverse_uem_labels:
uem_labels = 1 - uem_labels
# Collect targets and target_length for ctc loss and ce loss
target_lengths = sample["target_lengths"] # targets length without eos
target_with_eos = sample["target"]
target_with_eos_lengths = target_lengths # targets length with eos
if self.no_eos_label:
target_with_eos_lengths = target_with_eos_lengths - 1
target_with_eos = torch.where(
target_with_eos == self.eos_idx,
self.pad_idx * torch.ones_like(target_with_eos),
target_with_eos,
)
adjusted_target_with_eos = target_with_eos
# Calculate the cross-entropy loss
cif_max_len = cif_out_padding_mask.size(1) # Get max length of cif outputs
tgt_max_len = target_with_eos_lengths.max() # Get max length of targets
reg_min_len = min(
cif_max_len, tgt_max_len
) # Obtain the minimum length of cif length and target length
# Calculate the uncertainty estimation (UE) loss
cif_max_len = corr_cif_padding_mask.size(-1)
org_uem_probs, org_uem_labels = None, None
ue_loss = torch.tensor(0.0)
ue_pos_loss = torch.tensor(0.0)
ue_neg_loss = torch.tensor(0.0)
if model.args.use_uem or self.ue_loss_lambda != 0.0:
# process labels and probs
uem_labels = uem_labels[:, :cif_max_len] # B x T
uem_logits = uem_logits[:, :cif_max_len] # B x T
uem_probs = torch.sigmoid(uem_logits.float()) # B x T
org_uem_probs = uem_probs # B x T
org_uem_labels = uem_labels # B x T
if self.apply_ue_focal_loss:
scaling_weight = torch.where(
uem_labels == 1, 1 - uem_probs, uem_probs
) # B x T
scaling_weight = self.ue_focal_scaling_weight * (
scaling_weight**self.ue_focal_loss_gamma
)
scaling_weight = scaling_weight[uem_padding_mask] # (B x T)
uem_probs = uem_probs[uem_padding_mask] # (B x T)
uem_labels = uem_labels[uem_padding_mask] # (B x T)
elif self.apply_ue_cb_loss:
uem_probs = uem_probs[uem_padding_mask] # (B x T)
uem_labels = uem_labels[uem_padding_mask] # (B x T)
pos_label_num = uem_labels[uem_labels == 1].size(0)
neg_label_num = uem_labels[uem_labels == 0].size(0)
scaling_weight = torch.where(
uem_labels == 1,
(1 - self.ue_cb_loss_beta)
/ (1 - self.ue_cb_loss_beta**pos_label_num),
(1 - self.ue_cb_loss_beta)
/ (1 - self.ue_cb_loss_beta**neg_label_num),
) # (B x T)
else:
scaling_weight = torch.ones_like(uem_labels)[uem_padding_mask]
uem_probs = uem_probs[uem_padding_mask] # (B x T)
uem_labels = uem_labels[uem_padding_mask] # (B x T)
# Calculate cross-entropy loss
try:
ue_loss = F.binary_cross_entropy(
uem_probs.float(), uem_labels.float(), reduction="none"
)
            except Exception:
                # Dump the offending values (probs/labels outside [0, 1]) before aborting
                print(uem_probs)
print(uem_labels)
print(uem_probs[(uem_probs < 0) | (uem_probs > 1)])
print(uem_labels[(uem_labels < 0) | (uem_labels > 1)])
sys.exit(0)
ue_loss = (scaling_weight * ue_loss).sum()
            # Calculate negative loss
uem_neg_labels = uem_labels[uem_labels == 0]
uem_neg_probs = uem_probs[uem_labels == 0]
ue_neg_loss = F.binary_cross_entropy(
uem_neg_probs.float(), uem_neg_labels.float(), reduction="sum"
)
# Calculate positive loss
uem_pos_labels = uem_labels[uem_labels == 1]
uem_pos_probs = uem_probs[uem_labels == 1]
ue_pos_loss = F.binary_cross_entropy(
uem_pos_probs.float(),
uem_pos_labels.float(),
reduction="sum",
)
# Calculate the correction cross-entropy (Corr-CE) loss
corr_loss = torch.tensor(0.0)
pos_corr_loss = torch.tensor(0.0)
neg_corr_loss = torch.tensor(0.0)
org_corr_probs, org_corr_lprobs, org_corr_labels = None, None, None
pos_corr_nsamples, neg_corr_nsamples = 1, 1
if model.args.use_cordec and self.corr_loss_lambda != 0.0:
# process labels for correction loss
corr_ce_labels = cordec_labels[:, :cif_max_len] # B x T
org_corr_labels = corr_ce_labels
corr_ce_labels = corr_ce_labels.view(-1) # (B x T)
# process probs and log-probs for correction loss
corr_probs = model.get_probs_from_logits(cordec_logits, log_probs=False)[
:, :cif_max_len, :
] # B x T x V
if model.args.use_corr_pgn:
gate_ratio = net_output["gate_ratio"] # B x T x 1
copy_probs = net_output["copy_probs"][:, :cif_max_len, :] # B x T x V
corr_probs = gate_ratio * corr_probs + (1 - gate_ratio) * copy_probs
corr_lprobs = torch.log(corr_probs)
org_corr_lprobs = corr_lprobs # B x T x V
corr_lprobs = corr_lprobs.view(-1, corr_lprobs.size(-1)) # (B x T) x V
org_corr_probs = corr_probs # B x T x V
corr_probs = corr_probs.view(-1, corr_probs.size(-1)) # (B x T) x V
# calculate focal loss weight
corr_scaling_weight = torch.ones_like(corr_ce_labels) # (B x T)
if self.apply_corr_focal_loss:
corr_scaling_weight = F.one_hot(
corr_ce_labels.long(), num_classes=corr_lprobs.size(-1)
) # (B x T) x V
corr_scaling_weight = (corr_scaling_weight * (1 - corr_probs)).sum(
-1
) # (B x T)
corr_scaling_weight = self.corr_focal_scaling_weight * (
corr_scaling_weight**self.corr_focal_loss_gamma
) # (B x T)
# calculate correction ce loss
corr_loss = F.nll_loss(
corr_lprobs.float(),
corr_ce_labels.long(),
ignore_index=self.pad_idx,
reduction="none",
)
corr_loss = (corr_scaling_weight * corr_loss).sum()
# calculate positive loss and negative loss
pos_corr_ce_labels = corr_ce_labels[
corr_ce_labels != self.no_correction_idx
]
pos_corr_probs = corr_lprobs[corr_ce_labels != self.no_correction_idx]
pos_corr_loss = F.nll_loss(
pos_corr_probs.float(),
pos_corr_ce_labels.long(),
ignore_index=self.pad_idx,
reduction="none",
)
pos_corr_loss = pos_corr_loss.sum().detach()
pos_corr_nsamples = corr_ce_labels[
(corr_ce_labels != self.pad_idx)
& (corr_ce_labels != self.no_correction_idx)
].size(0)
neg_corr_ce_labels = corr_ce_labels[
corr_ce_labels == self.no_correction_idx
]
neg_corr_probs = corr_lprobs[corr_ce_labels == self.no_correction_idx]
neg_corr_loss = F.nll_loss(
neg_corr_probs.float(),
neg_corr_ce_labels.long(),
ignore_index=self.pad_idx,
reduction="none",
)
neg_corr_loss = neg_corr_loss.sum().detach()
neg_corr_nsamples = max(
corr_ce_labels[
(corr_ce_labels != self.pad_idx)
& (corr_ce_labels == self.no_correction_idx)
].size(0),
1,
)
        # Collect the number of tokens in the current batch
ntokens = (
sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item()
)
ntokens_with_eos = target_with_eos_lengths.sum().item()
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
corr_nsamples = uem_labels.size(0)
neg_ue_nsamples = uem_labels[uem_labels == 0].size(0)
pos_ue_nsamples = uem_labels[uem_labels == 1].size(0)
loss = ue_loss * self.ue_loss_lambda + corr_loss * self.corr_loss_lambda
# CHECK: check all losses
if torch.isnan(ue_loss).sum() != 0 or torch.isinf(ue_loss).sum() != 0:
print("Gradient problems: ")
print(ue_loss)
if torch.isnan(corr_loss).sum() != 0 or torch.isinf(corr_loss).sum() != 0:
print("Gradient problems: ")
print(corr_loss)
# Build final logging outputs
logging_output = {
"loss": utils.item(loss.data),
"ue_loss": utils.item(ue_loss.data),
"ue_neg_loss": utils.item(ue_neg_loss.data),
"ue_pos_loss": utils.item(ue_pos_loss.data),
"corr_loss": utils.item(corr_loss.data),
"pos_corr_loss": utils.item(pos_corr_loss.data),
"neg_corr_loss": utils.item(neg_corr_loss.data),
"ntokens": ntokens,
"ntokens_with_eos": ntokens_with_eos,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
"corr_nsamples": corr_nsamples,
"neg_ue_nsamples": neg_ue_nsamples,
"pos_ue_nsamples": pos_ue_nsamples,
"pos_corr_nsamples": pos_corr_nsamples,
"neg_corr_nsamples": neg_corr_nsamples,
}
# Check uncertainty estimation accuracy on validation set
if self.ue_loss_lambda != 0.0:
with torch.no_grad():
uem_probs = org_uem_probs.float().contiguous().cpu() # B x T
uem_labels = org_uem_labels.int().contiguous().cpu() # B x T
uem_lens = uem_padding_mask.int().sum(dim=-1) # B
# Get softmax confidence score / uncertainty score
conf_ce_probs = model.get_probs_from_logits(
decoder_logits, log_probs=False
)[
:, :cif_max_len, :
] # B x T x V
onehot_mask = F.one_hot(
aligned_preds.long(), num_classes=conf_ce_probs.size(-1)
)
conf_probs = (conf_ce_probs * onehot_mask).sum(-1) # B x T
# conf_probs = conf_ce_probs.max(-1).values # B x T
ue_sm_probs = 1 - conf_probs # B x T
ue_sm_probs = ue_sm_probs.float().contiguous().cpu() # B x T
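                # Softmax baseline: 1 - p(aligned prediction) acts as a
                # confidence-based uncertainty score, contrasted below with
                # the learned UEM probabilities on the same labels.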
                # Loop over all hypotheses
pred_pos_num, pred_neg_num = 0, 0
label_pos_num, label_neg_num = 0, 0
uem_total_num, uem_correct_num = 0, 0
uem_recall_total_pos_num, uem_recall_correct_pos_num = 0, 0
uem_precision_total_pos_num, uem_precision_correct_pos_num = 0, 0
auc_labels, auc_probs, auc_sm_probs = [], [], []
for probs, sm_probs, label, max_len in zip(
uem_probs, ue_sm_probs, uem_labels, uem_lens
):
probs = probs[:max_len] # T
sm_probs = sm_probs[:max_len] # T
label = label[:max_len] # T
pred = (probs > 0.5).int()
pred_pos_num += (pred == 1).sum()
pred_neg_num += (pred != 1).sum()
label_pos_num += (label == 1).sum()
label_neg_num += (label != 1).sum()
label_pos_locations = (label == 1).bool()
pred_for_recall = pred[label_pos_locations]
label_for_recall = label[label_pos_locations]
uem_recall_correct_pos_num += (
pred_for_recall == label_for_recall
).sum()
uem_recall_total_pos_num += label_for_recall.size()[0]
pred_pos_locations = (pred == 1).bool()
pred_for_precision = pred[pred_pos_locations]
label_for_precision = label[pred_pos_locations]
uem_precision_correct_pos_num += (
pred_for_precision == label_for_precision
).sum()
uem_precision_total_pos_num += pred_for_precision.size()[0]
comp_res = (pred == label).int()
uem_total_num += comp_res.size()[0]
uem_correct_num += (comp_res == 1).sum()
# Collect labels and probs for the calculation of AUC
auc_labels.append(label)
auc_probs.append(probs)
auc_sm_probs.append(sm_probs)
auc_labels = np.concatenate(auc_labels, axis=0)
auc_probs = np.concatenate(auc_probs, axis=0)
auc_sm_probs = np.concatenate(auc_sm_probs, axis=0)
if self.reverse_uem_labels:
auc_sm_probs = 1 - auc_sm_probs
sm_nce = eval_nce(auc_labels, auc_sm_probs)
uem_nce = eval_nce(auc_labels, auc_probs)
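                # NCE here presumably denotes normalized cross entropy, a
                # calibration metric that compares the estimator against a
                # constant-prior baseline (assumption: eval_nce is provided
                # alongside this criterion; it is not defined in this file).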
try:
# For uem probs
uem_auc = roc_auc_score(auc_labels, auc_probs, average=None)
precision, recall, _ = precision_recall_curve(auc_labels, auc_probs)
uem_pr_auc = auc(recall, precision)
neg_precision, neg_recall, _ = precision_recall_curve(
1 - auc_labels, 1 - auc_probs
)
uem_pr_neg_auc = auc(neg_recall, neg_precision)
# For softmax probs
uem_sm_auc = roc_auc_score(auc_labels, auc_sm_probs, average=None)
precision, recall, _ = precision_recall_curve(
auc_labels, auc_sm_probs
)
uem_sm_pr_auc = auc(recall, precision)
neg_precision, neg_recall, _ = precision_recall_curve(
1 - auc_labels, 1 - auc_sm_probs
)
uem_sm_pr_neg_auc = auc(neg_recall, neg_precision)
except ValueError:
print("Encounter ValueError, ignore it.")
uem_auc, uem_pr_auc, uem_pr_neg_auc = 1.0, 1.0, 1.0
uem_sm_auc, uem_sm_pr_auc, uem_sm_pr_neg_auc = 1.0, 1.0, 1.0
logging_output["uem_pred_pos_num"] = pred_pos_num.item()
logging_output["uem_pred_neg_num"] = pred_neg_num.item()
logging_output["uem_label_pos_num"] = label_pos_num.item()
logging_output["uem_label_neg_num"] = label_neg_num.item()
logging_output["uem_total_num"] = uem_total_num
logging_output["uem_correct_num"] = uem_correct_num.item()
logging_output["uem_recall_total_pos_num"] = uem_recall_total_pos_num
logging_output[
"uem_recall_correct_pos_num"
] = uem_recall_correct_pos_num.item()
logging_output[
"uem_precision_total_pos_num"
] = uem_precision_total_pos_num
logging_output[
"uem_precision_correct_pos_num"
] = uem_precision_correct_pos_num.item()
logging_output["uem_auc"] = uem_auc
logging_output["uem_pr_auc"] = uem_pr_auc
logging_output["uem_pr_neg_auc"] = uem_pr_neg_auc
logging_output["uem_sm_auc"] = uem_sm_auc
logging_output["uem_sm_pr_auc"] = uem_sm_pr_auc
logging_output["uem_sm_pr_neg_auc"] = uem_sm_pr_neg_auc
logging_output["sm_nce"] = sm_nce
logging_output["uem_nce"] = uem_nce
# Check correction outputs accuracy on validation set
if self.corr_loss_lambda != 0.0:
with torch.no_grad():
corr_probs = org_corr_lprobs.float().contiguous().cpu() # B x T x V
corr_preds = torch.argmax(corr_probs, dim=-1) # B x T
cordec_labels = org_corr_labels.int().contiguous().cpu() # B x T
uem_lens = uem_padding_mask.int().sum(dim=-1) # B
                # Loop over all hypotheses
corr_total_num, corr_correct_num = 0, 0
corr_total_pos_num, corr_correct_pos_num = 0, 0
for pred, label, max_len in zip(corr_preds, cordec_labels, uem_lens):
pred = pred[:max_len] # T
label = label[:max_len] # T
comp_res = (pred == label).int()
corr_total_num += comp_res.size()[0]
corr_correct_num += (comp_res == 1).sum()
label_pos_locations = (label != self.no_correction_idx).bool()
label_for_recall = label[label_pos_locations]
pred_for_recall = pred[label_pos_locations]
corr_correct_pos_num += (pred_for_recall == label_for_recall).sum()
corr_total_pos_num += label_for_recall.size()[0]
logging_output["corr_total_num"] = corr_total_num
logging_output["corr_correct_num"] = corr_correct_num.item()
logging_output["corr_total_pos_num"] = corr_total_pos_num
logging_output["corr_correct_pos_num"] = corr_correct_pos_num.item()
# Calculate the total loss
if self.no_comb_loss_sum:
loss = (
ue_loss * self.ue_loss_lambda / corr_nsamples
+ corr_loss * self.corr_loss_lambda / corr_nsamples
)
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True):
        # NOTE: freeze all batch normalization layers when the ASR main body is frozen
if model.args.freeze_asr_main_body:
model.freeze_batch_norm(model)
# Forward the whole model
net_output = model(
src_tokens=sample["net_input"]["src_tokens"],
src_lengths=sample["net_input"]["src_lengths"],
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
target_lengths=sample["target_lengths"],
target=sample["target"],
)
# Calculate losses
loss, sample_size, logging_output = self.get_loss(
model, sample, net_output, reduce=True
)
return loss, sample_size, logging_output
def expand_tensor_dim(self, x, expand_size, target_dim=1, reduce=False):
assert (
target_dim == 1
), "only the expansion at the second dimension is available."
rank = len(x.size())
unsq_x = x.unsqueeze(target_dim)
if rank == 1:
            sz1 = x.size(0)
            x = unsq_x.repeat(1, expand_size)
            x = x.view(sz1 * expand_size) if reduce else x
elif rank == 2:
sz1, sz2 = x.size()
x = unsq_x.repeat(1, expand_size, 1)
x = x.view((sz1 * expand_size), sz2) if reduce else x
elif rank == 3:
sz1, sz2, sz3 = x.size()
x = unsq_x.repeat(1, expand_size, 1, 1)
x = x.view((sz1 * expand_size), sz2, sz3) if reduce else x
else:
            raise NotImplementedError("Unsupported rank %d" % rank)
return x
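    # Shape sketch for expand_tensor_dim (sizes are illustrative): a rank-2
    # input of shape (B, T) with expand_size=K becomes (B, K, T) after the
    # repeat, and (B * K, T) when reduce=True, i.e. every sample is tiled K
    # times along a new second dimension.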
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""
Aggregate logging outputs from data parallel training.
"""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ue_loss_sum = utils.item(sum(log.get("ue_loss", 0) for log in logging_outputs))
ue_neg_loss_sum = utils.item(
sum(log.get("ue_neg_loss", 0) for log in logging_outputs)
)
ue_pos_loss_sum = utils.item(
sum(log.get("ue_pos_loss", 0) for log in logging_outputs)
)
corr_loss_sum = utils.item(
sum(log.get("corr_loss", 0) for log in logging_outputs)
)
neg_corr_loss_sum = utils.item(
sum(log.get("neg_corr_loss", 0) for log in logging_outputs)
)
pos_corr_loss_sum = utils.item(
sum(log.get("pos_corr_loss", 0) for log in logging_outputs)
)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
ntokens_with_eos = utils.item(
sum(log.get("ntokens_with_eos", 0) for log in logging_outputs)
)
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
corr_nsamples = utils.item(
sum(log.get("corr_nsamples", 0) for log in logging_outputs)
)
pos_ue_nsamples = utils.item(
sum(log.get("pos_ue_nsamples", 0) for log in logging_outputs)
)
neg_ue_nsamples = utils.item(
sum(log.get("neg_ue_nsamples", 0) for log in logging_outputs)
)
pos_corr_nsamples = utils.item(
sum(log.get("pos_corr_nsamples", 0) for log in logging_outputs)
)
neg_corr_nsamples = utils.item(
sum(log.get("neg_corr_nsamples", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"ue_loss", ue_loss_sum / corr_nsamples / math.log(2), corr_nsamples, round=5
)
metrics.log_scalar(
"ue_pos_loss",
ue_pos_loss_sum / pos_ue_nsamples / math.log(2),
pos_ue_nsamples,
round=5,
)
metrics.log_scalar(
"ue_neg_loss",
ue_neg_loss_sum / neg_ue_nsamples / math.log(2),
neg_ue_nsamples,
round=5,
)
metrics.log_scalar(
"corr_loss",
corr_loss_sum / corr_nsamples / math.log(2),
corr_nsamples,
round=5,
)
metrics.log_scalar(
"corr_pos_loss",
pos_corr_loss_sum / pos_corr_nsamples / math.log(2),
pos_corr_nsamples,
round=5,
)
metrics.log_scalar(
"corr_neg_loss",
neg_corr_loss_sum / neg_corr_nsamples / math.log(2),
neg_corr_nsamples,
round=5,
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("ntokens_with_eos", ntokens_with_eos)
metrics.log_scalar("nsentences", nsentences)
metrics.log_scalar("corr_nsamples", corr_nsamples)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["c_errors"].sum * 100.0 / meters["c_total"].sum, 3
)
if meters["c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["w_errors"].sum * 100.0 / meters["w_total"].sum, 3
)
if meters["w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["wv_errors"].sum * 100.0 / meters["w_total"].sum, 3
)
if meters["w_total"].sum > 0
else float("nan"),
)
uem_pred_pos_num = sum(
log.get("uem_pred_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_pred_pos_num", uem_pred_pos_num)
uem_pred_neg_num = sum(
log.get("uem_pred_neg_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_pred_neg_num", uem_pred_neg_num)
uem_label_pos_num = sum(
log.get("uem_label_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_label_pos_num", uem_label_pos_num)
uem_label_neg_num = sum(
log.get("uem_label_neg_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_label_neg_num", uem_label_neg_num)
uem_total_num = sum(log.get("uem_total_num", 0) for log in logging_outputs)
metrics.log_scalar("uem_total_num", uem_total_num)
uem_correct_num = sum(log.get("uem_correct_num", 0) for log in logging_outputs)
metrics.log_scalar("uem_correct_num", uem_correct_num)
uem_recall_correct_pos_num = sum(
log.get("uem_recall_correct_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_recall_correct_pos_num", uem_recall_correct_pos_num)
uem_recall_total_pos_num = sum(
log.get("uem_recall_total_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_recall_total_pos_num", uem_recall_total_pos_num)
uem_precision_correct_pos_num = sum(
log.get("uem_precision_correct_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar(
"uem_precision_correct_pos_num", uem_precision_correct_pos_num
)
uem_precision_total_pos_num = sum(
log.get("uem_precision_total_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_precision_total_pos_num", uem_precision_total_pos_num)
uem_auc_sum = sum(log.get("uem_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_auc_sum", uem_auc_sum)
uem_pr_auc_sum = sum(log.get("uem_pr_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_pr_auc_sum", uem_pr_auc_sum)
uem_pr_neg_auc_sum = sum(
log.get("uem_pr_neg_auc", 0) for log in logging_outputs
)
metrics.log_scalar("uem_pr_neg_auc_sum", uem_pr_neg_auc_sum)
uem_sm_auc_sum = sum(log.get("uem_sm_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_sm_auc_sum", uem_sm_auc_sum)
uem_sm_pr_auc_sum = sum(log.get("uem_sm_pr_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_sm_pr_auc_sum", uem_sm_pr_auc_sum)
uem_sm_pr_neg_auc_sum = sum(
log.get("uem_sm_pr_neg_auc", 0) for log in logging_outputs
)
metrics.log_scalar("uem_sm_pr_neg_auc_sum", uem_sm_pr_neg_auc_sum)
sm_nce_sum = sum(log.get("sm_nce", 0) for log in logging_outputs)
metrics.log_scalar("sm_nce_sum", sm_nce_sum)
uem_nce_sum = sum(log.get("uem_nce", 0) for log in logging_outputs)
metrics.log_scalar("uem_nce_sum", uem_nce_sum)
if uem_correct_num > 0:
metrics.log_derived(
"uem_accuracy",
lambda meters: safe_round(
meters["uem_correct_num"].sum * 100.0 / meters["uem_total_num"].sum,
3,
)
if meters["uem_correct_num"].sum > 0
else float("nan"),
)
metrics.log_derived(
"uem_recall",
lambda meters: safe_round(
meters["uem_recall_correct_pos_num"].sum
* 100.0
/ meters["uem_recall_total_pos_num"].sum,
3,
)
if meters["uem_recall_correct_pos_num"].sum > 0
else float("nan"),
)
metrics.log_derived(
"uem_precision",
lambda meters: safe_round(
meters["uem_precision_correct_pos_num"].sum
* 100.0
/ meters["uem_precision_total_pos_num"].sum,
3,
)
if meters["uem_precision_correct_pos_num"].sum > 0
else float("nan"),
)
corr_total_num = sum(log.get("corr_total_num", 0) for log in logging_outputs)
metrics.log_scalar("corr_total_num", corr_total_num)
corr_correct_num = sum(
log.get("corr_correct_num", 0) for log in logging_outputs
)
metrics.log_scalar("corr_correct_num", corr_correct_num)
corr_correct_pos_num = sum(
log.get("corr_correct_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("corr_correct_pos_num", corr_correct_pos_num)
corr_total_pos_num = sum(
log.get("corr_total_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("corr_total_pos_num", corr_total_pos_num)
if corr_correct_num > 0:
metrics.log_derived(
"corr_accuracy",
lambda meters: safe_round(
meters["corr_correct_num"].sum
* 100.0
/ meters["corr_total_num"].sum,
3,
)
if meters["corr_correct_num"].sum > 0
else float("nan"),
)
metrics.log_derived(
"corr_recall",
lambda meters: safe_round(
meters["corr_correct_pos_num"].sum
* 100.0
/ meters["corr_total_pos_num"].sum,
3,
)
if meters["corr_correct_pos_num"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True improves distributed training speed.
"""
return True
| 42,370
| 41.626761
| 111
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/cif_tf_correction_loss.py
|
# @Time : 2021/7/14
# @Author : Minglun Han
# @File : cif_tf_correction_loss.py
import sys
import math
import editdistance
import numpy as np
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
import sklearn
from sklearn.metrics import auc
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
np.set_printoptions(threshold=100000)
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
"""
:param lprobs: log probabilities with shape B x T x V
:param target: targets with shape B x T
:param epsilon: Epsilon
:param ignore_index: padding index
:param reduce: whether sum all positions loss
:return: smoothed cross entropy loss
"""
# Calculate losses
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1) # B x T x 1
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
# Reduce losses
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
# Get final smoothed cross-entropy loss
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
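# A minimal usage sketch for label_smoothed_nll_loss (shapes and values are
# illustrative, not taken from the training pipeline):
#
#     lprobs = torch.log_softmax(torch.randn(2, 3, 5), dim=-1)  # B x T x V
#     target = torch.randint(0, 5, (2, 3))                      # B x T
#     loss, nll_loss = label_smoothed_nll_loss(
#         lprobs, target, epsilon=0.1, ignore_index=1, reduce=True
#     )
#
# With epsilon = 0 the smoothed loss reduces to the plain NLL; otherwise the
# smoothing mass is spread uniformly over the other V - 1 classes.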
@dataclass
class CifCriterionConfig(FairseqDataclass):
# General settings
zero_infinity: bool = field(
default=False,
metadata={"help": "zero inf loss when source length <= target length"},
)
sentence_avg: bool = II("optimization.sentence_avg")
post_process: str = field(
default="char",
metadata={
"help": "how to post process predictions into words. can be letter, "
"wordpiece, BPE symbols, etc. "
"See fairseq.data.data_utils.post_process() for full list of options"
},
)
# Settings of cif losses
    no_quantity_loss: bool = field(
        default=False, metadata={"help": "disable the quantity loss"}
    )
    no_ctc_loss: bool = field(default=False, metadata={"help": "disable the ctc loss"})
apply_align_loss: bool = field(default=False, metadata={"help": "apply align loss"})
quantity_loss_lambda: float = field(
default=1.0, metadata={"help": "the interpolation weight of quantity loss"}
)
ctc_loss_lambda: float = field(
default=0.25, metadata={"help": "the interpolation weight of ctc loss"}
)
align_loss_lambda: float = field(
default=1.0,
metadata={"help": "the interpolation weight of ctc-constrained alignment loss"},
)
apply_label_smoothing: bool = field(
default=False,
metadata={"help": "apply label smoothing over cross entropy loss"},
)
label_smoothing_type: str = field(
default="uniform", metadata={"help": "specify the label smoothing type"}
)
label_smoothing_rate: float = field(
default=0.1, metadata={"help": "the rate of label smoothing"}
)
no_eos_label: bool = field(default=False)
apply_focal_loss: bool = field(default=False)
focal_loss_gamma: float = field(default=2.0)
no_comb_loss_sum: bool = field(default=False)
# uncertainty estimation loss (UE loss) settings
ue_loss_lambda: float = field(default=1.0)
apply_ue_focal_loss: bool = field(default=False)
ue_focal_scaling_weight: float = field(default=1.0)
ue_focal_loss_gamma: float = field(default=2.0)
    # correction loss (Corr-CE loss) settings
corr_loss_lambda: float = field(default=1.0)
apply_corr_focal_loss: bool = field(default=False)
corr_focal_scaling_weight: float = field(default=1.0)
corr_focal_loss_gamma: float = field(default=2.0)
@register_criterion("cif_tf_correction_loss", dataclass=CifCriterionConfig)
class CifCorrectionLoss(FairseqCriterion):
def __init__(self, cfg: CifCriterionConfig, task: FairseqTask):
super().__init__(task)
self.blank_idx = (
task.target_dictionary.index("<ctc_blank>")
if "<ctc_blank>" in task.target_dictionary.indices
else task.target_dictionary.bos()
)
self.pad_idx = task.target_dictionary.pad() # 1
self.eos_idx = task.target_dictionary.eos() # 2
self.bos_idx = task.target_dictionary.bos() # 0
self.no_correction_idx = len(task.target_dictionary)
self.post_process = cfg.post_process
self.zero_infinity = cfg.zero_infinity
self.sentence_avg = cfg.sentence_avg
# Register losses settings
self.apply_quantity_loss = not cfg.no_quantity_loss
self.apply_ctc_loss = not cfg.no_ctc_loss
self.apply_align_loss = cfg.apply_align_loss
self.quantity_loss_lambda = cfg.quantity_loss_lambda
self.ctc_loss_lambda = cfg.ctc_loss_lambda
self.align_loss_lambda = cfg.align_loss_lambda
# Register label smoothing settings
self.label_smoothing_type = cfg.label_smoothing_type
self.label_smoothing_rate = cfg.label_smoothing_rate
self.apply_label_smoothing = cfg.apply_label_smoothing
self.apply_focal_loss = cfg.apply_focal_loss
self.focal_loss_gamma = cfg.focal_loss_gamma
self.no_eos_label = cfg.no_eos_label
self.no_comb_loss_sum = cfg.no_comb_loss_sum
# Register correction loss settings
self.ue_loss_lambda = cfg.ue_loss_lambda
self.apply_ue_focal_loss = cfg.apply_ue_focal_loss
self.ue_focal_scaling_weight = cfg.ue_focal_scaling_weight
self.ue_focal_loss_gamma = cfg.ue_focal_loss_gamma
self.corr_loss_lambda = cfg.corr_loss_lambda
self.apply_corr_focal_loss = cfg.apply_corr_focal_loss
self.corr_focal_scaling_weight = cfg.corr_focal_scaling_weight
self.corr_focal_loss_gamma = cfg.corr_focal_loss_gamma
def get_loss(self, model, sample, net_output, reduce=True):
# Get model outputs
ctc_logits = net_output["ctc_logits"] # B x T x V
quantity_out = net_output["quantity_out"] # 1
decoder_out = net_output["decoder_out"][0]
ctc_align_outputs = net_output["ctc_align_outputs"]
non_padding_mask = ~net_output["encoder_padding_mask"]
input_lengths = non_padding_mask.int().sum(-1)
cif_out_padding_mask = net_output["cif_out_padding_mask"] # B x T
uem_logits = net_output["uem_logits"] # B x T
uem_labels = net_output["uem_labels"] # B x (1 + K) x T
        cordec_logits = net_output[
            "cordec_logits"
        ]  # B x T x (V + 1); the extra token is the <no-cor> mark
cordec_labels = net_output["cordec_labels"] # B x (1 + K) x T
uem_padding_mask = net_output["uem_padding_mask"].bool() # B x T
# Collect targets and target_length for ctc loss and ce loss
target_lengths = sample["target_lengths"] # targets length without eos
target_with_eos = sample["target"]
target_with_eos_lengths = target_lengths # targets length with eos
if self.no_eos_label:
target_with_eos_lengths = target_with_eos_lengths - 1
target_with_eos = torch.where(
target_with_eos == self.eos_idx,
self.pad_idx * torch.ones_like(target_with_eos),
target_with_eos,
)
adjusted_target_with_eos = target_with_eos
# Calculate the ctc loss on encoder outputs
ctc_loss = torch.tensor(0.0)
if self.apply_ctc_loss:
pad_mask = adjusted_target_with_eos != self.pad_idx
targets_flat = adjusted_target_with_eos.masked_select(pad_mask)
ctc_lprobs = model.get_probs_from_logits(
ctc_logits, log_probs=True
).contiguous() # (B, T, V) from the encoder
target_lengths_for_ctc_loss = target_with_eos_lengths
with torch.backends.cudnn.flags(enabled=False):
ctc_loss = F.ctc_loss(
ctc_lprobs.transpose(0, 1), # T x B x v
targets_flat,
input_lengths,
target_lengths_for_ctc_loss,
blank=self.blank_idx,
reduction="sum",
zero_infinity=self.zero_infinity,
)
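            # Note on shapes (standard F.ctc_loss contract): log-probs are
            # passed as T x B x V, targets_flat is the 1-D concatenation of
            # the unpadded target sequences, and the two length tensors give
            # per-sample valid lengths for inputs and targets respectively.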
# Calculate the ctc alignment loss on cif accumulated weights
align_loss = torch.tensor(0.0)
if self.apply_align_loss and ctc_align_outputs is not None:
align_self_padding_mask = ~(
ctc_align_outputs.eq(0.0)
) # B x T, where padding locations are False
align_loss = torch.abs(ctc_align_outputs - 1.0) * align_self_padding_mask
align_loss = align_loss.sum()
# Calculate the quantity loss
qtt_loss = torch.tensor(0.0)
if self.apply_quantity_loss:
target_lengths_for_qtt_loss = (
target_with_eos_lengths # Lengths after adding eos token, [B]
)
qtt_loss = torch.abs(quantity_out - target_lengths_for_qtt_loss).sum()
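            # In the CIF formulation, quantity_out is the accumulated firing
            # weight per utterance, so this L1 term |quantity_out - target
            # length| pushes the CIF module toward one firing per output token.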
# Calculate the cross-entropy loss
cif_max_len = cif_out_padding_mask.size(1) # Get max length of cif outputs
tgt_max_len = target_with_eos_lengths.max() # Get max length of targets
reg_min_len = min(
cif_max_len, tgt_max_len
) # Obtain the minimum length of cif length and target length
ce_logprobs = model.get_probs_from_logits(
decoder_out, log_probs=True
) # B x T x V
truncated_target = adjusted_target_with_eos[
:, :reg_min_len
] # Truncate target to reg_min_len, B x T
truncated_ce_logprobs = ce_logprobs[
:, :reg_min_len, :
] # Truncate ce probs to reg_min_len, B x T x V
if self.apply_focal_loss:
# truncated_ce_logprobs B x T x V
ce_probs = model.get_probs_from_logits(
decoder_out, log_probs=False
).contiguous()[
:, :reg_min_len, :
] # B x T x V
uncertainty = (1 - ce_probs) ** self.focal_loss_gamma # B x T x V
            truncated_ce_logprobs = (
                uncertainty * truncated_ce_logprobs
            )  # (1 - p_k) ** gamma * log(p_k)
if not self.apply_label_smoothing:
truncated_ce_logprobs = truncated_ce_logprobs.contiguous().view(
-1, truncated_ce_logprobs.size(-1)
) # (B x T) x V
truncated_target = truncated_target.contiguous().view(
-1
) # flatten targets tensor, (B x T)
ce_loss = F.nll_loss(
truncated_ce_logprobs,
truncated_target.long(),
ignore_index=self.pad_idx,
reduction="sum" if reduce else "none",
) # CE loss is the summation of all tokens, without any form of averaging
else:
if self.label_smoothing_type == "uniform":
ce_loss, _ = label_smoothed_nll_loss(
truncated_ce_logprobs,
truncated_target.long(),
self.label_smoothing_rate,
self.pad_idx,
reduce=True if reduce else False,
)
else:
raise NotImplementedError(
"Invalid option: %s" % self.label_smoothing_type
)
# Stop all gradients from ASR losses
ce_loss = ce_loss.detach()
qtt_loss = qtt_loss.detach()
align_loss = align_loss.detach()
ctc_loss = ctc_loss.detach()
num_per_sample = uem_labels.size()[1]
uem_padding_mask = uem_padding_mask[:, :, :reg_min_len]
uem_padding_mask = uem_padding_mask.view(
-1, uem_padding_mask.size(-1)
) # (B x (1 + K)) x T
# Calculate the uncertainty estimation (UE) loss
ue_loss = torch.tensor(0.0)
org_uem_probs, org_uem_labels = None, None
if self.ue_loss_lambda != 0.0:
uem_labels = uem_labels[:, :, :reg_min_len]
uem_labels = uem_labels.view(-1, uem_labels.size(-1)) # (B x (1 + K)) x T
uem_logits = uem_logits[:, :reg_min_len] # B x T
# uem_logits = self.expand_tensor_dim(
# uem_logits, expand_size=num_per_sample, reduce=True) # (B x (1 + K)) x T
uem_probs = torch.sigmoid(uem_logits) # (B x (1 + K)) x T
org_uem_probs = uem_probs # B x T
org_uem_labels = uem_labels # B x T
            scaling_weight = torch.ones_like(uem_labels)[uem_padding_mask]  # flattened over valid positions
if self.apply_ue_focal_loss:
scaling_weight = torch.where(
uem_labels == 1, 1 - uem_probs, uem_probs
) # (B x (1 + K)) x T
scaling_weight = self.ue_focal_scaling_weight * (
scaling_weight**self.ue_focal_loss_gamma
)
scaling_weight = scaling_weight[uem_padding_mask] # (B x (1 + K) x T)
uem_probs = uem_probs[uem_padding_mask] # (B x (1 + K) x T)
uem_labels = uem_labels[uem_padding_mask] # (B x (1 + K) x T)
ue_loss = F.binary_cross_entropy(
uem_probs.float(), uem_labels.float(), reduction="none"
)
ue_loss = (scaling_weight * ue_loss).sum()
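            # Sketch of the focal weighting above (numbers are hypothetical):
            # with ue_focal_loss_gamma = 2.0, a label-1 position predicted at
            # p = 0.9 gets weight (1 - 0.9) ** 2 = 0.01, while a label-0
            # position predicted at p = 0.9 gets 0.9 ** 2 = 0.81, so
            # confidently wrong uncertainty predictions dominate the UE loss.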
# Calculate the correction cross-entropy (Corr-CE) loss
cordec_labels = cordec_labels[:, :, :reg_min_len].view(
-1, cordec_labels.size(-1)
) # (B x (1 + K)) x T
corr_loss = torch.tensor(0.0)
org_corr_probs, org_corr_labels = None, None
if self.corr_loss_lambda != 0.0:
corr_ce_labels = adjusted_target_with_eos[:, :reg_min_len] # B x T
corr_ce_labels = self.expand_tensor_dim(
corr_ce_labels, expand_size=num_per_sample, reduce=True
) # (B x (1 + K)) x T
corr_ce_labels = torch.where(
cordec_labels != self.no_correction_idx,
corr_ce_labels,
cordec_labels,
) # (B x (1 + K)) x T
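            # After this merge: positions that need correction carry the
            # reference token id, while positions marked <no-cor> keep
            # self.no_correction_idx (== len(target_dictionary)), so the
            # decoder learns both what to output and when to leave a token
            # unchanged.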
org_corr_labels = corr_ce_labels
corr_ce_labels = corr_ce_labels.view(-1) # (B x (1 + K) x T)
corr_probs = model.get_probs_from_logits(
cordec_logits, log_probs=True
) # B x T x (V + 1)
            org_corr_probs = corr_probs  # B x T x (V + 1) log-probs
corr_probs = corr_probs[:, :reg_min_len, :].view(
-1, corr_probs.size(-1)
) # (B x (1 + K) x T) x (V + 1)
            corr_scaling_weight = torch.ones_like(corr_ce_labels)  # (B x (1 + K) x T)
if self.apply_corr_focal_loss:
corr_real_probs = model.get_probs_from_logits(
cordec_logits, log_probs=False
) # B x T x (V + 1)
corr_real_probs = self.expand_tensor_dim(
corr_real_probs, expand_size=num_per_sample, reduce=True
) # (B x (1 + K)) x T x (V + 1)
corr_real_probs = corr_real_probs[:, :reg_min_len, :].view(
-1, corr_real_probs.size(-1)
) # (B x (1 + K) x T) x (V + 1)
corr_scaling_weight = F.one_hot(
corr_ce_labels, num_classes=corr_probs.size(-1)
) # (B x (1 + K) x T) x (V + 1)
corr_scaling_weight = (corr_scaling_weight * (1 - corr_real_probs)).sum(
-1
)
corr_scaling_weight = self.corr_focal_scaling_weight * (
corr_scaling_weight**self.corr_focal_loss_gamma
) # (B x (1 + K) x T)
corr_loss = F.nll_loss(
corr_probs.float(),
corr_ce_labels.long(),
ignore_index=self.pad_idx,
reduction="none",
)
corr_loss = (corr_scaling_weight * corr_loss).sum()
# Collect the number of tokens in current batch
ntokens = (
sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item()
)
ntokens_with_eos = target_with_eos_lengths.sum().item()
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
loss = ue_loss * self.ue_loss_lambda + corr_loss * self.corr_loss_lambda
# Build final logging outputs
logging_output = {
"loss": utils.item(loss.data),
"ce_loss": utils.item(ce_loss.data),
"ctc_loss": utils.item(ctc_loss.data),
"align_loss": utils.item(align_loss.data),
"quantity_loss": utils.item(qtt_loss.data),
"ue_loss": utils.item(ue_loss.data),
"corr_loss": utils.item(corr_loss.data),
"ntokens": ntokens,
"ntokens_with_eos": ntokens_with_eos,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
}
# Evaluate on valid sets
if not model.training:
with torch.no_grad():
lprobs_t = ce_logprobs.float().contiguous().cpu()
cif_lengths = cif_out_padding_mask.int().sum(dim=-1) # B x T
c_err = 0
c_len = 0
w_errs = 0
w_len = 0
wv_errs = 0
                # Loop over all hypotheses
for lp, t, inp_l in zip(
lprobs_t, adjusted_target_with_eos, cif_lengths
):
lp = lp[:inp_l].unsqueeze(0)
# Process targets
# p = (t != self.task.target_dictionary.pad()) & (t != self.task.target_dictionary.eos())
p = t != self.task.target_dictionary.pad()
targ = t[p]
targ_units = self.task.target_dictionary.string(targ)
targ_units_arr = targ.tolist()
# Handle lp without elements
if min(lp.shape) == 0:
toks = targ
else:
toks = lp.argmax(dim=-1)
pred_units_arr = toks[
(toks != self.blank_idx) & (toks != self.pad_idx)
].tolist()
# Calculate character error
c_err += editdistance.eval(pred_units_arr, targ_units_arr)
c_len += len(targ_units_arr)
targ_words = post_process(targ_units, self.post_process).split()
pred_units = self.task.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units, self.post_process).split()
# Calculate word error
dist = editdistance.eval(pred_words_raw, targ_words)
w_errs += dist
wv_errs += dist
w_len += len(targ_words)
logging_output["wv_errors"] = wv_errs
logging_output["w_errors"] = w_errs
logging_output["w_total"] = w_len
logging_output["c_errors"] = c_err
logging_output["c_total"] = c_len
# Check uncertainty estimation accuracy on validation set
# if not model.training:
if self.ue_loss_lambda != 0.0:
with torch.no_grad():
uem_probs = org_uem_probs.float().contiguous().cpu() # B x T
uem_labels = org_uem_labels.int().contiguous().cpu() # B x T
uem_lens = uem_padding_mask.int().sum(dim=-1) # B
# Get softmax confidence score / uncertainty score
conf_ce_probs = model.get_probs_from_logits(
decoder_out, log_probs=False
)[
:, :reg_min_len, :
] # B x T x V
extracted_target = adjusted_target_with_eos[:, :reg_min_len] # B x T
onehot_extracted_target = F.one_hot(
extracted_target, num_classes=conf_ce_probs.size(-1)
) # B x T x V
conf_probs = (conf_ce_probs * onehot_extracted_target).sum(-1) # B x T
ue_sm_probs = 1 - conf_probs # B x T
ue_sm_probs = self.expand_tensor_dim(
ue_sm_probs, expand_size=num_per_sample, reduce=True
) # (B x (1+K)) x T
ue_sm_probs = ue_sm_probs.float().contiguous().cpu() # (B x (1+K)) x T
                # Loop over all hypotheses
pred_pos_num, pred_neg_num = 0, 0
label_pos_num, label_neg_num = 0, 0
uem_total_num, uem_correct_num = 0, 0
uem_total_pos_num, uem_correct_pos_num = 0, 0
auc_labels, auc_probs, auc_sm_probs = [], [], []
for probs, sm_probs, label, max_len in zip(
uem_probs, ue_sm_probs, uem_labels, uem_lens
):
probs = probs[:max_len] # T
sm_probs = sm_probs[:max_len] # T
label = label[:max_len] # T
pred = (probs > 0.5).int()
pred_pos_num += (pred == 1).sum()
pred_neg_num += (pred != 1).sum()
label_pos_num += (label == 1).sum()
label_neg_num += (label != 1).sum()
label_pos_locations = (label == 1).bool()
pred_for_recall = pred[label_pos_locations]
label_for_recall = label[label_pos_locations]
uem_correct_pos_num += (pred_for_recall == label_for_recall).sum()
uem_total_pos_num += label_for_recall.size()[0]
comp_res = (pred == label).int()
uem_total_num += comp_res.size()[0]
uem_correct_num += (comp_res == 1).sum()
# Collect labels and probs for the calculation of AUC
auc_labels.append(label)
auc_probs.append(probs)
auc_sm_probs.append(sm_probs)
auc_labels = np.concatenate(auc_labels, axis=0)
auc_probs = np.concatenate(auc_probs, axis=0)
auc_sm_probs = np.concatenate(auc_sm_probs, axis=0)
try:
# For uem probs
uem_auc = roc_auc_score(auc_labels, auc_probs, average=None)
precision, recall, _ = precision_recall_curve(auc_labels, auc_probs)
uem_pr_auc = auc(recall, precision)
# For softmax probs
uem_sm_auc = roc_auc_score(auc_labels, auc_sm_probs, average=None)
precision, recall, _ = precision_recall_curve(
auc_labels, auc_sm_probs
)
uem_sm_pr_auc = auc(recall, precision)
except ValueError:
print("Encounter ValueError, ignore it.")
auc_labels[0] = 1
uem_auc = roc_auc_score(auc_labels, auc_probs, average=None)
precision, recall, _ = precision_recall_curve(auc_labels, auc_probs)
uem_pr_auc = auc(recall, precision)
# For softmax probs
uem_sm_auc = roc_auc_score(auc_labels, auc_sm_probs, average=None)
precision, recall, _ = precision_recall_curve(
auc_labels, auc_sm_probs
)
uem_sm_pr_auc = auc(recall, precision)
logging_output["uem_pred_pos_num"] = pred_pos_num.item()
logging_output["uem_pred_neg_num"] = pred_neg_num.item()
logging_output["uem_label_pos_num"] = label_pos_num.item()
logging_output["uem_label_neg_num"] = label_neg_num.item()
logging_output["uem_total_num"] = uem_total_num
logging_output["uem_total_pos_num"] = uem_total_pos_num
logging_output["uem_correct_num"] = uem_correct_num.item()
logging_output["uem_correct_pos_num"] = uem_correct_pos_num.item()
logging_output["uem_auc"] = uem_auc
logging_output["uem_pr_auc"] = uem_pr_auc
logging_output["uem_sm_auc"] = uem_sm_auc
logging_output["uem_sm_pr_auc"] = uem_sm_pr_auc
# Check correction outputs accuracy on validation set
# if not model.training:
if self.corr_loss_lambda != 0.0:
with torch.no_grad():
corr_probs = org_corr_probs.float().contiguous().cpu() # B x T x V
corr_preds = torch.argmax(corr_probs, dim=-1) # B x T
cordec_labels = org_corr_labels.int().contiguous().cpu() # B x T
uem_lens = uem_padding_mask.int().sum(dim=-1) # B
                # Loop over all hypotheses
corr_total_num, corr_correct_num = 0, 0
corr_total_pos_num, corr_correct_pos_num = 0, 0
for pred, label, max_len in zip(corr_preds, cordec_labels, uem_lens):
pred = pred[:max_len] # T
label = label[:max_len] # T
comp_res = (pred == label).int()
corr_total_num += comp_res.size()[0]
corr_correct_num += (comp_res == 1).sum()
label_pos_locations = (label != self.no_correction_idx).bool()
label_for_recall = label[label_pos_locations]
pred_for_recall = pred[label_pos_locations]
corr_correct_pos_num += (pred_for_recall == label_for_recall).sum()
corr_total_pos_num += label_for_recall.size()[0]
logging_output["corr_total_num"] = corr_total_num
logging_output["corr_correct_num"] = corr_correct_num.item()
logging_output["corr_total_pos_num"] = corr_total_pos_num
logging_output["corr_correct_pos_num"] = corr_correct_pos_num.item()
# Calculate the total loss
if self.no_comb_loss_sum:
loss = (
ue_loss * self.ue_loss_lambda / ntokens
+ corr_loss * self.corr_loss_lambda / ntokens
)
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True):
# forward the whole model
net_output = model(
src_tokens=sample["net_input"]["src_tokens"],
src_lengths=sample["net_input"]["src_lengths"],
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
target_lengths=sample["target_lengths"],
target=sample["target"],
)
loss, sample_size, logging_output = self.get_loss(
model, sample, net_output, reduce=True
)
return loss, sample_size, logging_output
def expand_tensor_dim(self, x, expand_size, target_dim=1, reduce=False):
assert (
target_dim == 1
), "only the expansion at the second dimension is available."
rank = len(x.size())
unsq_x = x.unsqueeze(target_dim)
if rank == 1:
            sz1 = x.size(0)
            x = unsq_x.repeat(1, expand_size)
            x = x.view(sz1 * expand_size) if reduce else x
elif rank == 2:
sz1, sz2 = x.size()
x = unsq_x.repeat(1, expand_size, 1)
x = x.view((sz1 * expand_size), sz2) if reduce else x
elif rank == 3:
sz1, sz2, sz3 = x.size()
x = unsq_x.repeat(1, expand_size, 1, 1)
x = x.view((sz1 * expand_size), sz2, sz3) if reduce else x
else:
            raise NotImplementedError("Unsupported rank %d" % rank)
return x
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""
Aggregate logging outputs from data parallel training.
"""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
ce_loss_sum = utils.item(sum(log.get("ce_loss", 0) for log in logging_outputs))
ctc_loss_sum = utils.item(
sum(log.get("ctc_loss", 0) for log in logging_outputs)
)
align_loss_sum = utils.item(
sum(log.get("align_loss", 0) for log in logging_outputs)
)
quantity_loss_sum = utils.item(
sum(log.get("quantity_loss", 0) for log in logging_outputs)
)
ue_loss_sum = utils.item(sum(log.get("ue_loss", 0) for log in logging_outputs))
corr_loss_sum = utils.item(
sum(log.get("corr_loss", 0) for log in logging_outputs)
)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
ntokens_with_eos = utils.item(
sum(log.get("ntokens_with_eos", 0) for log in logging_outputs)
)
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"ce_loss", ce_loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"ctc_loss", ctc_loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"quantity_loss",
quantity_loss_sum / sample_size / math.log(2),
sample_size,
round=5,
)
metrics.log_scalar(
"align_loss",
align_loss_sum / sample_size / math.log(2),
sample_size,
round=5,
)
metrics.log_scalar(
"ue_loss", ue_loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar(
"corr_loss", corr_loss_sum / sample_size / math.log(2), sample_size, round=5
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("ntokens_with_eos", ntokens_with_eos)
metrics.log_scalar("nsentences", nsentences)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["c_errors"].sum * 100.0 / meters["c_total"].sum, 3
)
if meters["c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["w_errors"].sum * 100.0 / meters["w_total"].sum, 3
)
if meters["w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["wv_errors"].sum * 100.0 / meters["w_total"].sum, 3
)
if meters["w_total"].sum > 0
else float("nan"),
)
uem_pred_pos_num = sum(
log.get("uem_pred_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_pred_pos_num", uem_pred_pos_num)
uem_pred_neg_num = sum(
log.get("uem_pred_neg_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_pred_neg_num", uem_pred_neg_num)
uem_label_pos_num = sum(
log.get("uem_label_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_label_pos_num", uem_label_pos_num)
uem_label_neg_num = sum(
log.get("uem_label_neg_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_label_neg_num", uem_label_neg_num)
uem_total_num = sum(log.get("uem_total_num", 0) for log in logging_outputs)
metrics.log_scalar("uem_total_num", uem_total_num)
uem_correct_num = sum(log.get("uem_correct_num", 0) for log in logging_outputs)
metrics.log_scalar("uem_correct_num", uem_correct_num)
uem_correct_pos_num = sum(
log.get("uem_correct_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_correct_pos_num", uem_correct_pos_num)
uem_total_pos_num = sum(
log.get("uem_total_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("uem_total_pos_num", uem_total_pos_num)
uem_auc_sum = sum(log.get("uem_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_auc_sum", uem_auc_sum)
uem_pr_auc_sum = sum(log.get("uem_pr_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_pr_auc_sum", uem_pr_auc_sum)
uem_sm_auc_sum = sum(log.get("uem_sm_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_sm_auc_sum", uem_sm_auc_sum)
uem_sm_pr_auc_sum = sum(log.get("uem_sm_pr_auc", 0) for log in logging_outputs)
metrics.log_scalar("uem_sm_pr_auc_sum", uem_sm_pr_auc_sum)
if uem_correct_num > 0:
metrics.log_derived(
"uem_accuracy",
lambda meters: safe_round(
meters["uem_correct_num"].sum * 100.0 / meters["uem_total_num"].sum,
3,
)
if meters["uem_correct_num"].sum > 0
else float("nan"),
)
metrics.log_derived(
"uem_recall",
lambda meters: safe_round(
meters["uem_correct_pos_num"].sum
* 100.0
/ meters["uem_total_pos_num"].sum,
3,
)
if meters["uem_correct_pos_num"].sum > 0
else float("nan"),
)
corr_total_num = sum(log.get("corr_total_num", 0) for log in logging_outputs)
metrics.log_scalar("corr_total_num", corr_total_num)
corr_correct_num = sum(
log.get("corr_correct_num", 0) for log in logging_outputs
)
metrics.log_scalar("corr_correct_num", corr_correct_num)
corr_correct_pos_num = sum(
log.get("corr_correct_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("corr_correct_pos_num", corr_correct_pos_num)
corr_total_pos_num = sum(
log.get("corr_total_pos_num", 0) for log in logging_outputs
)
metrics.log_scalar("corr_total_pos_num", corr_total_pos_num)
if corr_correct_num > 0:
metrics.log_derived(
"corr_accuracy",
lambda meters: safe_round(
meters["corr_correct_num"].sum
* 100.0
/ meters["corr_total_num"].sum,
3,
)
if meters["corr_correct_num"].sum > 0
else float("nan"),
)
metrics.log_derived(
"corr_recall",
lambda meters: safe_round(
meters["corr_correct_pos_num"].sum
* 100.0
/ meters["corr_total_pos_num"].sum,
3,
)
if meters["corr_correct_pos_num"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True improves distributed training speed.
"""
return True
| 37,173
| 42.32634
| 109
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/label_smoothed_cross_entropy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class LabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
report_accuracy: bool = field(
default=False,
metadata={"help": "report accuracy metric"},
)
ignore_prefix_size: int = field(
default=0,
metadata={"help": "Ignore first N tokens"},
)
sentence_avg: bool = II("optimization.sentence_avg")
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
@register_criterion(
"label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyCriterionConfig
)
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size=0,
report_accuracy=False,
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.ignore_prefix_size = ignore_prefix_size
self.report_accuracy = report_accuracy
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": loss.data,
"nll_loss": nll_loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output, sample)
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = utils.item(total.data)
return loss, sample_size, logging_output
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
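    # `ignore_prefix_size` drops the first N target positions (e.g. a forced
    # BOS or language-id prefix) from both the log-probs and the targets
    # before the loss is computed.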
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_scalar(
"normal_nll_loss", nll_loss_sum / ntokens / math.log2(2), ntokens, round=3
)
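        # NOTE: math.log2(2) == 1.0, so "normal_nll_loss" above is the
        # per-token NLL left in nats (unlike "nll_loss", which is converted
        # to bits via math.log(2)); "normal_ppl" below is derived from it
        # with base e.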
metrics.log_derived(
"ppl",
lambda meters: utils.get_perplexity(meters["nll_loss"].avg, base=math.e),
)
metrics.log_derived(
"normal_ppl",
lambda meters: utils.get_perplexity(
meters["normal_nll_loss"].avg, base=math.e
),
)
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True improves distributed training speed.
"""
return True
| 7,830
| 36.830918
| 86
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/sentence_ranking.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_ranking")
class SentenceRankingCriterion(FairseqCriterion):
def __init__(self, task, ranking_head_name, save_predictions, num_classes):
super().__init__(task)
self.ranking_head_name = ranking_head_name
if save_predictions is not None:
self.prediction_h = open(save_predictions, "w")
else:
self.prediction_h = None
self.num_classes = num_classes
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--save-predictions', metavar='FILE',
help='file to save predictions to')
parser.add_argument('--ranking-head-name',
default='sentence_classification_head',
help='name of the ranking head to use')
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute ranking loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.ranking_head_name in model.classification_heads
), "model must provide sentence ranking head for --criterion=sentence_ranking"
scores = []
for idx in range(self.num_classes):
score, _ = model(
**sample["net_input{idx}".format(idx=idx + 1)],
classification_head_name=self.ranking_head_name,
)
scores.append(score)
logits = torch.cat(scores, dim=1)
sample_size = logits.size(0)
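        # Input layout implied by the loop above: `sample` carries one encoded
        # candidate per class under keys "net_input1" ... "net_inputN"; each
        # candidate is scored by the ranking head and the per-class scores
        # are concatenated into B x num_classes logits.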
if "target" in sample:
targets = model.get_targets(sample, [logits]).view(-1)
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = F.nll_loss(lprobs, targets, reduction="sum")
else:
targets = None
loss = torch.tensor(0.0, requires_grad=True)
if self.prediction_h is not None:
preds = logits.argmax(dim=1)
for i, (id, pred) in enumerate(zip(sample["id"].tolist(), preds.tolist())):
if targets is not None:
label = targets[i].item()
print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
else:
print("{}\t{}".format(id, pred), file=self.prediction_h)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if targets is not None:
logging_output["ncorrect"] = (logits.argmax(dim=1) == targets).sum()
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
metrics.log_scalar(
"accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True improves distributed training speed.
"""
return True
| 4,614
| 37.140496
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/criterions/composite_loss.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from torch import nn
@register_criterion("composite_loss")
class CompositeLoss(LegacyFairseqCriterion):
"""This is a composite loss that, given a list of model outputs and a list of targets,
computes an average of losses for each output-target pair"""
def __init__(self, args, task):
super().__init__(args, task)
self.underlying_criterion = args.underlying_criterion
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
# fmt: off
parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True,
help='underlying criterion to use for the composite loss')
# fmt: on
@staticmethod
def build_underlying_criterion(args, task):
saved_criterion = args.criterion
args.criterion = args.underlying_criterion
assert saved_criterion != args.underlying_criterion
underlying_criterion = task.build_criterion(args)
args.criterion = saved_criterion
return underlying_criterion
@classmethod
def build_criterion(cls, args, task):
underlying_criterion = CompositeLoss.build_underlying_criterion(args, task)
class FakeModel(nn.Module):
def __init__(self, model, net_out, target):
super().__init__()
self.model = model
self.net_out = net_out
self.target = target
def forward(self, **unused):
return self.net_out
def get_normalized_probs(self, net_output, log_probs, sample=None):
return self.model.get_normalized_probs(
net_output, log_probs, sample=sample
)
def get_targets(self, *unused):
return self.target
@property
def decoder(self):
return self.model.decoder
class _CompositeLoss(LegacyFairseqCriterion):
def __init__(self, args, task, underlying_criterion):
super().__init__(args, task)
self.underlying_criterion = underlying_criterion
def forward(self, model, sample, reduce=True):
net_outputs = model(**sample["net_input"])
targets = sample["target"]
bsz = targets[0].size(0)
loss = net_outputs[0][0].new(1 if reduce else bsz).float().zero_()
sample_size = 0
logging_output = {}
for o, t in zip(net_outputs[0], targets):
m = FakeModel(model, (o, net_outputs[1]), t)
sample["target"] = t
l, ss, logging_output = self.underlying_criterion(m, sample, reduce)
loss += l
sample_size += ss
loss.div_(len(targets))
sample_size /= len(targets)
logging_output["loss"] = utils.item(loss.data) if reduce else loss.data
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
return underlying_criterion.__class__.aggregate_logging_outputs(
logging_outputs
)
@staticmethod
def reduce_metrics(logging_outputs) -> None:
underlying_criterion.__class__.reduce_metrics(logging_outputs)
return _CompositeLoss(args, task, underlying_criterion)
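# --- Illustrative sketch (not part of the original file) --------------------
# The averaging performed by _CompositeLoss.forward, reduced to plain Python:
# the underlying criterion yields one (loss, sample_size) pair per
# output/target pair, and both are averaged over the number of pairs.
# `pairs` is a hypothetical stand-in for those per-pair results.
def _demo_composite_average(pairs):
    loss = sum(l for l, _ in pairs) / len(pairs)
    sample_size = sum(ss for _, ss in pairs) / len(pairs)
    return loss, sample_size

# _demo_composite_average([(2.0, 16), (3.0, 16), (4.0, 16)]) -> (3.0, 16.0)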
# [file stats: 3,793 chars · avg line 36.6 · max line 93 · py]
# ── CIF-HieraDist · CIF-HieraDist-main/fairseq/models/multilingual_transformer.py ──
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from fairseq import utils
from fairseq.models import (
FairseqMultiModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
Embedding,
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture,
)
from fairseq.utils import safe_hasattr
@register_model("multilingual_transformer")
class MultilingualTransformerModel(FairseqMultiModel):
"""Train Transformer models for multiple language pairs simultaneously.
Requires `--task multilingual_translation`.
We inherit all arguments from TransformerModel and assume that all language
pairs use a single Transformer architecture. In addition, we provide several
options that are specific to the multilingual setting.
Args:
--share-encoder-embeddings: share encoder embeddings across all source languages
--share-decoder-embeddings: share decoder embeddings across all target languages
--share-encoders: share all encoder params (incl. embeddings) across all source languages
--share-decoders: share all decoder params (incl. embeddings) across all target languages
"""
def __init__(self, encoders, decoders):
super().__init__(encoders, decoders)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--share-encoder-embeddings",
action="store_true",
help="share encoder embeddings across languages",
)
parser.add_argument(
"--share-decoder-embeddings",
action="store_true",
help="share decoder embeddings across languages",
)
parser.add_argument(
"--share-encoders",
action="store_true",
help="share encoders across languages",
)
parser.add_argument(
"--share-decoders",
action="store_true",
help="share decoders across languages",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
assert isinstance(task, MultilingualTranslationTask)
# make sure all arguments are present in older models
base_multilingual_architecture(args)
if not safe_hasattr(args, "max_source_positions"):
args.max_source_positions = 1024
if not safe_hasattr(args, "max_target_positions"):
args.max_target_positions = 1024
src_langs = [lang_pair.split("-")[0] for lang_pair in task.model_lang_pairs]
tgt_langs = [lang_pair.split("-")[1] for lang_pair in task.model_lang_pairs]
if args.share_encoders:
args.share_encoder_embeddings = True
if args.share_decoders:
args.share_decoder_embeddings = True
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
# build shared embeddings (if applicable)
shared_encoder_embed_tokens, shared_decoder_embed_tokens = None, None
if args.share_all_embeddings:
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=task.langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
shared_decoder_embed_tokens = shared_encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
if args.share_encoder_embeddings:
shared_encoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=src_langs,
embed_dim=args.encoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.encoder_embed_path,
)
if args.share_decoder_embeddings:
shared_decoder_embed_tokens = FairseqMultiModel.build_shared_embeddings(
dicts=task.dicts,
langs=tgt_langs,
embed_dim=args.decoder_embed_dim,
build_embedding=build_embedding,
pretrained_embed_path=args.decoder_embed_path,
)
# encoders/decoders for each language
lang_encoders, lang_decoders = {}, {}
def get_encoder(lang):
if lang not in lang_encoders:
if shared_encoder_embed_tokens is not None:
encoder_embed_tokens = shared_encoder_embed_tokens
else:
encoder_embed_tokens = build_embedding(
task.dicts[lang],
args.encoder_embed_dim,
args.encoder_embed_path,
)
lang_encoders[lang] = cls._get_module_class(
True, args, task.dicts[lang], encoder_embed_tokens, src_langs
)
return lang_encoders[lang]
def get_decoder(lang):
if lang not in lang_decoders:
if shared_decoder_embed_tokens is not None:
decoder_embed_tokens = shared_decoder_embed_tokens
else:
decoder_embed_tokens = build_embedding(
task.dicts[lang],
args.decoder_embed_dim,
args.decoder_embed_path,
)
lang_decoders[lang] = cls._get_module_class(
False, args, task.dicts[lang], decoder_embed_tokens, tgt_langs
)
return lang_decoders[lang]
# shared encoders/decoders (if applicable)
shared_encoder, shared_decoder = None, None
if args.share_encoders:
shared_encoder = get_encoder(src_langs[0])
if args.share_decoders:
shared_decoder = get_decoder(tgt_langs[0])
encoders, decoders = OrderedDict(), OrderedDict()
for lang_pair, src, tgt in zip(task.model_lang_pairs, src_langs, tgt_langs):
encoders[lang_pair] = (
shared_encoder if shared_encoder is not None else get_encoder(src)
)
decoders[lang_pair] = (
shared_decoder if shared_decoder is not None else get_decoder(tgt)
)
return MultilingualTransformerModel(encoders, decoders)
@classmethod
def _get_module_class(cls, is_encoder, args, lang_dict, embed_tokens, langs):
module_class = TransformerEncoder if is_encoder else TransformerDecoder
return module_class(args, lang_dict, embed_tokens)
def load_state_dict(self, state_dict, strict=True, model_cfg=None):
state_dict_subset = state_dict.copy()
for k, _ in state_dict.items():
assert k.startswith("models.")
lang_pair = k.split(".")[1]
if lang_pair not in self.models:
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict, model_cfg=model_cfg)
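# --- Illustrative sketch (not part of the original file) --------------------
# What the filtering in load_state_dict above does: checkpoint keys are
# namespaced as "models.<lang_pair>.<param>", and keys whose language pair
# is absent from the current model are dropped before loading. Toy dict; no
# fairseq needed.
def _demo_filter_multilingual_state(state_dict, model_lang_pairs):
    return {
        k: v for k, v in state_dict.items() if k.split(".")[1] in model_lang_pairs
    }

# _demo_filter_multilingual_state(
#     {"models.en-de.encoder.w": 1, "models.en-fr.encoder.w": 2}, {"en-de"}
# ) -> {"models.en-de.encoder.w": 1}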
@register_model_architecture("multilingual_transformer", "multilingual_transformer")
def base_multilingual_architecture(args):
base_architecture(args)
args.share_encoder_embeddings = getattr(args, "share_encoder_embeddings", False)
args.share_decoder_embeddings = getattr(args, "share_decoder_embeddings", False)
args.share_encoders = getattr(args, "share_encoders", False)
args.share_decoders = getattr(args, "share_decoders", False)
@register_model_architecture(
"multilingual_transformer", "multilingual_transformer_iwslt_de_en"
)
def multilingual_transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_multilingual_architecture(args)
# [file stats: 9,570 chars · avg line 40.6 · max line 102 · py]
# ── CIF-HieraDist · CIF-HieraDist-main/fairseq/models/lstm_lm.py ──
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.lstm import Embedding, LSTMDecoder
DEFAULT_MAX_TARGET_POSITIONS = 1e5
@register_model("lstm_lm")
class LSTMLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
help='decoder hidden size')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='number of decoder layers')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
help='decoder attention')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--residuals', default=False,
action='store_true',
help='apply residuals between LSTM layers')
# Granular dropout settings (if not specified these default to --dropout)
parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
help='dropout probability for decoder input embedding')
parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
help='dropout probability for decoder output')
parser.add_argument('--share-decoder-input-output-embed', default=False,
action='store_true',
help='share decoder input and output embeddings')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if getattr(args, "max_target_positions", None) is not None:
max_target_positions = args.max_target_positions
else:
max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim
)
if args.share_decoder_input_output_embed:
# double check all parameters combinations are valid
if task.source_dictionary != task.target_dictionary:
raise ValueError(
"--share-decoder-input-output-embeddings requires a joint dictionary"
)
if args.decoder_embed_dim != args.decoder_out_embed_dim:
raise ValueError(
"--share-decoder-input-output-embeddings requires "
"--decoder-embed-dim to match --decoder-out-embed-dim"
)
decoder = LSTMDecoder(
dictionary=task.dictionary,
embed_dim=args.decoder_embed_dim,
hidden_size=args.decoder_hidden_size,
out_embed_dim=args.decoder_out_embed_dim,
num_layers=args.decoder_layers,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
attention=False, # decoder-only language model doesn't support attention
encoder_output_units=0,
pretrained_embed=pretrained_decoder_embed,
share_input_output_embed=args.share_decoder_input_output_embed,
adaptive_softmax_cutoff=(
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == "adaptive_loss"
else None
),
max_target_positions=max_target_positions,
residuals=args.residuals,
)
return cls(decoder)
@register_model_architecture("lstm_lm", "lstm_lm")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_hidden_size = getattr(
args, "decoder_hidden_size", args.decoder_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 1)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
args.decoder_attention = getattr(args, "decoder_attention", "0")
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
args.residuals = getattr(args, "residuals", False)
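# --- Illustrative sketch (not part of the original file) --------------------
# The adaptive_softmax_cutoff default above is a comma-separated string that
# build_model parses to ints (via utils.eval_str_list) before handing it to
# AdaptiveSoftmax. A minimal equivalent for this plain comma-separated form:
def _demo_parse_cutoff(expr):
    return [int(x) for x in expr.split(",")]

# _demo_parse_cutoff("10000,50000,200000") -> [10000, 50000, 200000]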
# [file stats: 6,423 chars · avg line 43.9 · max line 91 · py]
# ── CIF-HieraDist · CIF-HieraDist-main/fairseq/models/transformer_align.py ──
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerModel,
base_architecture,
transformer_wmt_en_de_big,
)
@register_model("transformer_align")
class TransformerAlignModel(TransformerModel):
"""
See "Jointly Learning to Align and Translate with Transformer
Models" (Garg et al., EMNLP 2019).
"""
def __init__(self, encoder, decoder, args):
super().__init__(args, encoder, decoder)
self.alignment_heads = args.alignment_heads
self.alignment_layer = args.alignment_layer
self.full_context_alignment = args.full_context_alignment
@staticmethod
def add_args(parser):
# fmt: off
super(TransformerAlignModel, TransformerAlignModel).add_args(parser)
parser.add_argument('--alignment-heads', type=int, metavar='D',
help='Number of cross attention heads per layer to be supervised with alignments')
parser.add_argument('--alignment-layer', type=int, metavar='D',
help='Layer number which has to be supervised. 0 corresponds to the bottommost layer.')
parser.add_argument('--full-context-alignment', action='store_true',
help='Whether or not alignment is supervised conditioned on the full target context.')
# fmt: on
@classmethod
def build_model(cls, args, task):
# set any default arguments
transformer_align(args)
transformer_model = TransformerModel.build_model(args, task)
return TransformerAlignModel(
transformer_model.encoder, transformer_model.decoder, args
)
def forward(self, src_tokens, src_lengths, prev_output_tokens):
encoder_out = self.encoder(src_tokens, src_lengths)
return self.forward_decoder(prev_output_tokens, encoder_out)
def forward_decoder(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
features_only=False,
**extra_args,
):
attn_args = {
"alignment_layer": self.alignment_layer,
"alignment_heads": self.alignment_heads,
}
decoder_out = self.decoder(prev_output_tokens, encoder_out, **attn_args)
if self.full_context_alignment:
attn_args["full_context_alignment"] = self.full_context_alignment
_, alignment_out = self.decoder(
prev_output_tokens,
encoder_out,
features_only=True,
**attn_args,
**extra_args,
)
decoder_out[1]["attn"] = alignment_out["attn"]
return decoder_out
@register_model_architecture("transformer_align", "transformer_align")
def transformer_align(args):
args.alignment_heads = getattr(args, "alignment_heads", 1)
args.alignment_layer = getattr(args, "alignment_layer", 4)
args.full_context_alignment = getattr(args, "full_context_alignment", False)
base_architecture(args)
@register_model_architecture("transformer_align", "transformer_wmt_en_de_big_align")
def transformer_wmt_en_de_big_align(args):
args.alignment_heads = getattr(args, "alignment_heads", 1)
args.alignment_layer = getattr(args, "alignment_layer", 4)
transformer_wmt_en_de_big(args)
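# --- Illustrative note (not part of the original file) ----------------------
# Control flow of TransformerAlignModel.forward_decoder above, in brief:
#   1. a standard decoding pass produces decoder_out, attending with the
#      configured alignment_layer / alignment_heads;
#   2. if full_context_alignment is set, a second features-only pass
#      recomputes attention with the full target context (i.e. without the
#      causal self-attention mask), and its "attn" replaces
#      decoder_out[1]["attn"].
# The extra pass roughly doubles decoder compute when enabled.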
# [file stats: 3,532 chars · avg line 36.6 · max line 117 · py]
# ── CIF-HieraDist · CIF-HieraDist-main/fairseq/models/transformer_from_pretrained_xlm.py ──
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict
from fairseq import checkpoint_utils
from fairseq.data.legacy.masked_lm_dictionary import MaskedLMDictionary
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
base_architecture as transformer_base_architecture,
)
@register_model("transformer_from_pretrained_xlm")
class TransformerFromPretrainedXLMModel(TransformerModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--pretrained-xlm-checkpoint",
type=str,
metavar="STR",
help="XLM model to use for initializing transformer encoder and/or decoder",
)
parser.add_argument(
"--init-encoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into decoder",
)
parser.add_argument(
"--init-decoder-only",
action="store_true",
help="if set, don't load the XLM weights and embeddings into encoder",
)
@classmethod
def build_model(cls, args, task, cls_dictionary=MaskedLMDictionary):
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"You must specify a path for --pretrained-xlm-checkpoint to use "
"--arch transformer_from_pretrained_xlm"
)
assert isinstance(task.source_dictionary, cls_dictionary) and isinstance(
task.target_dictionary, cls_dictionary
), (
"You should use a MaskedLMDictionary when using --arch "
"transformer_from_pretrained_xlm because the pretrained XLM model "
"was trained using data binarized with MaskedLMDictionary. "
"For translation, you may want to use --task "
"translation_from_pretrained_xlm"
)
assert not (
getattr(args, "init_encoder_only", False)
and getattr(args, "init_decoder_only", False)
), "Only one of --init-encoder-only and --init-decoder-only can be set."
return super().build_model(args, task)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return TransformerEncoderFromPretrainedXLM(args, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return TransformerDecoderFromPretrainedXLM(args, tgt_dict, embed_tokens)
def upgrade_state_dict_with_xlm_weights(
state_dict: Dict[str, Any], pretrained_xlm_checkpoint: str
) -> Dict[str, Any]:
"""
Load XLM weights into a Transformer encoder or decoder model.
Args:
state_dict: state dict for either TransformerEncoder or
TransformerDecoder
pretrained_xlm_checkpoint: checkpoint to load XLM weights from
Raises:
AssertionError: If architecture (num layers, attention heads, etc.)
does not match between the current Transformer encoder or
decoder and the pretrained_xlm_checkpoint
"""
if not os.path.exists(pretrained_xlm_checkpoint):
raise IOError("Model file not found: {}".format(pretrained_xlm_checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(pretrained_xlm_checkpoint)
xlm_state_dict = state["model"]
for key in xlm_state_dict.keys():
for search_key in ["embed_tokens", "embed_positions", "layers"]:
if search_key in key:
subkey = key[key.find(search_key) :]
assert subkey in state_dict, (
"{} Transformer encoder / decoder "
"state_dict does not contain {}. Cannot "
"load {} from pretrained XLM checkpoint "
"{} into Transformer.".format(
str(state_dict.keys()), subkey, key, pretrained_xlm_checkpoint
)
)
state_dict[subkey] = xlm_state_dict[key]
return state_dict
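# --- Illustrative sketch (not part of the original file) --------------------
# The key-matching rule above on toy dicts: any XLM key containing
# "embed_tokens", "embed_positions" or "layers" is copied into the target
# state_dict under its suffix starting at the matched token.
def _demo_upgrade_with_xlm(state_dict, xlm_state_dict):
    for key, value in xlm_state_dict.items():
        for search_key in ["embed_tokens", "embed_positions", "layers"]:
            if search_key in key:
                subkey = key[key.find(search_key):]
                state_dict[subkey] = value
    return state_dict

# _demo_upgrade_with_xlm({"layers.0.weight": 0}, {"encoder.layers.0.weight": 7})
# -> {"layers.0.weight": 7}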
class TransformerEncoderFromPretrainedXLM(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, "init_decoder_only", False):
# Don't load XLM weights for encoder if --init-decoder-only
return
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"--pretrained-xlm-checkpoint must be specified to load Transformer "
"encoder from pretrained XLM"
)
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
state_dict=self.state_dict(),
pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
class TransformerDecoderFromPretrainedXLM(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
if getattr(args, "init_encoder_only", False):
# Don't load XLM weights for decoder if --init-encoder-only
return
assert hasattr(args, "pretrained_xlm_checkpoint"), (
"--pretrained-xlm-checkpoint must be specified to load Transformer "
"decoder from pretrained XLM"
)
xlm_loaded_state_dict = upgrade_state_dict_with_xlm_weights(
state_dict=self.state_dict(),
pretrained_xlm_checkpoint=args.pretrained_xlm_checkpoint,
)
self.load_state_dict(xlm_loaded_state_dict, strict=True)
@register_model_architecture(
"transformer_from_pretrained_xlm", "transformer_from_pretrained_xlm"
)
def base_architecture(args):
transformer_base_architecture(args)
# [file stats: 6,076 chars · avg line 38.7 · max line 88 · py]
# ── CIF-HieraDist · CIF-HieraDist-main/fairseq/models/lstm.py ──
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import AdaptiveSoftmax, FairseqDropout
from torch import Tensor
DEFAULT_MAX_SOURCE_POSITIONS = 1e5
DEFAULT_MAX_TARGET_POSITIONS = 1e5
@register_model("lstm")
class LSTMModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-freeze-embed', action='store_true',
help='freeze encoder embeddings')
parser.add_argument('--encoder-hidden-size', type=int, metavar='N',
help='encoder hidden size')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='number of encoder layers')
parser.add_argument('--encoder-bidirectional', action='store_true',
help='make all layers of encoder bidirectional')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-freeze-embed', action='store_true',
help='freeze decoder embeddings')
parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
help='decoder hidden size')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='number of decoder layers')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
help='decoder attention')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--share-decoder-input-output-embed', default=False,
action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', default=False, action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
# Granular dropout settings (if not specified these default to --dropout)
parser.add_argument('--encoder-dropout-in', type=float, metavar='D',
help='dropout probability for encoder input embedding')
parser.add_argument('--encoder-dropout-out', type=float, metavar='D',
help='dropout probability for encoder output')
parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
help='dropout probability for decoder input embedding')
parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
help='dropout probability for decoder output')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
if args.encoder_layers != args.decoder_layers:
raise ValueError("--encoder-layers must match --decoder-layers")
max_source_positions = getattr(
args, "max_source_positions", DEFAULT_MAX_SOURCE_POSITIONS
)
max_target_positions = getattr(
args, "max_target_positions", DEFAULT_MAX_TARGET_POSITIONS
)
def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
embed_dict = utils.parse_embedding(embed_path)
utils.print_embed_overlap(embed_dict, dictionary)
return utils.load_embedding(embed_dict, dictionary, embed_tokens)
if args.encoder_embed_path:
pretrained_encoder_embed = load_pretrained_embedding_from_file(
args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim
)
else:
num_embeddings = len(task.source_dictionary)
pretrained_encoder_embed = Embedding(
num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad()
)
if args.share_all_embeddings:
# double check all parameters combinations are valid
if task.source_dictionary != task.target_dictionary:
raise ValueError("--share-all-embeddings requires a joint dictionary")
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embed not compatible with --decoder-embed-path"
)
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to "
"match --decoder-embed-dim"
)
pretrained_decoder_embed = pretrained_encoder_embed
args.share_decoder_input_output_embed = True
else:
# separate decoder input embeddings
pretrained_decoder_embed = None
if args.decoder_embed_path:
pretrained_decoder_embed = load_pretrained_embedding_from_file(
args.decoder_embed_path,
task.target_dictionary,
args.decoder_embed_dim,
)
# one last double check of parameter combinations
if args.share_decoder_input_output_embed and (
args.decoder_embed_dim != args.decoder_out_embed_dim
):
raise ValueError(
"--share-decoder-input-output-embeddings requires "
"--decoder-embed-dim to match --decoder-out-embed-dim"
)
if args.encoder_freeze_embed:
pretrained_encoder_embed.weight.requires_grad = False
if args.decoder_freeze_embed:
pretrained_decoder_embed.weight.requires_grad = False
encoder = LSTMEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
hidden_size=args.encoder_hidden_size,
num_layers=args.encoder_layers,
dropout_in=args.encoder_dropout_in,
dropout_out=args.encoder_dropout_out,
bidirectional=args.encoder_bidirectional,
pretrained_embed=pretrained_encoder_embed,
max_source_positions=max_source_positions,
)
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
hidden_size=args.decoder_hidden_size,
out_embed_dim=args.decoder_out_embed_dim,
num_layers=args.decoder_layers,
dropout_in=args.decoder_dropout_in,
dropout_out=args.decoder_dropout_out,
attention=utils.eval_bool(args.decoder_attention),
encoder_output_units=encoder.output_units,
pretrained_embed=pretrained_decoder_embed,
share_input_output_embed=args.share_decoder_input_output_embed,
adaptive_softmax_cutoff=(
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == "adaptive_loss"
else None
),
max_target_positions=max_target_positions,
residuals=False,
)
return cls(encoder, decoder)
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
)
return decoder_out
class LSTMEncoder(FairseqEncoder):
"""LSTM encoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
bidirectional=False,
left_pad=True,
pretrained_embed=None,
padding_idx=None,
max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS,
):
super().__init__(dictionary)
self.num_layers = num_layers
self.dropout_in_module = FairseqDropout(
dropout_in * 1.0, module_name=self.__class__.__name__
)
self.dropout_out_module = FairseqDropout(
dropout_out * 1.0, module_name=self.__class__.__name__
)
self.bidirectional = bidirectional
self.hidden_size = hidden_size
self.max_source_positions = max_source_positions
num_embeddings = len(dictionary)
self.padding_idx = padding_idx if padding_idx is not None else dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
else:
self.embed_tokens = pretrained_embed
self.lstm = LSTM(
input_size=embed_dim,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=self.dropout_out_module.p if num_layers > 1 else 0.0,
bidirectional=bidirectional,
)
self.left_pad = left_pad
self.output_units = hidden_size
if bidirectional:
self.output_units *= 2
def forward(
self,
src_tokens: Tensor,
src_lengths: Tensor,
enforce_sorted: bool = True,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of
shape `(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of
shape `(batch)`
enforce_sorted (bool, optional): if True, `src_tokens` is
expected to contain sequences sorted by length in a
decreasing order. If False, this condition is not
required. Default: True.
"""
if self.left_pad:
# nn.utils.rnn.pack_padded_sequence requires right-padding;
# convert left-padding to right-padding
src_tokens = utils.convert_padding_direction(
src_tokens,
torch.zeros_like(src_tokens).fill_(self.padding_idx),
left_to_right=True,
)
bsz, seqlen = src_tokens.size()
# embed tokens
x = self.embed_tokens(src_tokens)
x = self.dropout_in_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# pack embedded source tokens into a PackedSequence
packed_x = nn.utils.rnn.pack_padded_sequence(
x, src_lengths.cpu(), enforce_sorted=enforce_sorted
)
# apply LSTM
if self.bidirectional:
state_size = 2 * self.num_layers, bsz, self.hidden_size
else:
state_size = self.num_layers, bsz, self.hidden_size
h0 = x.new_zeros(*state_size)
c0 = x.new_zeros(*state_size)
packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))
# unpack outputs and apply dropout
x, _ = nn.utils.rnn.pad_packed_sequence(
packed_outs, padding_value=self.padding_idx * 1.0
)
x = self.dropout_out_module(x)
assert list(x.size()) == [seqlen, bsz, self.output_units]
if self.bidirectional:
final_hiddens = self.combine_bidir(final_hiddens, bsz)
final_cells = self.combine_bidir(final_cells, bsz)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t()
return tuple(
(
x, # seq_len x batch x hidden
final_hiddens, # num_layers x batch x num_directions*hidden
final_cells, # num_layers x batch x num_directions*hidden
encoder_padding_mask, # seq_len x batch
)
)
def combine_bidir(self, outs, bsz: int):
out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous()
return out.view(self.num_layers, bsz, -1)
def reorder_encoder_out(
self, encoder_out: Tuple[Tensor, Tensor, Tensor, Tensor], new_order
):
return tuple(
(
encoder_out[0].index_select(1, new_order),
encoder_out[1].index_select(1, new_order),
encoder_out[2].index_select(1, new_order),
encoder_out[3].index_select(1, new_order),
)
)
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.max_source_positions
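# --- Illustrative note (not part of the original file) ----------------------
# The encoder's forward returns a plain 4-tuple rather than a named
# structure, and downstream code indexes it positionally:
#   outs, hiddens, cells, pad_mask = encoder_out
#   outs:     (seq_len, bsz, output_units)
#   hiddens:  (num_layers, bsz, num_directions * hidden_size)
#   cells:    (num_layers, bsz, num_directions * hidden_size)
#   pad_mask: (seq_len, bsz)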
class AttentionLayer(nn.Module):
def __init__(self, input_embed_dim, source_embed_dim, output_embed_dim, bias=False):
super().__init__()
self.input_proj = Linear(input_embed_dim, source_embed_dim, bias=bias)
self.output_proj = Linear(
input_embed_dim + source_embed_dim, output_embed_dim, bias=bias
)
def forward(self, input, source_hids, encoder_padding_mask):
# input: bsz x input_embed_dim
# source_hids: srclen x bsz x source_embed_dim
# x: bsz x source_embed_dim
x = self.input_proj(input)
# compute attention
attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
# don't attend over padding
if encoder_padding_mask is not None:
attn_scores = (
attn_scores.float()
.masked_fill_(encoder_padding_mask, float("-inf"))
.type_as(attn_scores)
) # FP16 support: cast to float and back
attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz
# sum weighted sources
x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
x = torch.tanh(self.output_proj(torch.cat((x, input), dim=1)))
return x, attn_scores
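# --- Illustrative sketch (not part of the original file) --------------------
# Shape check for AttentionLayer, assuming the torch import at the top of
# this file: input is (bsz, input_embed_dim), source_hids is
# (srclen, bsz, source_embed_dim); the layer returns a (bsz, output_embed_dim)
# context vector plus (srclen, bsz) attention weights.
def _demo_attention_shapes():
    attn = AttentionLayer(input_embed_dim=8, source_embed_dim=16, output_embed_dim=8)
    input = torch.randn(2, 8)            # bsz=2
    source_hids = torch.randn(5, 2, 16)  # srclen=5
    out, scores = attn(input, source_hids, encoder_padding_mask=None)
    assert out.shape == (2, 8) and scores.shape == (5, 2)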
class LSTMDecoder(FairseqIncrementalDecoder):
"""LSTM decoder."""
def __init__(
self,
dictionary,
embed_dim=512,
hidden_size=512,
out_embed_dim=512,
num_layers=1,
dropout_in=0.1,
dropout_out=0.1,
attention=True,
encoder_output_units=512,
pretrained_embed=None,
share_input_output_embed=False,
adaptive_softmax_cutoff=None,
max_target_positions=DEFAULT_MAX_TARGET_POSITIONS,
residuals=False,
):
super().__init__(dictionary)
self.dropout_in_module = FairseqDropout(
dropout_in * 1.0, module_name=self.__class__.__name__
)
self.dropout_out_module = FairseqDropout(
dropout_out * 1.0, module_name=self.__class__.__name__
)
self.hidden_size = hidden_size
self.share_input_output_embed = share_input_output_embed
self.need_attn = True
self.max_target_positions = max_target_positions
self.residuals = residuals
self.num_layers = num_layers
self.adaptive_softmax = None
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
if pretrained_embed is None:
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
else:
self.embed_tokens = pretrained_embed
self.encoder_output_units = encoder_output_units
if encoder_output_units != hidden_size and encoder_output_units != 0:
self.encoder_hidden_proj = Linear(encoder_output_units, hidden_size)
self.encoder_cell_proj = Linear(encoder_output_units, hidden_size)
else:
self.encoder_hidden_proj = self.encoder_cell_proj = None
# disable input feeding if there is no encoder
# input feeding is described in arxiv.org/abs/1508.04025
input_feed_size = 0 if encoder_output_units == 0 else hidden_size
self.layers = nn.ModuleList(
[
LSTMCell(
input_size=input_feed_size + embed_dim
if layer == 0
else hidden_size,
hidden_size=hidden_size,
)
for layer in range(num_layers)
]
)
if attention:
# TODO make bias configurable
self.attention = AttentionLayer(
hidden_size, encoder_output_units, hidden_size, bias=False
)
else:
self.attention = None
if hidden_size != out_embed_dim:
self.additional_fc = Linear(hidden_size, out_embed_dim)
if adaptive_softmax_cutoff is not None:
# setting adaptive_softmax dropout to dropout_out for now but can be redefined
self.adaptive_softmax = AdaptiveSoftmax(
num_embeddings,
hidden_size,
adaptive_softmax_cutoff,
dropout=dropout_out,
)
elif not self.share_input_output_embed:
self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
src_lengths: Optional[Tensor] = None,
):
x, attn_scores = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
return self.output_layer(x), attn_scores
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Tuple[Tensor, Tensor, Tensor, Tensor]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
"""
Similar to *forward* but only return features.
"""
# get outputs from encoder
if encoder_out is not None:
encoder_outs = encoder_out[0]
encoder_hiddens = encoder_out[1]
encoder_cells = encoder_out[2]
encoder_padding_mask = encoder_out[3]
else:
encoder_outs = torch.empty(0)
encoder_hiddens = torch.empty(0)
encoder_cells = torch.empty(0)
encoder_padding_mask = torch.empty(0)
srclen = encoder_outs.size(0)
if incremental_state is not None and len(incremental_state) > 0:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# embed tokens
x = self.embed_tokens(prev_output_tokens)
x = self.dropout_in_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental generation)
if incremental_state is not None and len(incremental_state) > 0:
prev_hiddens, prev_cells, input_feed = self.get_cached_state(
incremental_state
)
elif encoder_out is not None:
# setup recurrent cells
prev_hiddens = [encoder_hiddens[i] for i in range(self.num_layers)]
prev_cells = [encoder_cells[i] for i in range(self.num_layers)]
if self.encoder_hidden_proj is not None:
prev_hiddens = [self.encoder_hidden_proj(y) for y in prev_hiddens]
prev_cells = [self.encoder_cell_proj(y) for y in prev_cells]
input_feed = x.new_zeros(bsz, self.hidden_size)
else:
# setup zero cells, since there is no encoder
zero_state = x.new_zeros(bsz, self.hidden_size)
prev_hiddens = [zero_state for i in range(self.num_layers)]
prev_cells = [zero_state for i in range(self.num_layers)]
input_feed = None
assert (
srclen > 0 or self.attention is None
), "attention is not supported if there are no encoder outputs"
attn_scores: Optional[Tensor] = (
x.new_zeros(srclen, seqlen, bsz) if self.attention is not None else None
)
outs = []
for j in range(seqlen):
# input feeding: concatenate context vector from previous time step
if input_feed is not None:
input = torch.cat((x[j, :, :], input_feed), dim=1)
else:
input = x[j]
for i, rnn in enumerate(self.layers):
# recurrent cell
hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
# hidden state becomes the input to the next layer
input = self.dropout_out_module(hidden)
if self.residuals:
input = input + prev_hiddens[i]
# save state for next time step
prev_hiddens[i] = hidden
prev_cells[i] = cell
# apply attention using the last layer's hidden state
if self.attention is not None:
assert attn_scores is not None
out, attn_scores[:, j, :] = self.attention(
hidden, encoder_outs, encoder_padding_mask
)
else:
out = hidden
out = self.dropout_out_module(out)
# input feeding
if input_feed is not None:
input_feed = out
# save final output
outs.append(out)
# Stack all the necessary tensors together and store
prev_hiddens_tensor = torch.stack(prev_hiddens)
prev_cells_tensor = torch.stack(prev_cells)
cache_state = torch.jit.annotate(
Dict[str, Optional[Tensor]],
{
"prev_hiddens": prev_hiddens_tensor,
"prev_cells": prev_cells_tensor,
"input_feed": input_feed,
},
)
self.set_incremental_state(incremental_state, "cached_state", cache_state)
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
if hasattr(self, "additional_fc") and self.adaptive_softmax is None:
x = self.additional_fc(x)
x = self.dropout_out_module(x)
# srclen x tgtlen x bsz -> bsz x tgtlen x srclen
if not self.training and self.need_attn and self.attention is not None:
assert attn_scores is not None
attn_scores = attn_scores.transpose(0, 2)
else:
attn_scores = None
return x, attn_scores
def output_layer(self, x):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
if self.share_input_output_embed:
x = F.linear(x, self.embed_tokens.weight)
else:
x = self.fc_out(x)
return x
def get_cached_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
) -> Tuple[List[Tensor], List[Tensor], Optional[Tensor]]:
cached_state = self.get_incremental_state(incremental_state, "cached_state")
assert cached_state is not None
prev_hiddens_ = cached_state["prev_hiddens"]
assert prev_hiddens_ is not None
prev_cells_ = cached_state["prev_cells"]
assert prev_cells_ is not None
prev_hiddens = [prev_hiddens_[i] for i in range(self.num_layers)]
prev_cells = [prev_cells_[j] for j in range(self.num_layers)]
input_feed = cached_state[
"input_feed"
] # can be None for decoder-only language models
return prev_hiddens, prev_cells, input_feed
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
if incremental_state is None or len(incremental_state) == 0:
return
prev_hiddens, prev_cells, input_feed = self.get_cached_state(incremental_state)
prev_hiddens = [p.index_select(0, new_order) for p in prev_hiddens]
prev_cells = [p.index_select(0, new_order) for p in prev_cells]
if input_feed is not None:
input_feed = input_feed.index_select(0, new_order)
cached_state_new = torch.jit.annotate(
Dict[str, Optional[Tensor]],
{
"prev_hiddens": torch.stack(prev_hiddens),
"prev_cells": torch.stack(prev_cells),
"input_feed": input_feed,
},
)
self.set_incremental_state(incremental_state, "cached_state", cached_state_new)
return
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.max_target_positions
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.uniform_(m.weight, -0.1, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
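# --- Illustrative sketch (not part of the original file) --------------------
# Quick check of the initializer above: weights are uniform in [-0.1, 0.1]
# and the padding row is zeroed out.
def _demo_embedding_init():
    m = Embedding(num_embeddings=10, embedding_dim=4, padding_idx=0)
    assert float(m.weight[0].abs().sum()) == 0.0
    assert float(m.weight.abs().max()) <= 0.1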
def LSTM(input_size, hidden_size, **kwargs):
m = nn.LSTM(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def LSTMCell(input_size, hidden_size, **kwargs):
m = nn.LSTMCell(input_size, hidden_size, **kwargs)
for name, param in m.named_parameters():
if "weight" in name or "bias" in name:
param.data.uniform_(-0.1, 0.1)
return m
def Linear(in_features, out_features, bias=True, dropout=0.0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
m.weight.data.uniform_(-0.1, 0.1)
if bias:
m.bias.data.uniform_(-0.1, 0.1)
return m
@register_model_architecture("lstm", "lstm")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False)
args.encoder_hidden_size = getattr(
args, "encoder_hidden_size", args.encoder_embed_dim
)
args.encoder_layers = getattr(args, "encoder_layers", 1)
args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False)
args.decoder_hidden_size = getattr(
args, "decoder_hidden_size", args.decoder_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 1)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
args.decoder_attention = getattr(args, "decoder_attention", "1")
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
@register_model_architecture("lstm", "lstm_wiseman_iwslt_de_en")
def lstm_wiseman_iwslt_de_en(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_dropout_in = getattr(args, "encoder_dropout_in", 0)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.decoder_dropout_in = getattr(args, "decoder_dropout_in", 0)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
base_architecture(args)
@register_model_architecture("lstm", "lstm_luong_wmt_en_de")
def lstm_luong_wmt_en_de(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1000)
args.encoder_layers = getattr(args, "encoder_layers", 4)
args.encoder_dropout_out = getattr(args, "encoder_dropout_out", 0)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1000)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 1000)
args.decoder_dropout_out = getattr(args, "decoder_dropout_out", 0)
base_architecture(args)
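# --- Illustrative note (not part of the original file) ----------------------
# Why the named architectures above can safely end by calling
# base_architecture: every default is applied via the pattern below, so a
# value set earlier (by the user or by a more specific architecture such as
# lstm_wiseman_iwslt_de_en) wins over the base default.
def _demo_arch_default(args, name, default):
    setattr(args, name, getattr(args, name, default))

# e.g. encoder_embed_dim stays 256 after lstm_wiseman_iwslt_de_en sets it,
# even though base_architecture would otherwise default it to 512.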
# [file stats: 30,557 chars · avg line 39.4 · max line 91 · py]
# ── CIF-HieraDist · CIF-HieraDist-main/fairseq/models/lightconv_lm.py ──
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.lightconv import Embedding, LightConvDecoder
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
@register_model("lightconv_lm")
class LightConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout",
default=0.1,
type=float,
metavar="D",
help="dropout probability",
)
parser.add_argument(
"--attention-dropout",
default=0.0,
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--relu-dropout",
default=0.0,
type=float,
metavar="D",
help="dropout probability after ReLU in FFN",
)
parser.add_argument(
"--input-dropout",
type=float,
metavar="D",
help="dropout probability of the inputs",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-output-dim",
type=int,
metavar="N",
help="decoder output dimension",
)
parser.add_argument(
"--decoder-input-dim", type=int, metavar="N", help="decoder input dimension"
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads or LightConv/DynamicConv heads",
)
parser.add_argument(
"--decoder-normalize-before",
default=False,
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--adaptive-softmax-cutoff",
metavar="EXPR",
help="comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion",
)
parser.add_argument(
"--adaptive-softmax-dropout",
type=float,
metavar="D",
help="sets adaptive softmax dropout for the tail projections",
)
parser.add_argument(
"--adaptive-softmax-factor",
type=float,
metavar="N",
help="adaptive input factor",
)
parser.add_argument(
"--no-token-positional-embeddings",
default=False,
action="store_true",
help="if set, disables positional embeddings (outside self attention)",
)
parser.add_argument(
"--share-decoder-input-output-embed",
default=False,
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--character-embeddings",
default=False,
action="store_true",
help="if set, uses character embedding convolutions to produce token embeddings",
)
parser.add_argument(
"--character-filters",
type=str,
metavar="LIST",
default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]",
help="size of character embeddings",
)
parser.add_argument(
"--character-embedding-dim",
type=int,
metavar="N",
default=4,
help="size of character embeddings",
)
parser.add_argument(
"--char-embedder-highway-layers",
type=int,
metavar="N",
default=2,
help="number of highway layers for character token embeddder",
)
parser.add_argument(
"--adaptive-input",
default=False,
action="store_true",
help="if set, uses adaptive input",
)
parser.add_argument(
"--adaptive-input-factor",
type=float,
metavar="N",
help="adaptive input factor",
)
parser.add_argument(
"--adaptive-input-cutoff",
metavar="EXPR",
help="comma separated list of adaptive input cutoff points.",
)
parser.add_argument(
"--tie-adaptive-weights",
action="store_true",
help="if set, ties the weights of adaptive softmax and adaptive input",
)
parser.add_argument(
"--tie-adaptive-proj",
action="store_true",
help="if set, ties the projection weights of adaptive softmax and adaptive input",
)
parser.add_argument(
"--decoder-learned-pos",
action="store_true",
help="use learned positional embeddings in the decoder",
)
"""LightConv and DynamicConv arguments"""
parser.add_argument(
"--decoder-kernel-size-list",
type=lambda x: utils.eval_str_list(x, int),
help='list of kernel size (default: "[3,7,15,31,31,31]")',
)
parser.add_argument(
"--decoder-glu", type=utils.eval_bool, help="glu after in proj"
)
parser.add_argument(
"--decoder-conv-type",
default="dynamic",
type=str,
choices=["dynamic", "lightweight"],
help="type of convolution",
)
parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool)
parser.add_argument(
"--weight-dropout",
type=float,
metavar="D",
help="dropout probability for conv weights",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = args.tokens_per_sample
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = args.tokens_per_sample
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(
task.dictionary,
eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(
len(task.dictionary),
task.dictionary.pad(),
args.decoder_input_dim,
args.adaptive_input_factor,
args.decoder_embed_dim,
utils.eval_str_list(args.adaptive_input_cutoff, type=int),
)
else:
embed_tokens = Embedding(
len(task.dictionary), args.decoder_input_dim, task.dictionary.pad()
)
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert (
args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
), "{} != {}".format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff
)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = LightConvDecoder(
args,
task.output_dictionary,
embed_tokens,
no_encoder_attn=True,
final_norm=False,
)
return LightConvLanguageModel(decoder)
@register_model_architecture("lightconv_lm", "lightconv_lm")
def base_lm_architecture(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.character_embeddings = getattr(args, "character_embeddings", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim)
# The model training is not stable without this
args.decoder_normalize_before = True
args.adaptive_input = getattr(args, "adaptive_input", False)
args.adaptive_input_factor = getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = getattr(args, "tie_adaptive_proj", False)
args.decoder_kernel_size_list = getattr(
args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31]
)
if len(args.decoder_kernel_size_list) == 1:
args.decoder_kernel_size_list = (
args.decoder_kernel_size_list * args.decoder_layers
)
assert (
len(args.decoder_kernel_size_list) == args.decoder_layers
), "decoder_kernel_size_list doesn't match decoder_layers"
args.decoder_glu = getattr(args, "decoder_glu", True)
args.input_dropout = getattr(args, "input_dropout", 0.1)
args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout)
@register_model_architecture("lightconv_lm", "lightconv_lm_gbw")
def lightconv_lm_gbw(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
base_lm_architecture(args)
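# --- Illustrative sketch (not part of the original file) --------------------
# The kernel-size broadcasting rule from base_lm_architecture above: a
# length-1 list is repeated once per decoder layer, and the final length
# must equal decoder_layers.
def _demo_kernel_list(kernel_size_list, decoder_layers):
    if len(kernel_size_list) == 1:
        kernel_size_list = kernel_size_list * decoder_layers
    assert len(kernel_size_list) == decoder_layers
    return kernel_size_list

# _demo_kernel_list([15], 6) -> [15, 15, 15, 15, 15, 15]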
# [file stats: 11,154 chars · avg line 35.3 · max line 94 · py]
# ── CIF-HieraDist · CIF-HieraDist-main/fairseq/models/masked_lm.py ──
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
SinusoidalPositionalEmbedding,
TransformerSentenceEncoder,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import safe_hasattr
logger = logging.getLogger(__name__)
@register_model("masked_lm")
class MaskedLMModel(FairseqEncoderModel):
"""
Class for training a Masked Language Model. It also supports an
additional sentence level prediction if the sent-loss argument is set.
"""
def __init__(self, args, encoder):
super().__init__(encoder)
self.args = args
# if specified then apply bert initialization on the model. We need
# to explicitly call this to make sure that the output embeddings
# and projection layers are also correctly initialized
if getattr(args, "apply_bert_init", False):
self.apply(init_bert_params)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# Arguments related to dropout
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for" " attention weights",
)
parser.add_argument(
"--act-dropout",
type=float,
metavar="D",
help="dropout probability after" " activation in FFN",
)
# Arguments related to hidden states and self-attention
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
# Arguments related to input and output embeddings
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--share-encoder-input-output-embed",
action="store_true",
help="share encoder input" " and output embeddings",
)
parser.add_argument(
"--encoder-learned-pos",
action="store_true",
help="use learned positional embeddings in the encoder",
)
parser.add_argument(
"--no-token-positional-embeddings",
action="store_true",
help="if set, disables positional embeddings" " (outside self attention)",
)
parser.add_argument(
"--num-segment", type=int, metavar="N", help="num segment in the input"
)
parser.add_argument(
"--max-positions", type=int, help="number of positional embeddings to learn"
)
# Arguments related to sentence level prediction
parser.add_argument(
"--sentence-class-num",
type=int,
metavar="N",
help="number of classes for sentence task",
)
parser.add_argument(
"--sent-loss",
action="store_true",
help="if set," " calculate sentence level predictions",
)
# Arguments related to parameter initialization
parser.add_argument(
"--apply-bert-init",
action="store_true",
help="use custom param initialization for BERT",
)
# misc params
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="Which activation function to use for pooler layer.",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
def forward(self, src_tokens, segment_labels=None, **kwargs):
return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs)
def max_positions(self):
return self.encoder.max_positions
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not safe_hasattr(args, "max_positions"):
args.max_positions = args.tokens_per_sample
logger.info(args)
encoder = MaskedLMEncoder(args, task.dictionary)
return cls(args, encoder)
class MaskedLMEncoder(FairseqEncoder):
"""
Encoder for Masked Language Modelling.
"""
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.padding_idx = dictionary.pad()
        self.vocab_size = len(dictionary)
self.max_positions = args.max_positions
self.sentence_encoder = TransformerSentenceEncoder(
padding_idx=self.padding_idx,
vocab_size=self.vocab_size,
num_encoder_layers=args.encoder_layers,
embedding_dim=args.encoder_embed_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.act_dropout,
max_seq_len=self.max_positions,
num_segments=args.num_segment,
use_position_embeddings=not args.no_token_positional_embeddings,
encoder_normalize_before=args.encoder_normalize_before,
apply_bert_init=args.apply_bert_init,
activation_fn=args.activation_fn,
learned_pos_embedding=args.encoder_learned_pos,
)
self.share_input_output_embed = args.share_encoder_input_output_embed
self.embed_out = None
self.sentence_projection_layer = None
self.sentence_out_dim = args.sentence_class_num
self.lm_output_learned_bias = None
# Remove head is set to true during fine-tuning
self.load_softmax = not getattr(args, "remove_head", False)
self.masked_lm_pooler = nn.Linear(
args.encoder_embed_dim, args.encoder_embed_dim
)
self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn)
self.lm_head_transform_weight = nn.Linear(
args.encoder_embed_dim, args.encoder_embed_dim
)
self.activation_fn = utils.get_activation_fn(args.activation_fn)
self.layer_norm = LayerNorm(args.encoder_embed_dim)
if self.load_softmax:
self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size))
if not self.share_input_output_embed:
self.embed_out = nn.Linear(
args.encoder_embed_dim, self.vocab_size, bias=False
)
if args.sent_loss:
self.sentence_projection_layer = nn.Linear(
args.encoder_embed_dim, self.sentence_out_dim, bias=False
)
def forward(self, src_tokens, segment_labels=None, masked_tokens=None, **unused):
"""
Forward pass for Masked LM encoder. This first computes the token
embedding using the token embedding matrix, position embeddings (if
specified) and segment embeddings (if specified).
Here we assume that the sentence representation corresponds to the
output of the classification_token (see bert_task or cross_lingual_lm
task for more details).
Args:
- src_tokens: B x T matrix representing sentences
- segment_labels: B x T matrix representing segment label for tokens
Returns:
- a tuple of the following:
- logits for predictions in format B x T x C to be used in
softmax afterwards
- a dictionary of additional data, where 'pooled_output' contains
the representation for classification_token and 'inner_states'
is a list of internal model states used to compute the
                    predictions (similar to ELMo). 'sentence_logits'
is the prediction logit for NSP task and is only computed if
this is specified in the input arguments.
"""
inner_states, sentence_rep = self.sentence_encoder(
src_tokens,
segment_labels=segment_labels,
)
x = inner_states[-1].transpose(0, 1)
# project masked tokens only
if masked_tokens is not None:
x = x[masked_tokens, :]
x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x)))
pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep))
# project back to size of vocabulary
if self.share_input_output_embed and hasattr(
self.sentence_encoder.embed_tokens, "weight"
):
x = F.linear(x, self.sentence_encoder.embed_tokens.weight)
elif self.embed_out is not None:
x = self.embed_out(x)
if self.lm_output_learned_bias is not None:
x = x + self.lm_output_learned_bias
sentence_logits = None
if self.sentence_projection_layer:
sentence_logits = self.sentence_projection_layer(pooled_output)
return x, {
"inner_states": inner_states,
"pooled_output": pooled_output,
"sentence_logits": sentence_logits,
}
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.max_positions
def upgrade_state_dict_named(self, state_dict, name):
if isinstance(
self.sentence_encoder.embed_positions, SinusoidalPositionalEmbedding
):
state_dict[
name + ".sentence_encoder.embed_positions._float_tensor"
] = torch.FloatTensor(1)
if not self.load_softmax:
for k in list(state_dict.keys()):
if (
"embed_out.weight" in k
or "sentence_projection_layer.weight" in k
or "lm_output_learned_bias" in k
):
del state_dict[k]
return state_dict
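# Editor's sketch (not in the original file): how the encoder above gathers
# masked positions and ties the output projection to the input embedding;
# all names and sizes in this demo are hypothetical.
def _masked_lm_head_demo():
    emb = nn.Embedding(10, 8)
    x = torch.randn(2, 5, 8)  # B x T x C
    masked_tokens = torch.zeros(2, 5, dtype=torch.bool)
    masked_tokens[0, 1] = True
    masked_tokens[1, 3] = True
    h = x[masked_tokens, :]  # num_masked x C
    logits = F.linear(h, emb.weight)  # num_masked x vocab_size
    assert logits.shape == (2, 10)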
@register_model_architecture("masked_lm", "masked_lm")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.act_dropout = getattr(args, "act_dropout", 0.0)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", False
)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.num_segment = getattr(args, "num_segment", 2)
args.sentence_class_num = getattr(args, "sentence_class_num", 2)
args.sent_loss = getattr(args, "sent_loss", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
@register_model_architecture("masked_lm", "bert_base")
def bert_base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.num_segment = getattr(args, "num_segment", 2)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 3072)
args.sentence_class_num = getattr(args, "sentence_class_num", 2)
args.sent_loss = getattr(args, "sent_loss", True)
args.apply_bert_init = getattr(args, "apply_bert_init", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
base_architecture(args)
@register_model_architecture("masked_lm", "bert_large")
def bert_large_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
bert_base_architecture(args)
@register_model_architecture("masked_lm", "xlm_base")
def xlm_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.share_encoder_input_output_embed = getattr(
args, "share_encoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.num_segment = getattr(args, "num_segment", 1)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.sent_loss = getattr(args, "sent_loss", False)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.apply_bert_init = getattr(args, "apply_bert_init", True)
base_architecture(args)
CIF-HieraDist-main/fairseq/models/model_utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
import torch
from torch import Tensor
@torch.jit.script
def script_skip_tensor_list(x: List[Tensor], mask):
res = [xi[mask] if xi.size(0) == mask.size(0) else xi[:, mask] for xi in x]
outputs = []
for i, t in enumerate(res):
if t.numel() != 0:
outputs.append(t)
else:
outputs.append(x[i])
return outputs
@torch.jit.script
def script_skip_tensor(x: Tensor, mask):
    # empty-tensor case: nothing to select
if x.size(0) == 0:
return x
res = x[mask] if x.size(0) == mask.size(0) else x[:, mask]
if res.numel() == 0:
return x
else:
return res
@torch.jit.script
def expand_2d_or_3d_tensor(x, trg_dim: int, padding_idx: int):
"""
Expand 2D/3D tensor on dim=1
"""
if x is None:
return None
assert x.dim() == 2 or x.dim() == 3
assert trg_dim >= x.size(1), (trg_dim, x.size())
if trg_dim == x.size(1):
return x
dims = [x.size(0), trg_dim - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, torch.zeros(dims).to(x).fill_(padding_idx)], 1)
return x
@torch.jit.script
def coalesce(x: Optional[Tensor], y: Tensor) -> Tensor:
return x if x is not None else y
@torch.jit.script
def fill_tensors(
x: Optional[Tensor], mask, y: Optional[Tensor], padding_idx: int
) -> Optional[Tensor]:
"""
Filling tensor x with y at masked positions (dim=0).
"""
if x is None or x.size()[0] == 0 or y is None:
return x
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
if n_selected == 0:
return x
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
x = expand_2d_or_3d_tensor(x, y.size(1), padding_idx)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = torch.tensor(padding_idx).type_as(x)
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
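# Editor's sketch (not in the original file) exercising the helpers above;
# the tensors and values here are hypothetical.
def _model_utils_demo():
    x = torch.arange(6).view(3, 2)  # rows: [0, 1], [2, 3], [4, 5]
    mask = torch.tensor([True, False, True])
    # keep only the rows selected by the mask
    assert script_skip_tensor(x, mask).size(0) == 2
    # pad dim=1 from 2 up to 4 with the padding index
    assert expand_2d_or_3d_tensor(x.clone(), 4, padding_idx=0).size() == (3, 4)
    # write the replacement rows into the masked rows of x
    z = fill_tensors(
        x.clone(), mask, torch.full((2, 2), 9, dtype=torch.long), padding_idx=0
    )
    assert z[0].tolist() == [9, 9] and z[1].tolist() == [2, 3]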
CIF-HieraDist-main/fairseq/models/fconv_lm.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.fconv import FConvDecoder
from fairseq.utils import safe_hasattr
@register_model("fconv_lm")
class FConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-layers",
type=str,
metavar="EXPR",
help="decoder layers [(dim, kernel_size), ...]",
)
parser.add_argument(
"--decoder-out-embed-dim",
type=int,
metavar="N",
help="decoder output embedding dimension",
)
parser.add_argument(
"--adaptive-softmax-cutoff",
metavar="EXPR",
help="comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion",
)
parser.add_argument(
"--adaptive-softmax-dropout",
type=float,
metavar="D",
help="sets adaptive softmax dropout for the tail projections",
)
parser.add_argument(
"--decoder-attention",
type=str,
metavar="EXPR",
help="decoder attention [True, ...]",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if safe_hasattr(args, "max_target_positions") and not safe_hasattr(
args, "tokens_per_sample"
):
args.tokens_per_sample = args.max_target_positions
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.tokens_per_sample,
share_embed=False,
positional_embeddings=False,
adaptive_softmax_cutoff=(
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == "adaptive_loss"
else None
),
adaptive_softmax_dropout=args.adaptive_softmax_dropout,
)
return FConvLanguageModel(decoder)
@register_model_architecture("fconv_lm", "fconv_lm")
def base_lm_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13")
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103")
def fconv_lm_dauphin_wikitext103(args):
layers = "[(850, 6)] * 3"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 5)] * 4"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 4)] * 3"
layers += " + [(1024, 4)] * 1"
layers += " + [(2048, 4)] * 1"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,20000,200000"
)
base_lm_architecture(args)
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw")
def fconv_lm_dauphin_gbw(args):
layers = "[(512, 5)]"
layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3"
layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3"
layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6"
layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
base_lm_architecture(args)
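# Editor's sketch (not in the original file): --decoder-layers is a Python
# expression that build_model() above expands with eval() into a list of
# (dim, kernel_size) tuples; the spec string here is hypothetical.
def _layer_spec_demo():
    spec = "[(850, 6)] * 3 + [(1024, 4)] * 1"
    layers = eval(spec)
    assert len(layers) == 4 and layers[0] == (850, 6) and layers[-1] == (1024, 4)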
CIF-HieraDist-main/fairseq/models/composite_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .fairseq_encoder import FairseqEncoder
class CompositeEncoder(FairseqEncoder):
"""
A wrapper around a dictionary of :class:`FairseqEncoder` objects.
We run forward on each encoder and return a dictionary of outputs. The first
encoder's dictionary is used for initialization.
Args:
encoders (dict): a dictionary of :class:`FairseqEncoder` objects.
"""
def __init__(self, encoders):
super().__init__(next(iter(encoders.values())).dictionary)
self.encoders = encoders
for key in self.encoders:
self.add_module(key, self.encoders[key])
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
Returns:
dict:
the outputs from each Encoder
"""
encoder_out = {}
for key in self.encoders:
encoder_out[key] = self.encoders[key](src_tokens, src_lengths)
return encoder_out
def reorder_encoder_out(self, encoder_out, new_order):
"""Reorder encoder output according to new_order."""
for key in self.encoders:
encoder_out[key] = self.encoders[key].reorder_encoder_out(
encoder_out[key], new_order
)
return encoder_out
def max_positions(self):
return min(self.encoders[key].max_positions() for key in self.encoders)
def upgrade_state_dict(self, state_dict):
for key in self.encoders:
self.encoders[key].upgrade_state_dict(state_dict)
return state_dict
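# Editor's sketch (not in the original file): wrapping two toy encoders and
# reading back one output per key. `_ToyEncoder` and the inputs are
# hypothetical; only torch and a default fairseq Dictionary are assumed.
def _composite_encoder_demo():
    import torch
    from fairseq.data import Dictionary
    class _ToyEncoder(FairseqEncoder):
        def forward(self, src_tokens, src_lengths):
            return {"repr": src_tokens.float()}
    d = Dictionary()
    enc = CompositeEncoder({"a": _ToyEncoder(d), "b": _ToyEncoder(d)})
    out = enc(torch.tensor([[4, 5, 6]]), torch.tensor([3]))
    assert set(out) == {"a", "b"}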
CIF-HieraDist-main/fairseq/models/fairseq_encoder.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, NamedTuple, Optional
import torch
import torch.nn as nn
from torch import Tensor
EncoderOut = NamedTuple(
"EncoderOut",
[
("encoder_out", Tensor), # T x B x C
("encoder_padding_mask", Optional[Tensor]), # B x T
("encoder_embedding", Optional[Tensor]), # B x T x C
("encoder_states", Optional[List[Tensor]]), # List[T x B x C]
("src_tokens", Optional[Tensor]), # B x T
("src_lengths", Optional[Tensor]), # B x 1
],
)
class FairseqEncoder(nn.Module):
"""Base class for encoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
def forward(self, src_tokens, src_lengths=None, **kwargs):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
"""
raise NotImplementedError
def forward_torchscript(self, net_input: Dict[str, Tensor]):
"""A TorchScript-compatible version of forward.
Encoders which use additional arguments may want to override
this method for TorchScript compatibility.
"""
if torch.jit.is_scripting():
return self.forward(
src_tokens=net_input["src_tokens"],
src_lengths=net_input["src_lengths"],
)
else:
return self.forward_non_torchscript(net_input)
@torch.jit.unused
def forward_non_torchscript(self, net_input: Dict[str, Tensor]):
encoder_input = {
k: v for k, v in net_input.items() if k != "prev_output_tokens"
}
return self.forward(**encoder_input)
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to `new_order`.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
`encoder_out` rearranged according to `new_order`
"""
raise NotImplementedError
def max_positions(self):
"""Maximum input length supported by the encoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code."""
return state_dict
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
def _apply(m):
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
self.apply(_apply)
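# Editor's sketch (not in the original file): constructing an EncoderOut with
# the shapes documented above (T x B x C output, B x T padding mask).
def _encoder_out_demo():
    t, b, c = 4, 2, 8
    out = EncoderOut(
        encoder_out=torch.randn(t, b, c),
        encoder_padding_mask=torch.zeros(b, t, dtype=torch.bool),
        encoder_embedding=None,
        encoder_states=None,
        src_tokens=None,
        src_lengths=None,
    )
    assert out.encoder_out.shape == (t, b, c)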
CIF-HieraDist-main/fairseq/models/fconv.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
BeamableMM,
FairseqDropout,
GradMultiply,
LearnedPositionalEmbedding,
LinearizedConvolution,
)
@register_model("fconv")
class FConvModel(FairseqEncoderDecoderModel):
"""
A fully convolutional model, i.e. a convolutional encoder and a
convolutional decoder, as described in `"Convolutional Sequence to Sequence
Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_.
Args:
encoder (FConvEncoder): the encoder
decoder (FConvDecoder): the decoder
The Convolutional model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.fconv_parser
:prog:
"""
@classmethod
def hub_models(cls):
def moses_subword(path):
return {
"path": path,
"tokenizer": "moses",
"bpe": "subword_nmt",
}
return {
"conv.wmt14.en-fr": moses_subword(
"https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2"
),
"conv.wmt14.en-de": moses_subword(
"https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2"
),
"conv.wmt17.en-de": moses_subword(
"https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2"
),
}
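    # Editor's note (not in the original file): these entries are meant to be
    # loaded through torch.hub, e.g. (exact kwargs vary by fairseq version):
    #   en2fr = torch.hub.load("pytorch/fairseq", "conv.wmt14.en-fr",
    #                          tokenizer="moses", bpe="subword_nmt")
    #   en2fr.translate("Hello world!")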
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(
layer is not None for layer in decoder.attention
)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
parser.add_argument('--share-input-output-embed', action='store_true',
help='share input and output embeddings (requires'
' --decoder-out-embed-dim and --decoder-embed-dim'
' to be equal)')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted (in case there are any new ones)
base_architecture(args)
encoder_embed_dict = None
if args.encoder_embed_path:
encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path)
utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary)
decoder_embed_dict = None
if args.decoder_embed_path:
decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)
utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)
encoder = FConvEncoder(
dictionary=task.source_dictionary,
embed_dim=args.encoder_embed_dim,
embed_dict=encoder_embed_dict,
convolutions=eval(args.encoder_layers),
dropout=args.dropout,
max_positions=args.max_source_positions,
)
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
embed_dict=decoder_embed_dict,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_out_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.max_target_positions,
share_embed=args.share_input_output_embed,
)
return FConvModel(encoder, decoder)
class FConvEncoder(FairseqEncoder):
"""
Convolutional encoder consisting of `len(convolutions)` layers.
Args:
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_dim (int, optional): embedding dimension
embed_dict (str, optional): filename from which to load pre-trained
embeddings
max_positions (int, optional): maximum supported input sequence length
convolutions (list, optional): the convolutional layer structure. Each
list item `i` corresponds to convolutional layer `i`. Layers are
given as ``(out_channels, kernel_width, [residual])``. Residual
connections are added between layers when ``residual=1`` (which is
the default behavior).
dropout (float, optional): dropout to be applied before each conv layer
"""
def __init__(
self,
dictionary,
embed_dim=512,
embed_dict=None,
max_positions=1024,
convolutions=((512, 3),) * 20,
dropout=0.1,
):
super().__init__(dictionary)
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.num_attention_layers = None
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(
embed_dict, self.dictionary, self.embed_tokens
)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
self.padding_idx,
)
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for _, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(
Linear(residual_dim, out_channels)
if residual_dim != out_channels
else None
)
if kernel_size % 2 == 1:
padding = kernel_size // 2
else:
padding = 0
self.convolutions.append(
ConvTBC(
in_channels,
out_channels * 2,
kernel_size,
dropout=dropout,
padding=padding,
)
)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): lengths of each source sentence of shape
`(batch)`
Returns:
dict:
- **encoder_out** (tuple): a tuple with two elements, where the
first element is the last encoder layer's output and the
second element is the same quantity summed with the input
embedding (used for attention). The shape of both tensors is
`(batch, src_len, embed_dim)`.
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
x = self.dropout_module(x)
input_embedding = x
# project to size of convolution
x = self.fc1(x)
# used to mask padding in input
encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B
if not encoder_padding_mask.any():
encoder_padding_mask = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
residuals = [x]
# temporal convolutions
for proj, conv, res_layer in zip(
self.projections, self.convolutions, self.residuals
):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
x = self.dropout_module(x)
if conv.kernel_size[0] % 2 == 1:
# padding is implicit in the conv
x = conv(x)
else:
padding_l = (conv.kernel_size[0] - 1) // 2
padding_r = conv.kernel_size[0] // 2
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# project back to size of embedding
x = self.fc2(x)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.t() # -> B x T
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
# scale gradients (this only affects backward, not forward)
x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
# add output to input embedding for attention
y = (x + input_embedding) * math.sqrt(0.5)
return {
"encoder_out": (x, y),
"encoder_padding_mask": encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = (
encoder_out["encoder_out"][0].index_select(0, new_order),
encoder_out["encoder_out"][1].index_select(0, new_order),
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions
class AttentionLayer(nn.Module):
def __init__(self, conv_channels, embed_dim, bmm=None):
super().__init__()
# projects from output of convolution to embedding dimension
self.in_projection = Linear(conv_channels, embed_dim)
# projects from embedding dimension to convolution size
self.out_projection = Linear(embed_dim, conv_channels)
self.bmm = bmm if bmm is not None else torch.bmm
def forward(self, x, target_embedding, encoder_out, encoder_padding_mask):
residual = x
# attention
x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5)
x = self.bmm(x, encoder_out[0])
# don't attend over padding
if encoder_padding_mask is not None:
x = (
x.float()
.masked_fill(encoder_padding_mask.unsqueeze(1), float("-inf"))
.type_as(x)
) # FP16 support: cast to float and back
# softmax over last dim
sz = x.size()
x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)
x = x.view(sz)
attn_scores = x
x = self.bmm(x, encoder_out[1])
# scale attention output (respecting potentially different lengths)
s = encoder_out[1].size(1)
if encoder_padding_mask is None:
x = x * (s * math.sqrt(1.0 / s))
else:
s = s - encoder_padding_mask.type_as(x).sum(
dim=1, keepdim=True
) # exclude padding
s = s.unsqueeze(-1)
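            # s * s.rsqrt() == sqrt(s): the same sqrt scaling as the unpadded branch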
x = x * (s * s.rsqrt())
# project back
x = (self.out_projection(x) + residual) * math.sqrt(0.5)
return x, attn_scores
def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):
"""Replace torch.bmm with BeamableMM."""
if beamable_mm_beam_size is not None:
del self.bmm
self.add_module("bmm", BeamableMM(beamable_mm_beam_size))
class FConvDecoder(FairseqIncrementalDecoder):
"""Convolutional decoder"""
def __init__(
self,
dictionary,
embed_dim=512,
embed_dict=None,
out_embed_dim=256,
max_positions=1024,
convolutions=((512, 3),) * 20,
attention=True,
dropout=0.1,
share_embed=False,
positional_embeddings=True,
adaptive_softmax_cutoff=None,
adaptive_softmax_dropout=0.0,
):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([2]))
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.need_attn = True
convolutions = extend_conv_spec(convolutions)
in_channels = convolutions[0][0]
if isinstance(attention, bool):
# expand True into [True, True, ...] and do the same with False
attention = [attention] * len(convolutions)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError(
"Attention is expected to be a list of booleans of "
"length equal to the number of layers."
)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
if embed_dict:
self.embed_tokens = utils.load_embedding(
embed_dict, self.dictionary, self.embed_tokens
)
self.embed_positions = (
PositionalEmbedding(
max_positions,
embed_dim,
padding_idx,
)
if positional_embeddings
else None
)
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.residuals = []
layer_in_channels = [in_channels]
for i, (out_channels, kernel_size, residual) in enumerate(convolutions):
if residual == 0:
residual_dim = out_channels
else:
residual_dim = layer_in_channels[-residual]
self.projections.append(
Linear(residual_dim, out_channels)
if residual_dim != out_channels
else None
)
self.convolutions.append(
LinearizedConv1d(
in_channels,
out_channels * 2,
kernel_size,
padding=(kernel_size - 1),
dropout=dropout,
)
)
self.attention.append(
AttentionLayer(out_channels, embed_dim) if attention[i] else None
)
self.residuals.append(residual)
in_channels = out_channels
layer_in_channels.append(out_channels)
self.adaptive_softmax = None
self.fc2 = self.fc3 = None
if adaptive_softmax_cutoff is not None:
assert not share_embed
self.adaptive_softmax = AdaptiveSoftmax(
num_embeddings,
in_channels,
adaptive_softmax_cutoff,
dropout=adaptive_softmax_dropout,
)
else:
self.fc2 = Linear(in_channels, out_embed_dim)
if share_embed:
assert out_embed_dim == embed_dim, (
"Shared embed weights implies same dimensions "
" out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim)
)
self.fc3 = nn.Linear(out_embed_dim, num_embeddings)
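                # tie the output projection weights to the input embedding matrix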
self.fc3.weight = self.embed_tokens.weight
else:
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
if encoder_out is not None:
encoder_padding_mask = encoder_out["encoder_padding_mask"]
encoder_out = encoder_out["encoder_out"]
# split and transpose encoder outputs
encoder_a, encoder_b = self._split_encoder_out(
encoder_out, incremental_state
)
if self.embed_positions is not None:
pos_embed = self.embed_positions(prev_output_tokens, incremental_state)
else:
pos_embed = 0
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
x = self._embed_tokens(prev_output_tokens, incremental_state)
# embed tokens and combine with positional embeddings
x += pos_embed
x = self.dropout_module(x)
target_embedding = x
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
# temporal convolutions
avg_attn_scores = None
num_attn_layers = len(self.attention)
residuals = [x]
for proj, conv, attention, res_layer in zip(
self.projections, self.convolutions, self.attention, self.residuals
):
if res_layer > 0:
residual = residuals[-res_layer]
residual = residual if proj is None else proj(residual)
else:
residual = None
x = self.dropout_module(x)
x = conv(x, incremental_state)
x = F.glu(x, dim=2)
# attention
if attention is not None:
x = self._transpose_if_training(x, incremental_state)
x, attn_scores = attention(
x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask
)
if not self.training and self.need_attn:
attn_scores = attn_scores / num_attn_layers
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
x = self._transpose_if_training(x, incremental_state)
# residual
if residual is not None:
x = (x + residual) * math.sqrt(0.5)
residuals.append(x)
# T x B x C -> B x T x C
x = self._transpose_if_training(x, incremental_state)
# project back to size of vocabulary if not using adaptive softmax
if self.fc2 is not None and self.fc3 is not None:
x = self.fc2(x)
x = self.dropout_module(x)
x = self.fc3(x)
return x, avg_attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
encoder_out = utils.get_incremental_state(
self, incremental_state, "encoder_out"
)
if encoder_out is not None:
encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out)
utils.set_incremental_state(
self, incremental_state, "encoder_out", encoder_out
)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return (
self.embed_positions.max_positions
if self.embed_positions is not None
else float("inf")
)
def upgrade_state_dict(self, state_dict):
if utils.item(state_dict.get("decoder.version", torch.Tensor([1]))[0]) < 2:
# old models use incorrect weight norm dimension
for i, conv in enumerate(self.convolutions):
# reconfigure weight norm
nn.utils.remove_weight_norm(conv)
self.convolutions[i] = nn.utils.weight_norm(conv, dim=0)
state_dict["decoder.version"] = torch.Tensor([1])
return state_dict
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _embed_tokens(self, tokens, incremental_state):
if incremental_state is not None:
# keep only the last token for incremental forward pass
tokens = tokens[:, -1:]
return self.embed_tokens(tokens)
def _split_encoder_out(self, encoder_out, incremental_state):
"""Split and transpose encoder outputs.
This is cached when doing incremental inference.
"""
cached_result = utils.get_incremental_state(
self, incremental_state, "encoder_out"
)
if cached_result is not None:
return cached_result
# transpose only once to speed up attention layers
encoder_a, encoder_b = encoder_out
encoder_a = encoder_a.transpose(1, 2).contiguous()
result = (encoder_a, encoder_b)
if incremental_state is not None:
utils.set_incremental_state(self, incremental_state, "encoder_out", result)
return result
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def extend_conv_spec(convolutions):
"""
    Extends a convolutional spec, given as a list of tuples of 2 or 3 parameters
    (dim size, kernel size, and optionally how many layers back to look for the
    residual), by defaulting the residual propagation param to 1 when unspecified
"""
extended = []
for spec in convolutions:
if len(spec) == 3:
extended.append(spec)
elif len(spec) == 2:
extended.append(spec + (1,))
else:
raise Exception(
"invalid number of parameters in convolution spec "
+ str(spec)
+ ". expected 2 or 3"
)
return tuple(extended)
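# Editor's sketch (not in the original file): 2-tuples gain the default
# residual of 1, while 3-tuples pass through unchanged.
def _extend_conv_spec_demo():
    assert extend_conv_spec(((512, 3), (512, 3, 0))) == ((512, 3, 1), (512, 3, 0))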
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, 0, 0.1)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, dropout=0.0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
nn.init.normal_(m.weight, mean=0, std=math.sqrt((1 - dropout) / in_features))
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m)
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer"""
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
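# Editor's sketch (not in the original file): the helpers above return
# weight-normalized modules, i.e. the weight is re-parameterized into a
# magnitude `weight_g` and a direction `weight_v`.
def _weight_norm_demo():
    m = Linear(4, 8, dropout=0.1)
    assert hasattr(m, "weight_g") and hasattr(m, "weight_v")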
@register_model_architecture("fconv", "fconv")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 20")
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 20")
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.decoder_attention = getattr(args, "decoder_attention", "True")
args.share_input_output_embed = getattr(args, "share_input_output_embed", False)
@register_model_architecture("fconv", "fconv_iwslt_de_en")
def fconv_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(args, "encoder_layers", "[(256, 3)] * 4")
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_layers = getattr(args, "decoder_layers", "[(256, 3)] * 3")
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
base_architecture(args)
@register_model_architecture("fconv", "fconv_wmt_en_ro")
def fconv_wmt_en_ro(args):
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
base_architecture(args)
@register_model_architecture("fconv", "fconv_wmt_en_de")
def fconv_wmt_en_de(args):
convs = "[(512, 3)] * 9" # first 9 layers have 512 units
convs += " + [(1024, 3)] * 4" # next 4 layers have 1024 units
convs += " + [(2048, 1)] * 2" # final 2 layers use 1x1 convolutions
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_layers = getattr(args, "encoder_layers", convs)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
args.decoder_layers = getattr(args, "decoder_layers", convs)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
base_architecture(args)
@register_model_architecture("fconv", "fconv_wmt_en_fr")
def fconv_wmt_en_fr(args):
convs = "[(512, 3)] * 6" # first 6 layers have 512 units
convs += " + [(768, 3)] * 4" # next 4 layers have 768 units
convs += " + [(1024, 3)] * 3" # next 3 layers have 1024 units
convs += " + [(2048, 1)] * 1" # next 1 layer uses 1x1 convolutions
convs += " + [(4096, 1)] * 1" # final 1 layer uses 1x1 convolutions
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_layers = getattr(args, "encoder_layers", convs)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 768)
args.decoder_layers = getattr(args, "decoder_layers", convs)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
base_architecture(args)
CIF-HieraDist-main/fairseq/models/lightconv.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import (
AdaptiveSoftmax,
DynamicConv,
FairseqDropout,
LayerNorm,
LightweightConv,
MultiheadAttention,
PositionalEmbedding,
)
from fairseq.utils import safe_hasattr
@register_model("lightconv")
class LightConvModel(FairseqEncoderDecoderModel):
"""
LightConv and DynamicConv model from `"Pay Less Attention with Lightweight and Dynamic Convolutions" (Wu, et al, 2019)
<https://openreview.net/pdf?id=SkVhlh09tX>`_.
To use LightConv please set ``--encoder-conv-type lightweight --decoder-conv-type lightweight``
To use DynamicConv please set ``--encoder-conv-type dynamic --decoder-conv-type dynamic``
Args:
encoder (LightConvEncoder): the encoder
decoder (LightConvDecoder): the decoder
The LightConv model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.lightconv_parser
:prog:
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
return {
'lightconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz'),
'dynamicconv.no_glu.iwslt14.de-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz'),
'lightconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz'),
'dynamicconv.no_glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz'),
'lightconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'),
'dynamicconv.glu.wmt16.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'),
'lightconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz'),
'dynamicconv.glu.wmt17.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz'),
'lightconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz'),
'dynamicconv.glu.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz'),
'lightconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz'),
'dynamicconv.glu.wmt17.zh-en': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz'),
}
# fmt: on
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after ReLU in FFN",
)
parser.add_argument(
"--input-dropout",
type=float,
metavar="D",
help="dropout probability of the inputs",
)
parser.add_argument(
"--encoder-embed-path",
type=str,
metavar="STR",
help="path to pre-trained encoder embedding",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-conv-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads or LightConv/DynamicConv heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--encoder-learned-pos",
action="store_true",
help="use learned positional embeddings in the encoder",
)
parser.add_argument(
"--decoder-embed-path",
type=str,
metavar="STR",
help="path to pre-trained decoder embedding",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-conv-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads or LightConv/DynamicConv heads",
)
parser.add_argument(
"--decoder-learned-pos",
action="store_true",
help="use learned positional embeddings in the decoder",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--share-all-embeddings",
action="store_true",
help="share encoder, decoder and output embeddings"
" (requires shared dictionary and embed dim)",
)
parser.add_argument(
"--adaptive-softmax-cutoff",
metavar="EXPR",
help="comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion",
        )
parser.add_argument(
"--adaptive-softmax-dropout",
type=float,
metavar="D",
help="sets adaptive softmax dropout for the tail projections",
)
"""LightConv and DynamicConv arguments"""
parser.add_argument(
"--encoder-kernel-size-list",
type=lambda x: utils.eval_str_list(x, int),
help='list of kernel size (default: "[3,7,15,31,31,31,31]")',
)
parser.add_argument(
"--decoder-kernel-size-list",
type=lambda x: utils.eval_str_list(x, int),
help='list of kernel size (default: "[3,7,15,31,31,31]")',
)
parser.add_argument(
"--encoder-glu", type=utils.eval_bool, help="glu after in proj"
)
parser.add_argument(
"--decoder-glu", type=utils.eval_bool, help="glu after in proj"
)
parser.add_argument(
"--encoder-conv-type",
default="dynamic",
type=str,
choices=["dynamic", "lightweight"],
help="type of convolution",
)
parser.add_argument(
"--decoder-conv-type",
default="dynamic",
type=str,
choices=["dynamic", "lightweight"],
help="type of convolution",
)
parser.add_argument("--weight-softmax", default=True, type=utils.eval_bool)
parser.add_argument(
"--weight-dropout",
type=float,
metavar="D",
help="dropout probability for conv weights",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if not safe_hasattr(args, "max_source_positions"):
args.max_source_positions = 1024
if not safe_hasattr(args, "max_target_positions"):
args.max_target_positions = 1024
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise RuntimeError(
"--share-all-embeddings requires a joined dictionary"
)
if args.encoder_embed_dim != args.decoder_embed_dim:
raise RuntimeError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise RuntimeError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = LightConvEncoder(args, src_dict, encoder_embed_tokens)
decoder = LightConvDecoder(args, tgt_dict, decoder_embed_tokens)
return LightConvModel(encoder, decoder)
class LightConvEncoder(FairseqEncoder):
"""
LightConv encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`LightConvEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
LightConvEncoderLayer(
args, kernel_size=args.encoder_kernel_size_list[i]
)
for i in range(args.encoder_layers)
]
)
self.register_buffer("version", torch.Tensor([2]))
self.normalize = args.encoder_normalize_before
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
def forward(self, src_tokens, **unused):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if self.normalize:
x = self.layer_norm(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
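# Editor's sketch (not in the original file): the encoder above builds its
# padding mask with src_tokens.eq(self.padding_idx) and drops it entirely
# when no position is padding; the pad index of 1 here is hypothetical.
def _padding_mask_demo():
    pad = 1
    src_tokens = torch.tensor([[5, 6, pad], [7, pad, pad]])
    mask = src_tokens.eq(pad)
    assert mask.tolist() == [[False, False, True], [False, True, True]]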
class LightConvDecoder(FairseqIncrementalDecoder):
"""
LightConv decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`LightConvDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
"""
def __init__(
self, args, dictionary, embed_tokens, no_encoder_attn=False, final_norm=True
):
super().__init__(dictionary)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
output_embed_dim = args.decoder_output_dim
padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
embed_dim,
padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
LightConvDecoderLayer(
args, no_encoder_attn, kernel_size=args.decoder_kernel_size_list[i]
)
for i in range(args.decoder_layers)
]
)
self.adaptive_softmax = None
self.project_out_dim = (
Linear(embed_dim, output_embed_dim, bias=False)
if embed_dim != output_embed_dim and not args.tie_adaptive_weights
else None
)
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
output_embed_dim,
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim**-0.5)
self.register_buffer("version", torch.Tensor([2]))
self.normalize = args.decoder_normalize_before and final_norm
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
            encoder_out (dict, optional): output from the encoder, used for
                encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
x, attn = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["encoder_padding_mask"]
if encoder_out is not None
else None,
incremental_state,
)
inner_states.append(x)
if self.normalize:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
if self.adaptive_softmax is None:
# project back to size of vocabulary
if self.share_input_output_embed:
x = F.linear(x, self.embed_tokens.weight)
else:
x = F.linear(x, self.embed_out)
return x, {"attn": attn, "inner_states": inner_states}
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
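# Hedged sketch (added): the causal mask produced by buffered_future_mask
# above. Entry (i, j) is -inf for j > i, so when added to attention scores a
# position can only attend to itself and earlier positions.
def _future_mask_sketch(dim=4):
    import torch

    mask = torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)
    return mask  # zeros on and below the diagonal, -inf strictly above it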
class LightConvEncoderLayer(nn.Module):
"""Encoder layer block.
Args:
args (argparse.Namespace): parsed command-line arguments
kernel_size: kernel size of the convolution
"""
def __init__(self, args, kernel_size=0):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.conv_dim = args.encoder_conv_dim
padding_l = (
kernel_size // 2
if kernel_size % 2 == 1
else ((kernel_size - 1) // 2, kernel_size // 2)
)
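        # Added note: an odd kernel pads symmetrically (e.g. kernel_size=3 ->
        # padding_l=1), while an even kernel pads asymmetrically as a
        # (left, right) tuple (e.g. kernel_size=4 -> padding_l=(1, 2)).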
if args.encoder_glu:
self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim)
self.act = nn.GLU()
else:
self.linear1 = Linear(self.embed_dim, self.conv_dim)
self.act = None
if args.encoder_conv_type == "lightweight":
self.conv = LightweightConv(
self.conv_dim,
kernel_size,
padding_l=padding_l,
weight_softmax=args.weight_softmax,
num_heads=args.encoder_attention_heads,
weight_dropout=args.weight_dropout,
)
elif args.encoder_conv_type == "dynamic":
self.conv = DynamicConv(
self.conv_dim,
kernel_size,
padding_l=padding_l,
weight_softmax=args.weight_softmax,
num_heads=args.encoder_attention_heads,
weight_dropout=args.weight_dropout,
)
else:
raise NotImplementedError
self.linear2 = Linear(self.conv_dim, self.embed_dim)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.relu_dropout_module = FairseqDropout(
args.relu_dropout, module_name=self.__class__.__name__
)
self.input_dropout_module = FairseqDropout(
args.input_dropout, module_name=self.__class__.__name__
)
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for _ in range(2)])
def forward(self, x, encoder_padding_mask):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(0, x, before=True)
x = self.input_dropout_module(x)
x = self.linear1(x)
if self.act is not None:
x = self.act(x)
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.transpose(0, 1).unsqueeze(2), 0)
x = self.conv(x)
x = self.linear2(x)
x = self.dropout_module(x)
x = residual + x
x = self.maybe_layer_norm(0, x, after=True)
residual = x
x = self.maybe_layer_norm(1, x, before=True)
x = F.relu(self.fc1(x))
x = self.relu_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
x = self.maybe_layer_norm(1, x, after=True)
return x
def maybe_layer_norm(self, i, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return self.layer_norms[i](x)
else:
return x
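    # Added note: the XOR above implements pre-/post-norm selection. With
    # normalize_before=True the norm fires on the ``before=True`` call
    # (pre-norm); with normalize_before=False it fires on the ``after=True``
    # call (post-norm), matching the original Transformer layout.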
def extra_repr(self):
return (
"dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format(
self.dropout_module.p,
self.relu_dropout_module.p,
self.input_dropout_module.p,
self.normalize_before,
)
)
class LightConvDecoderLayer(nn.Module):
"""Decoder layer block.
Args:
args (argparse.Namespace): parsed command-line arguments
        no_encoder_attn (bool, optional): whether to skip attending to encoder
            outputs. Default: ``False``
kernel_size: kernel size of the convolution
"""
def __init__(self, args, no_encoder_attn=False, kernel_size=0):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.conv_dim = args.decoder_conv_dim
if args.decoder_glu:
self.linear1 = Linear(self.embed_dim, 2 * self.conv_dim)
self.act = nn.GLU()
else:
self.linear1 = Linear(self.embed_dim, self.conv_dim)
self.act = None
if args.decoder_conv_type == "lightweight":
self.conv = LightweightConv(
self.conv_dim,
kernel_size,
padding_l=kernel_size - 1,
weight_softmax=args.weight_softmax,
num_heads=args.decoder_attention_heads,
weight_dropout=args.weight_dropout,
)
elif args.decoder_conv_type == "dynamic":
self.conv = DynamicConv(
self.conv_dim,
kernel_size,
padding_l=kernel_size - 1,
weight_softmax=args.weight_softmax,
num_heads=args.decoder_attention_heads,
weight_dropout=args.weight_dropout,
)
else:
raise NotImplementedError
self.linear2 = Linear(self.conv_dim, self.embed_dim)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.relu_dropout_module = FairseqDropout(
args.relu_dropout, module_name=self.__class__.__name__
)
self.input_dropout_module = FairseqDropout(
args.input_dropout, module_name=self.__class__.__name__
)
self.normalize_before = args.decoder_normalize_before
self.conv_layer_norm = LayerNorm(self.embed_dim)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim)
self.need_attn = True
def forward(
self,
x,
encoder_out,
encoder_padding_mask,
incremental_state,
prev_conv_state=None,
prev_attn_state=None,
conv_mask=None,
conv_padding_mask=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
"""
residual = x
x = self.maybe_layer_norm(self.conv_layer_norm, x, before=True)
if prev_conv_state is not None:
if incremental_state is None:
incremental_state = {}
self.conv._set_input_buffer(incremental_state, prev_conv_state)
x = self.input_dropout_module(x)
x = self.linear1(x)
if self.act is not None:
x = self.act(x)
x = self.conv(x, incremental_state=incremental_state)
x = self.linear2(x)
x = self.dropout_module(x)
x = residual + x
x = self.maybe_layer_norm(self.conv_layer_norm, x, after=True)
attn = None
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x = self.dropout_module(x)
x = residual + x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = F.relu(self.fc1(x))
x = self.relu_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)
return x, attn
def maybe_layer_norm(self, layer_norm, x, before=False, after=False):
assert before ^ after
if after ^ self.normalize_before:
return layer_norm(x)
else:
return x
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def extra_repr(self):
return (
"dropout={}, relu_dropout={}, input_dropout={}, normalize_before={}".format(
self.dropout_module.p,
self.relu_dropout_module.p,
self.input_dropout_module.p,
self.normalize_before,
)
)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
@register_model_architecture("lightconv", "lightconv")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 7)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.encoder_conv_dim = getattr(args, "encoder_conv_dim", args.encoder_embed_dim)
args.decoder_conv_dim = getattr(args, "decoder_conv_dim", args.decoder_embed_dim)
args.encoder_kernel_size_list = getattr(
args, "encoder_kernel_size_list", [3, 7, 15, 31, 31, 31, 31]
)
args.decoder_kernel_size_list = getattr(
args, "decoder_kernel_size_list", [3, 7, 15, 31, 31, 31]
)
if len(args.encoder_kernel_size_list) == 1:
args.encoder_kernel_size_list = (
args.encoder_kernel_size_list * args.encoder_layers
)
if len(args.decoder_kernel_size_list) == 1:
args.decoder_kernel_size_list = (
args.decoder_kernel_size_list * args.decoder_layers
)
assert (
len(args.encoder_kernel_size_list) == args.encoder_layers
), "encoder_kernel_size_list doesn't match encoder_layers"
assert (
len(args.decoder_kernel_size_list) == args.decoder_layers
), "decoder_kernel_size_list doesn't match decoder_layers"
args.encoder_glu = getattr(args, "encoder_glu", True)
args.decoder_glu = getattr(args, "decoder_glu", True)
args.input_dropout = getattr(args, "input_dropout", 0.1)
args.weight_dropout = getattr(args, "weight_dropout", args.attention_dropout)
@register_model_architecture("lightconv", "lightconv_iwslt_de_en")
def lightconv_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 7)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.weight_dropout = getattr(args, "weight_dropout", 0.1)
args.encoder_glu = getattr(args, "encoder_glu", False)
args.decoder_glu = getattr(args, "decoder_glu", False)
args.input_dropout = getattr(args, "input_dropout", 0.0)
base_architecture(args)
@register_model_architecture("lightconv", "lightconv_wmt_en_de")
def lightconv_wmt_en_de(args):
base_architecture(args)
@register_model_architecture("lightconv", "lightconv_wmt_en_de_big")
def lightconv_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("lightconv", "lightconv_wmt_en_fr_big")
def lightconv_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
lightconv_wmt_en_de_big(args)
@register_model_architecture("lightconv", "lightconv_wmt_zh_en_big")
def lightconv_wmt_zh_en_big(args):
args.dropout = getattr(args, "dropout", 0.2)
args.attention_dropout = getattr(args, "attention_dropout", 0.2)
args.weight_dropout = getattr(args, "weight_dropout", 0.2)
lightconv_wmt_en_de_big(args)
# ==== file: CIF-HieraDist-main/fairseq/models/fconv_self_att.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.models import (
CompositeEncoder,
FairseqDecoder,
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
DownsampledMultiHeadAttention,
FairseqDropout,
GradMultiply,
LayerNorm,
LearnedPositionalEmbedding,
LinearizedConvolution,
)
logger = logging.getLogger(__name__)
@register_model("fconv_self_att")
class FConvModelSelfAtt(FairseqEncoderDecoderModel):
@classmethod
def hub_models(cls):
return {
"conv.stories.pretrained": {
"path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz",
"checkpoint_file": "pretrained_checkpoint.pt",
"tokenizer": "nltk",
},
"conv.stories": {
"path": "https://dl.fbaipublicfiles.com/fairseq/models/stories_checkpoint.tar.gz",
"checkpoint_file": "fusion_checkpoint.pt",
"tokenizer": "nltk",
"pretrained": "True",
"pretrained_checkpoint": "./pretrained_checkpoint.pt",
},
# Test set containing dictionaries
"data.stories": "https://dl.fbaipublicfiles.com/fairseq/data/stories_test.tar.bz2",
}
def __init__(self, encoder, decoder, pretrained_encoder=None):
super().__init__(encoder, decoder)
self.encoder.num_attention_layers = sum(
layer is not None for layer in decoder.attention
)
self.pretrained_encoder = pretrained_encoder
if self.pretrained_encoder is None:
encoders = {"encoder": encoder}
else:
encoders = {"encoder": encoder, "pretrained": self.pretrained_encoder}
# for fusion model, CompositeEncoder contains both pretrained and training encoders
# these are forwarded and then combined in the decoder
self.encoder = CompositeEncoder(encoders)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
help='encoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
help='decoder layers [(dim, kernel_size), ...]')
parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
help='decoder output embedding dimension')
parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
help='decoder attention [True, ...]')
parser.add_argument('--self-attention', type=str, metavar='EXPR',
help='decoder self-attention layers, ex: [True] + [False]*5')
parser.add_argument('--multihead-attention-nheads', type=int,
help='Number of heads to use in attention')
parser.add_argument('--multihead-self-attention-nheads', type=int,
help='Number of heads to use in self-attention')
parser.add_argument('--encoder-attention', type=str, metavar='EXPR',
help='encoder attention [True, ...]')
parser.add_argument('--encoder-attention-nheads', type=int,
help='Number of heads to use in encoder attention')
parser.add_argument('--project-input', type=str, metavar='EXPR',
help='Use projections in self-attention [True, ...]')
parser.add_argument('--gated-attention', type=str, metavar='EXPR',
help='Use GLU layers in self-attention projections [True, ...]')
parser.add_argument('--downsample', type=str, metavar='EXPR',
help='Use downsampling in self-attention [True, ...]')
parser.add_argument('--pretrained-checkpoint', metavar='DIR',
help='path to load checkpoint from pretrained model')
parser.add_argument('--pretrained', type=str, metavar='EXPR',
help='use pretrained model when training [True, ...]')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
trained_encoder, trained_decoder = None, None
pretrained = eval(args.pretrained)
if pretrained:
logger.info("loading pretrained model")
if not os.path.exists(args.pretrained_checkpoint):
new_pretrained_checkpoint = os.path.join(
args.data, args.pretrained_checkpoint
)
if os.path.exists(new_pretrained_checkpoint):
args.pretrained_checkpoint = new_pretrained_checkpoint
trained_model = checkpoint_utils.load_model_ensemble(
filenames=[args.pretrained_checkpoint],
task=task,
)[0][0]
trained_decoder = list(trained_model.children())[1]
trained_encoder = list(trained_model.children())[0]
# freeze pretrained model
for param in trained_decoder.parameters():
param.requires_grad = False
for param in trained_encoder.parameters():
param.requires_grad = False
encoder = FConvEncoder(
task.source_dictionary,
embed_dim=args.encoder_embed_dim,
convolutions=eval(args.encoder_layers),
dropout=args.dropout,
max_positions=args.max_source_positions,
attention=eval(args.encoder_attention),
attention_nheads=args.encoder_attention_nheads,
)
decoder = FConvDecoder(
task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_out_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.max_target_positions,
selfattention=eval(args.self_attention),
attention_nheads=args.multihead_attention_nheads,
selfattention_nheads=args.multihead_self_attention_nheads,
project_input=eval(args.project_input),
gated_attention=eval(args.gated_attention),
downsample=eval(args.downsample),
pretrained=pretrained,
trained_decoder=trained_decoder,
)
model = FConvModelSelfAtt(encoder, decoder, trained_encoder)
return model
@property
def pretrained(self):
return self.pretrained_encoder is not None
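# Hedged sketch (added): the parameter-freezing pattern used in build_model
# above, shown on a generic nn.Module. Freezing only flips requires_grad; the
# frozen module still participates in the forward pass but receives no updates.
def _freeze_sketch(module):
    for param in module.parameters():
        param.requires_grad = False
    return module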
class FConvEncoder(FairseqEncoder):
"""Convolutional encoder"""
def __init__(
self,
dictionary,
embed_dim=512,
max_positions=1024,
convolutions=((512, 3),) * 20,
dropout=0.1,
attention=False,
attention_nheads=1,
):
super().__init__(dictionary)
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.num_attention_layers = None
num_embeddings = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
self.padding_idx,
)
def expand_bool_array(val):
if isinstance(val, bool):
# expand True into [True, True, ...] and do the same with False
return [val] * len(convolutions)
return val
attention = expand_bool_array(attention)
in_channels = convolutions[0][0]
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.attproj = nn.ModuleList()
for i, (out_channels, kernel_size) in enumerate(convolutions):
self.projections.append(
Linear(in_channels, out_channels)
if in_channels != out_channels
else None
)
self.convolutions.append(
ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout)
)
self.attention.append(
SelfAttention(out_channels, embed_dim, attention_nheads)
if attention[i]
else None
)
in_channels = out_channels
self.fc2 = Linear(in_channels, embed_dim)
def forward(self, src_tokens, src_lengths):
# embed tokens and positions
x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
x = self.dropout_module(x)
input_embedding = x.transpose(0, 1)
# project to size of convolution
x = self.fc1(x)
encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # -> T x B
if not encoder_padding_mask.any():
encoder_padding_mask = None
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# temporal convolutions
for proj, conv, attention in zip(
self.projections, self.convolutions, self.attention
):
residual = x if proj is None else proj(x)
if encoder_padding_mask is not None:
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
x = self.dropout_module(x)
padding_l = (conv.kernel_size[0] - 1) // 2
padding_r = conv.kernel_size[0] // 2
x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
x = conv(x)
x = F.glu(x, dim=2)
if attention is not None:
x = attention(x)
x = (x + residual) * math.sqrt(0.5)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# project back to size of embedding
x = self.fc2(x)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.t() # -> B x T
x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
# scale gradients (this only affects backward, not forward)
x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
# add output to input embedding for attention
y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5)
return {
"encoder_out": (x, y),
"encoder_padding_mask": encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = tuple(
eo.index_select(0, new_order) for eo in encoder_out["encoder_out"]
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
if "pretrained" in encoder_out:
encoder_out["pretrained"]["encoder_out"] = tuple(
eo.index_select(0, new_order)
for eo in encoder_out["pretrained"]["encoder_out"]
)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return self.embed_positions.max_positions
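# Hedged sketch (added): GradMultiply-style gradient scaling, as applied in
# FConvEncoder.forward above. The forward pass is the identity; the backward
# pass scales the incoming gradient by a constant, which keeps encoder
# gradients from growing with the number of attention layers.
def _grad_multiply_sketch():
    import torch

    class _GradMultiply(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, scale):
            ctx.scale = scale
            return x.clone()  # identity on values

        @staticmethod
        def backward(ctx, grad):
            return grad * ctx.scale, None

    x = torch.randn(3, requires_grad=True)
    y = _GradMultiply.apply(x, 0.5)
    y.sum().backward()
    return x.grad  # every entry equals 0.5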
@with_incremental_state
class FConvDecoder(FairseqDecoder):
"""Convolutional decoder"""
def __init__(
self,
dictionary,
embed_dim=512,
out_embed_dim=256,
max_positions=1024,
convolutions=((512, 3),) * 8,
attention=True,
dropout=0.1,
selfattention=False,
attention_nheads=1,
selfattention_nheads=1,
project_input=False,
gated_attention=False,
downsample=False,
pretrained=False,
trained_decoder=None,
):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([2]))
self.pretrained = pretrained
self.pretrained_decoder = trained_decoder
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.need_attn = True
in_channels = convolutions[0][0]
def expand_bool_array(val):
if isinstance(val, bool):
# expand True into [True, True, ...] and do the same with False
return [val] * len(convolutions)
return val
attention = expand_bool_array(attention)
selfattention = expand_bool_array(selfattention)
if not isinstance(attention, list) or len(attention) != len(convolutions):
raise ValueError(
"Attention is expected to be a list of booleans of "
"length equal to the number of layers."
)
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
self.embed_positions = PositionalEmbedding(
max_positions,
embed_dim,
padding_idx,
)
self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
self.projections = nn.ModuleList()
self.convolutions = nn.ModuleList()
self.attention = nn.ModuleList()
self.selfattention = nn.ModuleList()
self.attproj = nn.ModuleList()
for i, (out_channels, kernel_size) in enumerate(convolutions):
self.projections.append(
Linear(in_channels, out_channels)
if in_channels != out_channels
else None
)
self.convolutions.append(
LinearizedConv1d(
in_channels,
out_channels * 2,
kernel_size,
padding=(kernel_size - 1),
dropout=dropout,
)
)
self.attention.append(
DownsampledMultiHeadAttention(
out_channels,
embed_dim,
attention_nheads,
project_input=project_input,
gated=False,
downsample=False,
)
if attention[i]
else None
)
self.attproj.append(
Linear(out_channels, embed_dim, dropout=dropout)
if attention[i]
else None
)
self.selfattention.append(
SelfAttention(
out_channels,
embed_dim,
selfattention_nheads,
project_input=project_input,
gated=gated_attention,
downsample=downsample,
)
if selfattention[i]
else None
)
in_channels = out_channels
self.fc2 = Linear(in_channels, out_embed_dim)
self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
# model fusion
if self.pretrained:
# independent gates are learned from the concatenated input
self.gate1 = nn.Sequential(
Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid()
)
self.gate2 = nn.Sequential(
Linear(out_embed_dim * 2, out_embed_dim), nn.Sigmoid()
)
# pretrained and trained models are joined
self.joining = nn.Sequential(
Linear(out_embed_dim * 2, out_embed_dim * 2),
LayerNorm(out_embed_dim * 2),
nn.GLU(),
Linear(out_embed_dim, out_embed_dim * 2),
LayerNorm(out_embed_dim * 2),
nn.GLU(),
Linear(out_embed_dim, out_embed_dim),
LayerNorm(out_embed_dim),
)
        # the pretrained model contains an output layer (nhid -> vocab size),
        # but the two models are fused in their hidden states, so a forward
        # hook stores the output of the pretrained model's fc2 below
self.pretrained_outputs = {}
def save_output():
def hook(a, b, output):
self.pretrained_outputs["out"] = output
return hook
self.pretrained_decoder.fc2.register_forward_hook(save_output())
def forward(self, prev_output_tokens, encoder_out):
trained_encoder_out = encoder_out["pretrained"] if self.pretrained else None
encoder_out = encoder_out["encoder"]["encoder_out"]
encoder_a, encoder_b = self._split_encoder_out(encoder_out)
# embed positions
positions = self.embed_positions(prev_output_tokens)
# embed tokens and positions
x = self.embed_tokens(prev_output_tokens) + positions
x = self.dropout_module(x)
target_embedding = x.transpose(0, 1)
# project to size of convolution
x = self.fc1(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# temporal convolutions
avg_attn_scores = None
for proj, conv, attention, selfattention, attproj in zip(
self.projections,
self.convolutions,
self.attention,
self.selfattention,
self.attproj,
):
residual = x if proj is None else proj(x)
x = self.dropout_module(x)
x = conv(x)
x = F.glu(x, dim=2)
# attention
if attention is not None:
r = x
x, attn_scores = attention(
attproj(x) + target_embedding, encoder_a, encoder_b
)
x = x + r
if not self.training and self.need_attn:
if avg_attn_scores is None:
avg_attn_scores = attn_scores
else:
avg_attn_scores.add_(attn_scores)
if selfattention is not None:
x = selfattention(x)
x = (x + residual) * math.sqrt(0.5)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
# project back to size of vocabulary
x = self.fc2(x)
x = self.dropout_module(x)
if not self.pretrained:
x = self.fc3(x)
# fusion gating
if self.pretrained:
trained_x, _ = self.pretrained_decoder.forward(
prev_output_tokens, trained_encoder_out
)
y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1)
gate1 = self.gate1(y)
gate2 = self.gate2(y)
gated_x1 = gate1 * x
gated_x2 = gate2 * self.pretrained_outputs["out"]
fusion = torch.cat([gated_x1, gated_x2], dim=-1)
fusion = self.joining(fusion)
fusion_output = self.fc3(fusion)
return fusion_output, avg_attn_scores
else:
return x, avg_attn_scores
def max_positions(self):
"""Maximum output length supported by the decoder."""
return self.embed_positions.max_positions
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def _split_encoder_out(self, encoder_out):
"""Split and transpose encoder outputs."""
# transpose only once to speed up attention layers
encoder_a, encoder_b = encoder_out
encoder_a = encoder_a.transpose(0, 1).contiguous()
encoder_b = encoder_b.transpose(0, 1).contiguous()
result = (encoder_a, encoder_b)
return result
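# Hedged sketch (added): the forward-hook pattern FConvDecoder uses above to
# capture the pretrained decoder's fc2 output for fusion. A forward hook
# receives (module, inputs, output); here it simply stashes the output.
def _forward_hook_sketch():
    import torch
    import torch.nn as nn

    captured = {}

    def hook(module, inputs, output):
        captured["out"] = output

    layer = nn.Linear(4, 2)
    handle = layer.register_forward_hook(hook)
    layer(torch.randn(1, 4))
    handle.remove()
    return captured["out"].shape  # torch.Size([1, 2])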
class SelfAttention(nn.Module):
def __init__(
self,
out_channels,
embed_dim,
num_heads,
project_input=False,
gated=False,
downsample=False,
):
super().__init__()
self.attention = DownsampledMultiHeadAttention(
out_channels,
embed_dim,
num_heads,
dropout=0,
bias=True,
project_input=project_input,
gated=gated,
downsample=downsample,
)
self.in_proj_q = Linear(out_channels, embed_dim)
self.in_proj_k = Linear(out_channels, embed_dim)
self.in_proj_v = Linear(out_channels, embed_dim)
self.ln = LayerNorm(out_channels)
def forward(self, x):
residual = x
query = self.in_proj_q(x)
key = self.in_proj_k(x)
value = self.in_proj_v(x)
x, _ = self.attention(
query, key, value, mask_future_timesteps=True, use_scalar_bias=True
)
return self.ln(x + residual)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
m.weight.data.normal_(0, 0.1)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
m.weight.data.normal_(0, 0.1)
return m
def Linear(in_features, out_features, dropout=0.0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0.0, **kwargs):
"""Weight-normalized Conv1d layer"""
from fairseq.modules import ConvTBC
m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return m
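# Worked example (added): for kernel_size=3, in_channels=512, dropout=0.1, the
# initialization above gives std = sqrt(4 * 0.9 / (3 * 512)) ~= 0.0484, the
# variance-preserving scheme used for GLU-gated convolutions (the factor of 4
# compensates for the GLU halving the channels).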
@register_model_architecture("fconv_self_att", "fconv_self_att")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_layers = getattr(args, "encoder_layers", "[(512, 3)] * 3")
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_layers = getattr(args, "decoder_layers", "[(512, 3)] * 8")
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.decoder_attention = getattr(args, "decoder_attention", "True")
args.self_attention = getattr(args, "self_attention", "False")
args.encoder_attention = getattr(args, "encoder_attention", "False")
args.multihead_attention_nheads = getattr(args, "multihead_attention_nheads", 1)
args.multihead_self_attention_nheads = getattr(
args, "multihead_self_attention_nheads", 1
)
args.encoder_attention_nheads = getattr(args, "encoder_attention_nheads", 1)
args.project_input = getattr(args, "project_input", "False")
args.gated_attention = getattr(args, "gated_attention", "False")
args.downsample = getattr(args, "downsample", "False")
args.pretrained_checkpoint = getattr(args, "pretrained_checkpoint", "")
args.pretrained = getattr(args, "pretrained", "False")
@register_model_architecture("fconv_self_att", "fconv_self_att_wp")
def fconv_self_att_wp(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(
args, "encoder_layers", "[(128, 3)] * 2 + [(512,3)] * 1"
)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_layers = getattr(
args, "decoder_layers", "[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1"
)
args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 256)
args.self_attention = getattr(args, "self_attention", "True")
args.multihead_self_attention_nheads = getattr(
args, "multihead_self_attention_nheads", 4
)
args.project_input = getattr(args, "project_input", "True")
args.gated_attention = getattr(args, "gated_attention", "True")
args.downsample = getattr(args, "downsample", "True")
base_architecture(args)
# ==== file: CIF-HieraDist-main/fairseq/models/fairseq_decoder.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch.nn as nn
from fairseq import utils
from torch import Tensor
class FairseqDecoder(nn.Module):
"""Base class for decoders."""
def __init__(self, dictionary):
super().__init__()
self.dictionary = dictionary
self.onnx_trace = False
self.adaptive_softmax = None
def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
x = self.output_layer(x)
return x, extra
def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def output_layer(self, features, **kwargs):
"""
Project features to the default output size, e.g., vocabulary size.
Args:
features (Tensor): features returned by *extract_features*.
"""
raise NotImplementedError
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
    # TorchScript doesn't support super(), so a scriptable subclass can't call
    # the base class implementation directly. The workaround is to put the
    # shared logic in a helper function with a different name and call that
    # helper from the scriptable subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
if sample is not None:
assert "target" in sample
target = sample["target"]
else:
target = None
out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
return out.exp_() if not log_probs else out
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
else:
return utils.softmax(logits, dim=-1, onnx_trace=self.onnx_trace)
def max_positions(self):
"""Maximum input length supported by the decoder."""
return 1e6 # an arbitrary large number
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code."""
return state_dict
def prepare_for_onnx_export_(self):
self.onnx_trace = True
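# Hedged sketch (added): what get_normalized_probs computes in the common,
# non-adaptive-softmax case -- a (log-)softmax over the vocabulary dimension
# of the decoder logits.
def _normalized_probs_sketch():
    import torch
    import torch.nn.functional as F

    logits = torch.randn(2, 5, 100)             # batch x tgt_len x vocab
    log_probs = F.log_softmax(logits, dim=-1)
    probs = log_probs.exp()
    return torch.allclose(probs.sum(dim=-1), torch.ones(2, 5))  # True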
# ==== file: CIF-HieraDist-main/fairseq/models/fairseq_model.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Base classes for various fairseq models.
"""
import logging
from argparse import Namespace
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.dataclass.utils import (
convert_namespace_to_omegaconf,
gen_parser_from_dataclass,
)
from fairseq.models import FairseqDecoder, FairseqEncoder
from omegaconf import DictConfig
from torch import Tensor
logger = logging.getLogger(__name__)
def check_type(module, expected_type):
if hasattr(module, "unwrapped_module"):
assert isinstance(
module.unwrapped_module, expected_type
), f"{type(module.unwrapped_module)} != {expected_type}"
else:
assert isinstance(module, expected_type), f"{type(module)} != {expected_type}"
class BaseFairseqModel(nn.Module):
"""Base class for fairseq models."""
def __init__(self):
super().__init__()
self._is_generation_fast = False
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
            # do not set defaults so that setting defaults from various architectures still works
gen_parser_from_dataclass(parser, dc(), delete_default=True)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
raise NotImplementedError("Model must implement the build_model method")
def get_targets(self, sample, net_output):
"""Get targets from either the sample or the net's output."""
return sample["target"]
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
    # TorchScript doesn't support super(), so a scriptable subclass can't call
    # the base class implementation directly. The workaround is to put the
    # shared logic in a helper function with a different name and call that
    # helper from the scriptable subclass.
def get_normalized_probs_scriptable(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Scriptable helper function for get_normalized_probs in ~BaseFairseqModel"""
if hasattr(self, "decoder"):
return self.decoder.get_normalized_probs(net_output, log_probs, sample)
elif torch.is_tensor(net_output):
# syntactic sugar for simple models which don't have a decoder
# (e.g., the classification tutorial)
logits = net_output.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def extract_features(self, *args, **kwargs):
"""Similar to *forward* but only return features."""
return self(*args, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return None
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg: Optional[DictConfig] = None,
args: Optional[Namespace] = None,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
if model_cfg is None and args is not None:
            logger.warning(
"using 'args' is deprecated, please update your code to use dataclass config"
)
model_cfg = convert_namespace_to_omegaconf(args).model
self.upgrade_state_dict(state_dict)
from fairseq.checkpoint_utils import prune_state_dict
new_state_dict = prune_state_dict(state_dict, model_cfg)
return super().load_state_dict(new_state_dict, strict)
def upgrade_state_dict(self, state_dict):
"""Upgrade old state dicts to work with newer code."""
self.upgrade_state_dict_named(state_dict, "")
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade old state dicts to work with newer code.
Args:
state_dict (dict): state dictionary to upgrade, in place
name (str): the state dict key corresponding to the current module
"""
assert state_dict is not None
def do_upgrade(m, prefix):
if len(prefix) > 0:
prefix += "."
for n, c in m.named_children():
name = prefix + n
if hasattr(c, "upgrade_state_dict_named"):
c.upgrade_state_dict_named(state_dict, name)
elif hasattr(c, "upgrade_state_dict"):
c.upgrade_state_dict(state_dict)
do_upgrade(c, name)
do_upgrade(self, name)
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
for m in self.modules():
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
def prepare_for_inference_(self, cfg: DictConfig):
"""Prepare model for inference."""
kwargs = {}
kwargs["beamable_mm_beam_size"] = (
None
if getattr(cfg.generation, "no_beamable_mm", False)
else getattr(cfg.generation, "beam", 5)
)
kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False)
if getattr(cfg.generation, "retain_dropout", False):
kwargs["retain_dropout"] = cfg.generation.retain_dropout
kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules
self.make_generation_fast_(**kwargs)
def make_generation_fast_(self, **kwargs):
"""
Legacy entry point to optimize model for faster generation.
Prefer prepare_for_inference_.
"""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except (AttributeError, ValueError): # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module, prefix):
if len(prefix) > 0:
prefix += "."
base_func = BaseFairseqModel.make_generation_fast_
for n, m in module.named_modules():
if (
m != self
and hasattr(m, "make_generation_fast_")
# don't call this implementation again, e.g., if
# children modules also inherit from BaseFairseqModel
and m.make_generation_fast_.__func__ is not base_func
):
name = prefix + n
m.make_generation_fast_(name=name, **kwargs)
apply_make_generation_fast_(self, "")
def train(mode=True):
if mode:
raise RuntimeError("cannot train after make_generation_fast")
# this model should no longer be used for training
self.eval()
self.train = train
def prepare_for_onnx_export_(self, **kwargs):
"""Make model exportable via ONNX trace."""
seen = set()
def apply_prepare_for_onnx_export_(module):
if (
module != self
and hasattr(module, "prepare_for_onnx_export_")
and module not in seen
):
seen.add(module)
module.prepare_for_onnx_export_(**kwargs)
self.apply(apply_prepare_for_onnx_export_)
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
**kwargs,
):
"""
Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model
file. Downloads and caches the pre-trained model file if needed.
The base implementation returns a
:class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to
generate translations or sample from language models. The underlying
:class:`~fairseq.models.FairseqModel` can be accessed via the
*generator.models* attribute.
Other models may override this to implement custom hub interfaces.
Args:
model_name_or_path (str): either the name of a pre-trained model to
load or a path/URL to a pre-trained model state dict
checkpoint_file (str, optional): colon-separated list of checkpoint
files in the model archive to ensemble (default: 'model.pt')
data_name_or_path (str, optional): point args.data to the archive
at the given path/URL. Can start with '.' or './' to reuse the
model archive path.
"""
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
**kwargs,
)
logger.info(x["args"])
return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"])
@classmethod
def hub_models(cls):
return {}
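# Hedged usage sketch (added): the hub entry point defined above. The model
# class, directory, and checkpoint name below are illustrative placeholders,
# not guaranteed artifacts.
#
#   interface = MyFairseqModel.from_pretrained(
#       "/path/to/model_dir",
#       checkpoint_file="model.pt",
#       data_name_or_path=".",
#   )
#   # interface is a hub_utils.GeneratorHubInterface wrapping the model(s)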
class FairseqEncoderDecoderModel(BaseFairseqModel):
"""Base class for encoder-decoder models.
Args:
encoder (FairseqEncoder): the encoder
decoder (FairseqDecoder): the decoder
"""
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., teacher forcing) to
the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
decoder_out = self.decoder(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return decoder_out
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
features = self.decoder.extract_features(
prev_output_tokens, encoder_out=encoder_out, **kwargs
)
return features
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return (self.encoder.max_positions(), self.decoder.max_positions())
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
class FairseqModel(FairseqEncoderDecoderModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
utils.deprecation_warning(
"FairseqModel is deprecated, please use FairseqEncoderDecoderModel "
"or BaseFairseqModel instead",
stacklevel=4,
)
class FairseqMultiModel(BaseFairseqModel):
"""Base class for combining multiple encoder-decoder models."""
def __init__(self, encoders, decoders):
super().__init__()
assert encoders.keys() == decoders.keys()
self.keys = list(encoders.keys())
for key in self.keys:
check_type(encoders[key], FairseqEncoder)
check_type(decoders[key], FairseqDecoder)
self.models = nn.ModuleDict(
{
key: FairseqEncoderDecoderModel(encoders[key], decoders[key])
for key in self.keys
}
)
@staticmethod
def build_shared_embeddings(
dicts: Dict[str, Dictionary],
langs: List[str],
embed_dim: int,
build_embedding: callable,
pretrained_embed_path: Optional[str] = None,
):
"""
Helper function to build shared embeddings for a set of languages after
checking that all dicts corresponding to those languages are equivalent.
Args:
dicts: Dict of lang_id to its corresponding Dictionary
langs: languages that we want to share embeddings for
embed_dim: embedding dimension
build_embedding: callable function to actually build the embedding
pretrained_embed_path: Optional path to load pretrained embeddings
"""
shared_dict = dicts[langs[0]]
if any(dicts[lang] != shared_dict for lang in langs):
raise ValueError(
"--share-*-embeddings requires a joined dictionary: "
"--share-encoder-embeddings requires a joined source "
"dictionary, --share-decoder-embeddings requires a joined "
"target dictionary, and --share-all-embeddings requires a "
"joint source + target dictionary."
)
return build_embedding(shared_dict, embed_dim, pretrained_embed_path)
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
raise NotImplementedError
def max_positions(self):
"""Maximum length supported by the model."""
return {
key: (
self.models[key].encoder.max_positions(),
self.models[key].decoder.max_positions(),
)
for key in self.keys
}
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return min(model.decoder.max_positions() for model in self.models.values())
@property
def encoder(self):
return self.models[self.keys[0]].encoder
@property
def decoder(self):
return self.models[self.keys[0]].decoder
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg=None,
args: Optional[Namespace] = None,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
if model_cfg is None and args is not None:
            logger.warning(
"using 'args' is deprecated, please update your code to use dataclass config"
)
model_cfg = convert_namespace_to_omegaconf(args).model
self.upgrade_state_dict(state_dict)
from fairseq.checkpoint_utils import prune_state_dict
new_state_dict = prune_state_dict(state_dict, model_cfg)
return super().load_state_dict(new_state_dict, strict)
class FairseqLanguageModel(BaseFairseqModel):
"""Base class for decoder-only models.
Args:
decoder (FairseqDecoder): the decoder
"""
def __init__(self, decoder):
super().__init__()
self.decoder = decoder
check_type(self.decoder, FairseqDecoder)
def forward(self, src_tokens, **kwargs):
"""
Run the forward pass for a decoder-only model.
Feeds a batch of tokens through the decoder to predict the next tokens.
Args:
src_tokens (LongTensor): tokens on which to condition the decoder,
of shape `(batch, tgt_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
Returns:
tuple:
- the decoder's output of shape `(batch, seq_len, vocab)`
- a dictionary with any model-specific outputs
"""
return self.decoder(src_tokens, **kwargs)
def forward_decoder(self, prev_output_tokens, **kwargs):
return self.decoder(prev_output_tokens, **kwargs)
def extract_features(self, src_tokens, **kwargs):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, seq_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
return self.decoder.extract_features(src_tokens, **kwargs)
def output_layer(self, features, **kwargs):
"""Project features to the default output size (typically vocabulary size)."""
return self.decoder.output_layer(features, **kwargs)
def max_positions(self):
"""Maximum length supported by the model."""
return self.decoder.max_positions()
def max_decoder_positions(self):
"""Maximum length supported by the decoder."""
return self.decoder.max_positions()
@property
def supported_targets(self):
return {"future"}
class FairseqEncoderModel(BaseFairseqModel):
"""Base class for encoder-only models.
Args:
encoder (FairseqEncoder): the encoder
"""
def __init__(self, encoder):
super().__init__()
self.encoder = encoder
check_type(self.encoder, FairseqEncoder)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
        Run the forward pass for an encoder-only model.
Feeds a batch of tokens through the encoder to generate features.
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
Returns:
the encoder's output, typically of shape `(batch, src_len, features)`
"""
return self.encoder(src_tokens, src_lengths, **kwargs)
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
encoder_out = net_output["encoder_out"]
if torch.is_tensor(encoder_out):
logits = encoder_out.float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
raise NotImplementedError
def max_positions(self):
"""Maximum length supported by the model."""
return self.encoder.max_positions()
# ==== file: CIF-HieraDist-main/fairseq/models/fairseq_incremental_decoder.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, Optional
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.models import FairseqDecoder
from torch import Tensor
logger = logging.getLogger(__name__)
@with_incremental_state
class FairseqIncrementalDecoder(FairseqDecoder):
"""Base class for incremental decoders.
Incremental decoding is a special mode at inference time where the Model
only receives a single timestep of input corresponding to the previous
output token (for teacher forcing) and must produce the next output
*incrementally*. Thus the model must cache any long-term state that is
needed about the sequence, e.g., hidden states, convolutional states, etc.
Compared to the standard :class:`FairseqDecoder` interface, the incremental
decoder interface allows :func:`forward` functions to take an extra keyword
argument (*incremental_state*) that can be used to cache state across
time-steps.
The :class:`FairseqIncrementalDecoder` interface also defines the
:func:`reorder_incremental_state` method, which is used during beam search
to select and reorder the incremental state based on the selection of beams.
To learn more about how incremental decoding works, refer to `this blog
<http://www.telesens.co/2019/04/21/understanding-incremental-decoding-in-fairseq/>`_.
"""
def __init__(self, dictionary):
super().__init__(dictionary)
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
):
"""
Args:
prev_output_tokens (LongTensor): shifted output tokens of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (dict, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict, optional): dictionary used for storing
state during :ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
):
"""
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
raise NotImplementedError
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
pass
def reorder_incremental_state_scripting(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Main entry point for reordering the incremental state.
Due to limitations in TorchScript, we call this function in
:class:`fairseq.sequence_generator.SequenceGenerator` instead of
calling :func:`reorder_incremental_state` directly.
"""
for module in self.modules():
if hasattr(module, "reorder_incremental_state"):
result = module.reorder_incremental_state(incremental_state, new_order)
if result is not None:
incremental_state = result
def set_beam_size(self, beam_size):
"""Sets the beam size in the decoder and all children."""
if getattr(self, "_beam_size", -1) != beam_size:
seen = set()
def apply_set_beam_size(module):
if (
module != self
and hasattr(module, "set_beam_size")
and module not in seen
):
seen.add(module)
module.set_beam_size(beam_size)
self.apply(apply_set_beam_size)
self._beam_size = beam_size
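# --- Hedged sketch (editor's addition, not part of the original file).
# Illustrates the caching contract from the class docstring: the decoder
# sees one timestep at a time, stashes long-term state in *incremental_state*,
# and beam search reorders that state via reorder_incremental_state().
# `_CountingDecoder` is hypothetical.
if __name__ == "__main__":
    import torch
    class _CountingDecoder(FairseqIncrementalDecoder):
        """Caches how many timesteps each hypothesis has consumed."""
        def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
            state = self.get_incremental_state(incremental_state, "steps")
            count = state["count"] if state is not None else torch.zeros(prev_output_tokens.size(0))
            count = count + 1
            self.set_incremental_state(incremental_state, "steps", {"count": count})
            return count, {}
        def reorder_incremental_state(self, incremental_state, new_order):
            state = self.get_incremental_state(incremental_state, "steps")
            if state is not None:
                state["count"] = state["count"].index_select(0, new_order)
                self.set_incremental_state(incremental_state, "steps", state)
    dec = _CountingDecoder(dictionary=None)
    inc = {}
    tok = torch.zeros(2, 1, dtype=torch.long)
    dec(tok, incremental_state=inc)
    dec(tok, incremental_state=inc)
    count, _ = dec(tok, incremental_state=inc)
    assert count.tolist() == [3.0, 3.0]
    dec.reorder_incremental_state_scripting(inc, torch.tensor([1, 0]))  # beam swap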
| 4,468
| 36.554622
| 89
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from contextlib import ExitStack
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from omegaconf import open_dict, OmegaConf
from .composite_encoder import CompositeEncoder
from .distributed_fairseq_model import DistributedFairseqModel
from .fairseq_decoder import FairseqDecoder
from .fairseq_encoder import FairseqEncoder
from .fairseq_incremental_decoder import FairseqIncrementalDecoder
from .fairseq_model import (
BaseFairseqModel,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqLanguageModel,
FairseqModel,
FairseqMultiModel,
)
MODEL_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
ARCH_MODEL_INV_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
__all__ = [
"BaseFairseqModel",
"CompositeEncoder",
"DistributedFairseqModel",
"FairseqDecoder",
"FairseqEncoder",
"FairseqEncoderDecoderModel",
"FairseqEncoderModel",
"FairseqIncrementalDecoder",
"FairseqLanguageModel",
"FairseqModel",
"FairseqMultiModel",
]
def build_model(cfg: FairseqDataclass, task):
model = None
model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None)
if not model_type and len(cfg) == 1:
# this is hit if config object is nested in directory that is named after model type
model_type = next(iter(cfg))
if model_type in MODEL_DATACLASS_REGISTRY:
cfg = cfg[model_type]
else:
raise Exception(
"Could not infer model type from directory. Please add _name field to indicate model type. "
"Available models: "
+ str(MODEL_DATACLASS_REGISTRY.keys())
+ " Requested model type: "
+ model_type
)
if model_type in ARCH_MODEL_REGISTRY:
# case 1: legacy models
model = ARCH_MODEL_REGISTRY[model_type]
elif model_type in MODEL_DATACLASS_REGISTRY:
# case 2: config-driven models
model = MODEL_REGISTRY[model_type]
if model_type in MODEL_DATACLASS_REGISTRY:
# set defaults from dataclass. note that arch name and model name can be the same
dc = MODEL_DATACLASS_REGISTRY[model_type]
if isinstance(cfg, argparse.Namespace):
cfg = dc.from_namespace(cfg)
else:
cfg = merge_with_parent(dc(), cfg)
else:
if model_type in ARCH_CONFIG_REGISTRY:
with open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack():
# this calls the different "arch" functions (like base_architecture()) that you indicate
# if you specify --arch on the command line. this is only applicable to the old argparse based models
# hydra models should expose different architectures via different config files
# it will modify the cfg object and default parameters according to the arch
ARCH_CONFIG_REGISTRY[model_type](cfg)
assert model is not None, (
f"Could not infer model type from {cfg}. "
"Available models: {}".format(MODEL_DATACLASS_REGISTRY.keys())
+ f" Requested model type: {model_type}"
)
return model.build_model(cfg, task)
def register_model(name, dataclass=None):
"""
New model types can be added to fairseq with the :func:`register_model`
function decorator.
For example::
@register_model('lstm')
class LSTM(FairseqEncoderDecoderModel):
(...)
.. note:: All models must implement the :class:`BaseFairseqModel` interface.
Typically you will extend :class:`FairseqEncoderDecoderModel` for
sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
language modeling tasks.
Args:
name (str): the name of the model
"""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError("Cannot register duplicate model ({})".format(name))
if not issubclass(cls, BaseFairseqModel):
raise ValueError(
"Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__)
)
MODEL_REGISTRY[name] = cls
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
MODEL_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="model", node=node, provider="fairseq")
@register_model_architecture(name, name)
def noop(_):
pass
return cls
return register_model_cls
def register_model_architecture(model_name, arch_name):
"""
New model architectures can be added to fairseq with the
:func:`register_model_architecture` function decorator. After registration,
model architectures can be selected with the ``--arch`` command-line
argument.
For example::
@register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
def lstm_luong_wmt_en_de(cfg):
            cfg.encoder_embed_dim = getattr(cfg, 'encoder_embed_dim', 1000)
(...)
The decorated function should take a single argument *cfg*, which is a
:class:`omegaconf.DictConfig`. The decorated function should modify these
arguments in-place to match the desired architecture.
Args:
model_name (str): the name of the Model (Model must already be
registered)
arch_name (str): the name of the model architecture (``--arch``)
"""
def register_model_arch_fn(fn):
if model_name not in MODEL_REGISTRY:
raise ValueError(
"Cannot register model architecture for unknown model type ({})".format(
model_name
)
)
if arch_name in ARCH_MODEL_REGISTRY:
raise ValueError(
"Cannot register duplicate model architecture ({})".format(arch_name)
)
if not callable(fn):
raise ValueError(
"Model architecture must be callable ({})".format(arch_name)
)
ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name
ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
ARCH_CONFIG_REGISTRY[arch_name] = fn
return fn
return register_model_arch_fn
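# --- Hedged usage sketch (editor's addition, not part of the original file).
# How the two decorators compose: register a hypothetical model name, then
# attach a named architecture that fills defaults on the cfg object. Note
# that register_model() also registers a no-op architecture under the model
# name itself.
if __name__ == "__main__":
    @register_model("toy_lm")
    class ToyLM(BaseFairseqModel):
        @classmethod
        def build_model(cls, cfg, task):
            return cls()
    @register_model_architecture("toy_lm", "toy_lm_big")
    def toy_lm_big(cfg):
        cfg.embed_dim = getattr(cfg, "embed_dim", 1024)
    assert ARCH_MODEL_REGISTRY["toy_lm_big"] is ToyLM
    assert ARCH_MODEL_INV_REGISTRY["toy_lm"] == ["toy_lm", "toy_lm_big"]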
def import_models(models_dir, namespace):
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
import_models(models_dir, "fairseq.models")
| 8,206
| 33.923404
| 117
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/transformer_lm.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Optional
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
DEFAULT_MIN_PARAMS_TO_WRAP,
Embedding,
TransformerDecoder,
)
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from fairseq.utils import safe_getattr, safe_hasattr
from omegaconf import II
DEFAULT_MAX_TARGET_POSITIONS = 1024
@dataclass
class TransformerLanguageModelConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu", metadata={"help": "activation function to use"}
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
relu_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN."}
)
decoder_embed_dim: int = field(
default=512, metadata={"help": "decoder embedding dimension"}
)
decoder_output_dim: int = field(
default=512, metadata={"help": "decoder output dimension"}
)
decoder_input_dim: int = field(
default=512, metadata={"help": "decoder input dimension"}
)
decoder_ffn_embed_dim: int = field(
default=2048, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num decoder layers"})
decoder_attention_heads: int = field(
default=8, metadata={"help": "num decoder attention heads"}
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_decoder_final_norm: bool = field(
default=False,
metadata={"help": "don't add an extra layernorm after the last decoder block"},
)
adaptive_softmax_cutoff: Optional[str] = field(
default=None,
metadata={
"help": "comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion"
},
)
adaptive_softmax_dropout: float = field(
default=0,
metadata={"help": "sets adaptive softmax dropout for the tail projections"},
)
adaptive_softmax_factor: float = field(
        default=4, metadata={"help": "adaptive softmax factor"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
character_embeddings: bool = field(
default=False,
metadata={
"help": "if set, uses character embedding convolutions to produce token embeddings"
},
)
character_filters: str = field(
default="[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]",
metadata={"help": "size of character embeddings"},
)
character_embedding_dim: int = field(
default=4, metadata={"help": "size of character embeddings"}
)
char_embedder_highway_layers: int = field(
default=2,
metadata={"help": "number of highway layers for character token embeddder"},
)
adaptive_input: bool = field(
default=False, metadata={"help": "if set, uses adaptive input"}
)
adaptive_input_factor: float = field(
default=4, metadata={"help": "adaptive input factor"}
)
adaptive_input_cutoff: Optional[str] = field(
default=None,
metadata={"help": "comma separated list of adaptive input cutoff points."},
)
tie_adaptive_weights: bool = field(
default=False,
metadata={
"help": "if set, ties the weights of adaptive softmax and adaptive input"
},
)
tie_adaptive_proj: bool = field(
default=False,
metadata={
"help": "if set, ties the projection weights of adaptive softmax and adaptive input"
},
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False, metadata={"help": "checkpoint activations at each layer"}
)
offload_activations: bool = field(
default=False,
metadata={"help": "move checkpointed activations to CPU after they are used."},
)
# config for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "LayerDrop probability for decoder"}
)
decoder_layers_to_keep: Optional[str] = field(
default=None,
metadata={
"help": "which layers to *keep* when pruning as a comma-separated list"
},
)
# config for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
quant_noise_pq: float = field(
default=0.0,
metadata={"help": "iterative PQ quantization noise at training time"},
)
quant_noise_pq_block_size: int = field(
default=8,
metadata={"help": "block size of quantization noise at training time"},
)
quant_noise_scalar: float = field(
default=0.0,
metadata={
"help": "scalar quantization noise and scalar quantization at training time"
},
)
# config for Fully Sharded Data Parallel (FSDP) training
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": (
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
)
},
)
# config for "BASE Layers: Simplifying Training of Large, Sparse Models"
base_layers: Optional[int] = field(
default=0, metadata={"help": "number of BASE layers in total"}
)
base_sublayers: Optional[int] = field(
default=1, metadata={"help": "number of sublayers in each BASE layer"}
)
base_shuffle: Optional[int] = field(
default=1,
metadata={"help": "shuffle tokens between workers before computing assignment"},
)
# NormFormer
scale_fc: Optional[bool] = field(
default=False,
metadata={"help": "Insert LayerNorm between fully connected layers"},
)
scale_attn: Optional[bool] = field(
default=False, metadata={"help": "Insert LayerNorm after attention"}
)
scale_heads: Optional[bool] = field(
default=False,
metadata={"help": "Learn a scale coefficient for each attention head"},
)
scale_resids: Optional[bool] = field(
default=False,
metadata={"help": "Learn a scale coefficient for each residual connection"},
)
# options from other parts of the config
add_bos_token: bool = II("task.add_bos_token")
tokens_per_sample: int = II("task.tokens_per_sample")
max_target_positions: Optional[int] = II("task.max_target_positions")
tpu: bool = II("common.tpu")
@register_model("transformer_lm", dataclass=TransformerLanguageModelConfig)
class TransformerLanguageModel(FairseqLanguageModel):
@classmethod
def hub_models(cls):
def moses_fastbpe(path):
return {"path": path, "tokenizer": "moses", "bpe": "fastbpe"}
def spm(path):
return {"path": path, "tokenizer": "space", "bpe": "sentencepiece"}
return {
"transformer_lm.gbw.adaptive_huge": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2",
"transformer_lm.wiki103.adaptive": "https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.v2.tar.bz2",
"transformer_lm.wmt19.en": moses_fastbpe(
"https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.en.tar.bz2"
),
"transformer_lm.wmt19.de": moses_fastbpe(
"https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.de.tar.bz2"
),
"transformer_lm.wmt19.ru": moses_fastbpe(
"https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt19.ru.tar.bz2"
),
"transformer_lm.wmt20.en": spm(
"https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.en.tar.gz"
),
"transformer_lm.wmt20.ta": spm(
"https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.ta.tar.gz"
),
"transformer_lm.wmt20.iu.news": spm(
"https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.news.tar.gz"
),
"transformer_lm.wmt20.iu.nh": spm(
"https://dl.fbaipublicfiles.com/fairseq/models/lm/wmt20.iu.nh.tar.gz"
),
}
def __init__(self, decoder):
super().__init__(decoder)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if safe_getattr(args, "max_target_positions", None) is None:
args.max_target_positions = safe_getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(
task.source_dictionary,
eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(
len(task.source_dictionary),
task.source_dictionary.pad(),
args.decoder_input_dim,
args.adaptive_input_factor,
args.decoder_embed_dim,
options.eval_str_list(args.adaptive_input_cutoff, type=int),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
embed_tokens = cls.build_embedding(
args, task.source_dictionary, args.decoder_input_dim
)
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert (
args.adaptive_softmax_cutoff == args.adaptive_input_cutoff
), "{} != {}".format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff
)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = TransformerDecoder(
args, task.target_dictionary, embed_tokens, no_encoder_attn=True
)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
embed_tokens = Embedding(len(dictionary), embed_dim, dictionary.pad())
return embed_tokens
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if safe_hasattr(args, "no_tie_adaptive_proj"):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if safe_hasattr(args, "decoder_final_norm"):
args.no_decoder_final_norm = not args.decoder_final_norm
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 2048)
args.decoder_layers = safe_getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8)
args.adaptive_softmax_cutoff = safe_getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0)
args.adaptive_softmax_factor = safe_getattr(args, "adaptive_softmax_factor", 4)
args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", False)
args.activation_fn = safe_getattr(args, "activation_fn", "relu")
args.decoder_layerdrop = safe_getattr(args, "decoder_layerdrop", 0)
args.decoder_layers_to_keep = safe_getattr(args, "decoder_layers_to_keep", None)
args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0)
args.base_layers = safe_getattr(args, "base_layers", 0)
args.base_sublayers = safe_getattr(args, "base_sublayers", 1)
args.base_shuffle = safe_getattr(args, "base_shuffle", False)
args.add_bos_token = safe_getattr(args, "add_bos_token", False)
args.no_token_positional_embeddings = safe_getattr(
args, "no_token_positional_embeddings", False
)
args.share_decoder_input_output_embed = safe_getattr(
args, "share_decoder_input_output_embed", False
)
args.character_embeddings = safe_getattr(args, "character_embeddings", False)
args.decoder_output_dim = safe_getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = safe_getattr(
args, "decoder_input_dim", args.decoder_embed_dim
)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", False)
args.adaptive_input = safe_getattr(args, "adaptive_input", False)
args.adaptive_input_factor = safe_getattr(args, "adaptive_input_factor", 4)
args.adaptive_input_cutoff = safe_getattr(args, "adaptive_input_cutoff", None)
args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", False)
args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", False)
args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False)
args.checkpoint_activations = safe_getattr(args, "checkpoint_activations", False)
args.offload_activations = safe_getattr(args, "offload_activations", False)
args.scale_fc = safe_getattr(args, "scale_fc", False)
args.scale_attn = safe_getattr(args, "scale_attn", False)
args.scale_heads = safe_getattr(args, "scale_heads", False)
args.scale_resids = safe_getattr(args, "scale_resids", False)
if args.offload_activations:
args.checkpoint_activations = True
@register_model_architecture("transformer_lm", "transformer_lm_big")
def transformer_lm_big(args):
args.decoder_layers = safe_getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_wiki103")
@register_model_architecture("transformer_lm", "transformer_lm_baevski_wiki103")
def transformer_lm_baevski_wiki103(args):
args.decoder_layers = safe_getattr(args, "decoder_layers", 16)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 8)
args.dropout = safe_getattr(args, "dropout", 0.3)
args.adaptive_input = safe_getattr(args, "adaptive_input", True)
args.tie_adaptive_weights = safe_getattr(args, "tie_adaptive_weights", True)
args.adaptive_input_cutoff = safe_getattr(
args, "adaptive_input_cutoff", "20000,60000"
)
args.adaptive_softmax_cutoff = safe_getattr(
args, "adaptive_softmax_cutoff", "20000,60000"
)
args.adaptive_softmax_dropout = safe_getattr(args, "adaptive_softmax_dropout", 0.2)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_dropout = safe_getattr(args, "activation_dropout", 0.1)
args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True)
args.tie_adaptive_proj = safe_getattr(args, "tie_adaptive_proj", True)
transformer_lm_big(args)
@register_model_architecture("transformer_lm", "transformer_lm_gbw")
@register_model_architecture("transformer_lm", "transformer_lm_baevski_gbw")
def transformer_lm_baevski_gbw(args):
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 512)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.no_decoder_final_norm = safe_getattr(args, "no_decoder_final_norm", True)
transformer_lm_big(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt")
def transformer_lm_gpt(args):
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768)
args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 3072)
args.decoder_layers = safe_getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt2_small")
def transformer_lm_gpt2_small(args):
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt2_tiny")
def transformer_lm_gpt2_tiny(args):
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 64)
args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 64)
args.decoder_layers = safe_getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 1)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt2_medium")
def transformer_lm_gpt2_medium(args):
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1280)
args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 5120)
args.decoder_layers = safe_getattr(args, "decoder_layers", 36)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 20)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt2_big")
def transformer_lm_gpt2_big(args):
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1600)
args.decoder_ffn_embed_dim = safe_getattr(args, "decoder_ffn_embed_dim", 6400)
args.decoder_layers = safe_getattr(args, "decoder_layers", 48)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 25)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
def base_gpt3_architecture(args):
args.decoder_input_dim = args.decoder_embed_dim
args.decoder_output_dim = args.decoder_embed_dim
args.decoder_ffn_embed_dim = safe_getattr(
args, "decoder_ffn_embed_dim", args.decoder_embed_dim * 4
)
# GPT-3 used learned positional embeddings, rather than sinusoidal
args.decoder_learned_pos = safe_getattr(args, "decoder_learned_pos", True)
args.dropout = safe_getattr(args, "dropout", 0.0)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.0)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
args.share_decoder_input_output_embed = True
base_lm_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_small")
def transformer_lm_gpt3_small(args):
# 125M params
args.decoder_layers = safe_getattr(args, "decoder_layers", 12)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 768)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 12)
base_gpt3_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_medium")
def transformer_lm_gpt3_medium(args):
# 350M params
args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1024)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
base_gpt3_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_large")
def transformer_lm_gpt3_large(args):
# 760M params
args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 1536)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 16)
base_gpt3_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_xl")
def transformer_lm_gpt3_xl(args):
# 1.3B params
args.decoder_layers = safe_getattr(args, "decoder_layers", 24)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2048)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32)
base_gpt3_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_2_7")
def transformer_lm_gpt3_2_7(args):
# 2.7B params
args.decoder_layers = safe_getattr(args, "decoder_layers", 32)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 2560)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32)
base_gpt3_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_6_7")
def transformer_lm_gpt3_6_7(args):
# 6.7B params
args.decoder_layers = safe_getattr(args, "decoder_layers", 32)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 4096)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 32)
base_gpt3_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_13")
def transformer_lm_gpt3_13(args):
# 13B params
args.decoder_layers = safe_getattr(args, "decoder_layers", 40)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 5120)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 40)
base_gpt3_architecture(args)
@register_model_architecture("transformer_lm", "transformer_lm_gpt3_175")
def transformer_lm_gpt3_175(args):
# 175B params
args.decoder_layers = safe_getattr(args, "decoder_layers", 96)
args.decoder_embed_dim = safe_getattr(args, "decoder_embed_dim", 12288)
args.decoder_attention_heads = safe_getattr(args, "decoder_attention_heads", 96)
base_gpt3_architecture(args)
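# --- Hedged sketch (editor's addition, not part of the original file).
# The architecture functions above mutate an args object in place, filling
# in every attribute the user did not set. Calling one directly shows the
# defaults it produces:
if __name__ == "__main__":
    from argparse import Namespace
    args = Namespace()
    transformer_lm_gpt(args)
    assert args.decoder_embed_dim == 768
    assert args.decoder_layers == 12
    assert args.activation_fn == "gelu"
    assert args.decoder_normalize_before is True  # forced by base_lm_architecture()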
| 25,286
| 42.977391
| 129
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/distributed_fairseq_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import signal
import threading
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from fairseq.distributed import (
DistributedTimeoutWrapper,
LegacyDistributedDataParallel,
ModuleProxyWrapper,
TPUDistributedDataParallel,
)
logger = logging.getLogger(__name__)
_SLOWMO_DDP_DISABLED = False
try:
from fairscale.experimental.nn.data_parallel import (
SlowMoBaseAlgorithm,
SlowMoDistributedDataParallel,
)
except ImportError:
_SLOWMO_DDP_DISABLED = True
def DistributedFairseqModel(args, model, process_group, device):
"""
Wrap a *model* to support distributed data parallel training.
This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.
Args:
args (argparse.Namespace): fairseq args
model (BaseFairseqModel): model to wrap
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
device: device to move model to
"""
assert isinstance(model, nn.Module)
if args.tpu:
wrapped_model = TPUDistributedDataParallel(
module=model.to(device),
process_group=process_group,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend in {"c10d", "pytorch_ddp"}:
wrapped_model = DistributedDataParallel(
module=model.to(device),
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
bucket_cap_mb=args.bucket_cap_mb,
process_group=process_group,
find_unused_parameters=args.find_unused_parameters,
gradient_as_bucket_view=args.gradient_as_bucket_view,
)
if args.ddp_comm_hook == "fp16":
logger.info("enable fp16 communication hook in DDP")
try:
from torch.distributed.algorithms.ddp_comm_hooks import (
register_ddp_comm_hook,
DDPCommHookType,
)
            except ImportError:
logger.error(
"Could not import from torch.distributed.algorithms.ddp_comm_hooks; you may need to update your pytorch version"
)
raise
register_ddp_comm_hook(DDPCommHookType.FP16_COMPRESS, wrapped_model)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend in {"no_c10d", "legacy_ddp"}:
wrapped_model = LegacyDistributedDataParallel(
module=model.to(device),
buffer_size=2**28,
process_group=process_group,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend == "slowmo":
if _SLOWMO_DDP_DISABLED:
raise ImportError(
"Cannot find SlowMoDistributedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
# The values of slowmo_momentum below were obtained by tuning on the
# En-De 16 dataset by training the transformer_wmt_en_de_large model
if args.slowmo_momentum is None:
if args.distributed_world_size <= 16:
args.slowmo_momentum = 0.0
elif args.distributed_world_size <= 32:
args.slowmo_momentum = 0.2
elif args.distributed_world_size <= 64:
args.slowmo_momentum = 0.5
else:
args.slowmo_momentum = 0.6
slowmo_base_algorithm = SlowMoBaseAlgorithm[args.slowmo_base_algorithm.upper()]
wrapped_model = SlowMoDistributedDataParallel(
module=model.to(device),
broadcast_buffers=args.broadcast_buffers,
nprocs_per_node=args.nprocs_per_node,
slowmo_momentum=args.slowmo_momentum,
slowmo_base_algorithm=slowmo_base_algorithm,
localsgd_frequency=args.localsgd_frequency,
)
# forward missing getattr and state_dict/load_state_dict to orig model
wrapped_model = ModuleProxyWrapper(wrapped_model)
elif args.ddp_backend == "fully_sharded":
try:
from fairscale.nn.data_parallel import FullyShardedDataParallel as FSDP
except ImportError:
raise ImportError(
"Cannot find FullyShardedDataParallel. "
"Please install fairscale with: pip install fairscale"
)
assert isinstance(model, FSDP), "expected model to already be wrapped in FSDP"
wrapped_model = model
if args.memory_efficient_fp16:
wrapped_model = wrapped_model.half()
if not args.cpu_offload:
wrapped_model = wrapped_model.to(device=device)
else:
raise ValueError("Unknown --ddp-backend: " + args.ddp_backend)
# kill hung distributed jobs after a timeout
if getattr(args, "heartbeat_timeout", -1) > 0:
wrapped_model = DistributedTimeoutWrapper(
wrapped_model, timeout=getattr(args, "heartbeat_timeout", -1)
)
return wrapped_model
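# --- Hedged sketch (editor's addition, not part of the original file).
# Minimal args consumed by the legacy_ddp branch above. Wrapping requires an
# initialized process group, so the call is guarded; ModuleProxyWrapper then
# forwards missing attributes to the wrapped module.
if __name__ == "__main__":
    from argparse import Namespace
    import torch.distributed as dist
    args = Namespace(tpu=False, ddp_backend="legacy_ddp")
    if dist.is_available() and dist.is_initialized():
        wrapped = DistributedFairseqModel(
            args, nn.Linear(8, 8), dist.group.WORLD, torch.device("cpu")
        )
        assert wrapped.weight.shape == (8, 8)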
| 5,741
| 37.536913
| 132
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/wav2vec/wav2vec.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import logging
import math
from typing import Optional, Tuple
from omegaconf import II
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GumbelVectorQuantizer,
KmeansVectorQuantizer,
TransposeLast,
)
from fairseq.tasks import FairseqTask
from fairseq.utils import buffered_arange
logger = logging.getLogger(__name__)
AGGREGATOR_CHOICES = ChoiceEnum(["cnn", "gru"])
PROJECT_FEATURES_CHOICES = ChoiceEnum(["none", "same", "new"])
ACTIVATION_CHOICES = ChoiceEnum(["relu", "gelu"])
VQ_TYPE_CHOICES = ChoiceEnum(["none", "gumbel", "kmeans"])
@dataclass
class Wav2VecConfig(FairseqDataclass):
prediction_steps: int = field(
default=12, metadata={"help": "number of steps ahead to predict"}
)
sample_distance: Optional[int] = field(
default=None,
metadata={
"help": "sample distance from target. does not work properly with cross-sampling"
},
)
cross_sample_negatives: int = field(
default=0, metadata={"help": "num of cross sampled negatives"}
)
num_negatives: int = field(
default=10, metadata={"help": "num of sampled negatives"}
)
conv_feature_layers: str = field(
default="[(512, 10, 5), (512, 8, 4), (512, 4, 2), (512, 4, 2), (512, 4, 2), (512, 1, 1), (512, 1, 1), (512, 1, 1)]",
metadata={
"help": "convolutional feature extraction layers [(dim, kernel_size, stride), ...]"
},
)
conv_aggregator_layers: str = field(
default="[(512, 2, 1), (512, 3, 1), (512, 4, 1), (512, 5, 1), (512, 6, 1), (512, 7, 1), (512, 8, 1), (512, 9, 1), (512, 10, 1), (512, 11, 1), (512, 12, 1), (512, 13, 1)]",
metadata={
"help": "convolutional aggregator layers [(dim, kernel_size, stride), ...]"
},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout to apply within the model"}
)
dropout_features: float = field(
default=0.0, metadata={"help": "dropout to apply to the features"}
)
dropout_agg: float = field(
default=0.0, metadata={"help": "dropout to apply after aggregation step"}
)
aggregator: AGGREGATOR_CHOICES = field(
default="cnn", metadata={"help": "type of aggregator to use"}
)
gru_dim: int = field(default=512, metadata={"help": "GRU dimensionality"})
no_conv_bias: bool = field(
default=False, metadata={"help": "if set, does not learn bias for conv layers"}
)
agg_zero_pad: bool = field(
default=False,
metadata={"help": "if set, zero pads in aggregator instead of repl pad"},
)
skip_connections_feat: bool = field(
default=False,
metadata={"help": "if set, adds skip connections to the feature extractor"},
)
skip_connections_agg: bool = field(
default=True,
metadata={"help": "if set, adds skip connections to the aggregator"},
)
residual_scale: float = field(
default=0.5, metadata={"help": "scales residual by sqrt(value)"}
)
log_compression: bool = field(
default=True,
metadata={"help": "if set, adds a log compression to feature extractor"},
)
balanced_classes: bool = field(
default=False,
metadata={"help": "if set, loss is scaled to balance for number of negatives"},
)
project_features: PROJECT_FEATURES_CHOICES = field(
default="none",
metadata={
"help": "if not none, features are projected using the (same or new) aggregator"
},
)
non_affine_group_norm: bool = field(
default=False, metadata={"help": "if set, group norm is not affine"}
)
offset: str = field(
default="auto",
metadata={
"help": "if set to 'auto', it is computed automatically from the receptive field, else set to int value"
},
)
activation: ACTIVATION_CHOICES = field(
default="relu",
        metadata={"help": "which activation function to use"},
)
vq_type: VQ_TYPE_CHOICES = field(
default="none", metadata={"help": "which type of quantizer to use"}
)
vq_vars: int = field(
default=320,
metadata={"help": "project to this many vector quantized variables per group"},
)
vq_groups: int = field(
default=2, metadata={"help": "number of groups of latent variables"}
)
vq_dim: int = field(
default=0,
metadata={
"help": "uses this dimensionality for quantized vectors. 0 to use model dim // groups"
},
)
vq_depth: int = field(
default=1, metadata={"help": "number of layers for vq weight projection"}
)
combine_groups: bool = field(
default=False, metadata={"help": "if set, variables are shared among groups"}
)
vq_temp: Tuple[float, float, float] = field(
default=(2.0, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling with gumbel softmax. should be a tuple of 3 values (start, end, decay)"
},
)
vq_gamma: float = field(
default=0.25,
metadata={"help": "gamma parameter for kmeans style vector quantization"},
)
infonce: bool = II("criterion.infonce")
@register_model("wav2vec", dataclass=Wav2VecConfig)
class Wav2VecModel(BaseFairseqModel):
@classmethod
def build_model(cls, cfg: Wav2VecConfig, task: FairseqTask):
"""Build a new model instance."""
model = Wav2VecModel(cfg)
logger.info(model)
return model
def __init__(self, cfg: Wav2VecConfig):
super().__init__()
self.prediction_steps = cfg.prediction_steps
offset = cfg.offset
if cfg.activation == "relu":
activation = nn.ReLU()
elif cfg.activation == "gelu":
activation = nn.GELU()
else:
raise Exception("unknown activation " + cfg.activation)
feature_enc_layers = eval(cfg.conv_feature_layers)
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
log_compression=cfg.log_compression,
skip_connections=cfg.skip_connections_feat,
residual_scale=cfg.residual_scale,
non_affine_group_norm=cfg.non_affine_group_norm,
activation=activation,
)
embed = feature_enc_layers[-1][0]
self.vector_quantizer = None
if cfg.vq_type == "gumbel":
self.vector_quantizer = GumbelVectorQuantizer(
dim=embed,
num_vars=cfg.vq_vars,
temp=cfg.vq_temp,
groups=cfg.vq_groups,
combine_groups=cfg.combine_groups,
vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed,
time_first=False,
activation=activation,
weight_proj_depth=cfg.vq_depth,
weight_proj_factor=2,
)
elif cfg.vq_type == "kmeans":
self.vector_quantizer = KmeansVectorQuantizer(
dim=embed,
num_vars=cfg.vq_vars,
groups=cfg.vq_groups,
combine_groups=cfg.combine_groups,
vq_dim=cfg.vq_dim if cfg.vq_dim > 0 else embed,
time_first=False,
gamma=cfg.vq_gamma,
)
else:
assert (
cfg.vq_type == "none" or cfg.vq_type is None
), "Unknown quantizer type"
if cfg.offset == "auto":
jin = 0
rin = 0
for _, k, stride in feature_enc_layers:
if rin == 0:
rin = k
rin = rin + (k - 1) * jin
if jin == 0:
jin = stride
else:
jin *= stride
offset = math.ceil(rin / jin)
offset = int(offset)
def make_aggregator():
if cfg.aggregator == "cnn":
agg_layers = eval(cfg.conv_aggregator_layers)
agg_dim = agg_layers[-1][0]
feature_aggregator = ConvAggegator(
conv_layers=agg_layers,
embed=embed,
dropout=cfg.dropout,
skip_connections=cfg.skip_connections_agg,
residual_scale=cfg.residual_scale,
non_affine_group_norm=cfg.non_affine_group_norm,
conv_bias=not cfg.no_conv_bias,
zero_pad=cfg.agg_zero_pad,
activation=activation,
)
elif cfg.aggregator == "gru":
agg_dim = cfg.gru_dim
feature_aggregator = nn.Sequential(
TransposeLast(),
nn.GRU(
input_size=embed,
hidden_size=agg_dim,
num_layers=1,
dropout=cfg.dropout,
),
TransposeLast(deconstruct_idx=0),
)
else:
raise Exception("unknown aggregator type " + cfg.aggregator)
return feature_aggregator, agg_dim
self.feature_aggregator, agg_dim = make_aggregator()
self.wav2vec_predictions = Wav2VecPredictionsModel(
in_dim=agg_dim,
out_dim=embed,
prediction_steps=cfg.prediction_steps,
n_negatives=cfg.num_negatives,
cross_sample_negatives=cfg.cross_sample_negatives,
sample_distance=cfg.sample_distance,
dropout=cfg.dropout,
offset=offset,
balanced_classes=cfg.balanced_classes,
infonce=cfg.infonce,
)
self.dropout_feats = nn.Dropout(p=cfg.dropout_features)
self.dropout_agg = nn.Dropout(p=cfg.dropout_agg)
if cfg.project_features == "none":
self.project_features = None
elif cfg.project_features == "same":
self.project_features = self.feature_aggregator
elif cfg.project_features == "new":
self.project_features, _ = make_aggregator()
def forward(self, source):
result = {}
features = self.feature_extractor(source)
if self.vector_quantizer:
q_res = self.vector_quantizer(features)
features = q_res["x"]
for k in q_res.keys():
if k != "x":
result[k] = q_res[k]
x = self.dropout_feats(features)
x = self.feature_aggregator(x)
x = self.dropout_agg(x)
if self.project_features is not None:
features = self.project_features(features)
x, targets = self.wav2vec_predictions(x, features)
result["cpc_logits"] = x
result["cpc_targets"] = targets
return result
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
def max_positions(self):
"""Maximum length supported by the model."""
return sys.maxsize
def get_logits(self, net_output):
logits = net_output["cpc_logits"]
return logits
def get_targets(self, sample, net_output):
t = net_output["cpc_targets"]
if isinstance(t, tuple):
t = t[0]
return t.contiguous()
def get_target_weights(self, targets, net_output):
targets = net_output["cpc_targets"]
if isinstance(targets, tuple) and targets[-1] is not None:
return targets[-1]
return None
def get_extra_losses(self, net_output):
loss = None
if "prob_perplexity" in net_output:
loss = net_output["num_vars"] - net_output["prob_perplexity"]
elif "kmeans_loss" in net_output:
loss = net_output["kmeans_loss"]
return loss
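# --- Hedged sketch (editor's addition, not part of the original file).
# The offset == "auto" branch in Wav2VecModel.__init__ derives the offset
# from the receptive field (rin) and total stride (jin) of the feature
# extractor. Re-running that loop on the default conv_feature_layers shows
# where the value comes from:
if __name__ == "__main__":
    layers = [(512, 10, 5), (512, 8, 4)] + [(512, 4, 2)] * 3 + [(512, 1, 1)] * 3
    jin, rin = 0, 0
    for _, k, stride in layers:
        if rin == 0:
            rin = k
        rin = rin + (k - 1) * jin
        jin = stride if jin == 0 else jin * stride
    assert (rin, jin) == (465, 160)
    assert math.ceil(rin / jin) == 3  # the default offset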
def norm_block(is_layer_norm, dim, affine=True):
if is_layer_norm:
mod = nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=affine),
TransposeLast(),
)
else:
mod = Fp32GroupNorm(1, dim, affine=affine)
return mod
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers,
dropout,
log_compression,
skip_connections,
residual_scale,
non_affine_group_norm,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
return nn.Sequential(
nn.Conv1d(n_in, n_out, k, stride=stride, bias=False),
nn.Dropout(p=dropout),
norm_block(
is_layer_norm=False, dim=n_out, affine=not non_affine_group_norm
),
activation,
)
in_d = 1
self.conv_layers = nn.ModuleList()
for dim, k, stride in conv_layers:
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.log_compression = log_compression
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
residual = x
x = conv(x)
if self.skip_connections and x.size(1) == residual.size(1):
tsz = x.size(2)
r_tsz = residual.size(2)
residual = residual[..., :: r_tsz // tsz][..., :tsz]
x = (x + residual) * self.residual_scale
if self.log_compression:
x = x.abs()
x = x + 1
x = x.log()
return x
class ZeroPad1d(nn.Module):
def __init__(self, pad_left, pad_right):
super().__init__()
self.pad_left = pad_left
self.pad_right = pad_right
def forward(self, x):
return F.pad(x, (self.pad_left, self.pad_right))
class ConvAggegator(nn.Module):
def __init__(
self,
conv_layers,
embed,
dropout,
skip_connections,
residual_scale,
non_affine_group_norm,
conv_bias,
zero_pad,
activation,
):
super().__init__()
def block(n_in, n_out, k, stride):
# padding dims only really make sense for stride = 1
ka = k // 2
kb = ka - 1 if k % 2 == 0 else ka
pad = (
ZeroPad1d(ka + kb, 0) if zero_pad else nn.ReplicationPad1d((ka + kb, 0))
)
return nn.Sequential(
pad,
nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias),
nn.Dropout(p=dropout),
norm_block(False, n_out, affine=not non_affine_group_norm),
activation,
)
in_d = embed
self.conv_layers = nn.ModuleList()
self.residual_proj = nn.ModuleList()
for dim, k, stride in conv_layers:
if in_d != dim and skip_connections:
self.residual_proj.append(nn.Conv1d(in_d, dim, 1, bias=False))
else:
self.residual_proj.append(None)
self.conv_layers.append(block(in_d, dim, k, stride))
in_d = dim
self.conv_layers = nn.Sequential(*self.conv_layers)
self.skip_connections = skip_connections
self.residual_scale = math.sqrt(residual_scale)
def forward(self, x):
for rproj, conv in zip(self.residual_proj, self.conv_layers):
residual = x
x = conv(x)
if self.skip_connections:
if rproj is not None:
residual = rproj(residual)
x = (x + residual) * self.residual_scale
return x
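# --- Hedged sketch (editor's addition, not part of the original file).
# The (ka + kb, 0) padding in ConvAggegator.block() is causal left-padding:
# for stride 1 it keeps the output length equal to the input length while
# only looking at current and past frames. For k = 4: ka = 2, kb = 1, so 3
# frames are padded on the left and T - (k - 1) + 3 = T frames come out.
if __name__ == "__main__":
    k = 4
    ka = k // 2
    kb = ka - 1 if k % 2 == 0 else ka
    pad = ZeroPad1d(ka + kb, 0)
    conv = nn.Conv1d(8, 8, k, stride=1)
    x = torch.randn(1, 8, 50)
    assert conv(pad(x)).shape[-1] == x.shape[-1]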
class Wav2VecPredictionsModel(nn.Module):
def __init__(
self,
in_dim,
out_dim,
prediction_steps,
n_negatives,
cross_sample_negatives,
sample_distance,
dropout,
offset,
balanced_classes,
infonce,
):
super().__init__()
self.n_negatives = n_negatives
self.cross_sample_negatives = cross_sample_negatives
self.sample_distance = sample_distance
self.project_to_steps = nn.ConvTranspose2d(
in_dim, out_dim, (1, prediction_steps)
)
self.dropout = nn.Dropout(p=dropout)
self.offset = offset
self.balanced_classes = balanced_classes
self.infonce = infonce
def sample_negatives(self, y):
bsz, fsz, tsz = y.shape
y = y.transpose(0, 1) # BCT -> CBT
y = y.contiguous().view(fsz, -1) # CBT => C(BxT)
cross_high = tsz * bsz
high = tsz if self.sample_distance is None else min(tsz, self.sample_distance)
assert high > 1
neg_idxs = torch.randint(low=0, high=high, size=(bsz, self.n_negatives * tsz))
with torch.no_grad():
if self.n_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * tsz)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(tsz)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * tsz),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[..., neg_idxs.view(-1)]
negs = negs.view(
fsz, bsz, self.n_negatives + self.cross_sample_negatives, tsz
).permute(
2, 1, 0, 3
) # to NxBxCxT
return negs
def forward(self, x, y):
x = x.unsqueeze(-1)
x = self.project_to_steps(x) # BxCxTxS
x = self.dropout(x)
negatives = self.sample_negatives(y)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0) # Copies x B x C x T
copies = targets.size(0)
bsz, dim, tsz, steps = x.shape
steps = min(steps, tsz - self.offset)
predictions = x.new(
bsz * copies * (tsz - self.offset + 1) * steps
- ((steps + 1) * steps // 2) * copies * bsz
)
if self.infonce:
labels = predictions.new_full(
(predictions.shape[0] // copies,), 0, dtype=torch.long
)
else:
labels = torch.zeros_like(predictions)
weights = (
torch.full_like(labels, 1 / self.n_negatives)
if self.balanced_classes and not self.infonce
else None
)
start = end = 0
for i in range(steps):
offset = i + self.offset
end = start + (tsz - offset) * bsz * copies
if self.infonce:
predictions[start:end] = torch.einsum(
"bct,nbct->tbn", x[..., :-offset, i], targets[..., offset:]
).flatten()
else:
pos_num = (end - start) // copies
predictions[start:end] = torch.einsum(
"bct,nbct->nbt", x[..., :-offset, i], targets[..., offset:]
).flatten()
labels[start : start + pos_num] = 1.0
if weights is not None:
weights[start : start + pos_num] = 1.0
start = end
assert end == predictions.numel(), "{} != {}".format(end, predictions.numel())
if self.infonce:
predictions = predictions.view(-1, copies)
else:
if weights is not None:
labels = (labels, weights)
return predictions, labels
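# --- Hedged sketch (editor's addition, not part of the original file).
# Shapes produced by sample_negatives() above: it draws
# N = n_negatives + cross_sample_negatives distractors per (batch, time)
# position and returns them as (N, B, C, T). Toy numbers:
if __name__ == "__main__":
    m = Wav2VecPredictionsModel(
        in_dim=8, out_dim=8, prediction_steps=3, n_negatives=2,
        cross_sample_negatives=0, sample_distance=None, dropout=0.0,
        offset=1, balanced_classes=False, infonce=True,
    )
    y = torch.randn(2, 8, 20)  # (B, C, T) features
    negs = m.sample_negatives(y)
    assert negs.shape == (2, 2, 8, 20)  # (N, B, C, T)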
| 20,928
| 32.167987
| 179
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/wav2vec/wav2vec2_asr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any, Optional
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import (
LayerNorm,
PositionalEmbedding,
TransformerDecoderLayer,
)
@dataclass
class Wav2Vec2AsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to wav2vec 2.0 model"}
)
no_pretrained_weights: bool = field(
default=False, metadata={"help": "if true, does not load pretrained weights"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"}
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside wav2vec 2.0 model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside wav2vec 2.0 model"
},
)
conv_feature_layers: Optional[str] = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": (
"string describing convolutional feature extraction "
"layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
),
},
)
encoder_embed_dim: Optional[int] = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
        default=10, metadata={"help": "length of each mask span"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask (normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
freeze_finetune_updates: int = field(
default=0, metadata={"help": "dont finetune wav2vec for this many updates"}
)
feature_grad_mult: float = field(
default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"}
)
layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"}
)
mask_channel_min_space: Optional[int] = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
mask_channel_before: bool = False
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded wav2vec args
w2v_args: Any = None
checkpoint_activations: bool = field(
default=False,
metadata={"help": "recompute activations and save memory for extra compute"},
)
ddp_backend: str = II("distributed_training.ddp_backend")
@dataclass
class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig):
blank_weight: float = 0
blank_mode: str = "add"
@register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig)
class Wav2VecCtc(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
self.blank_weight = cfg.blank_weight
self.blank_mode = cfg.blank_mode
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = Wav2VecEncoder(cfg, len(task.target_dictionary))
return cls(cfg, w2v_encoder)
def get_logits(self, net_output, normalize=False):
logits = net_output["encoder_out"]
if self.blank_weight != 0:
if self.blank_mode == "add":
logits[..., 0] += self.blank_weight
elif self.blank_mode == "set":
logits[..., 0] = self.blank_weight
else:
raise Exception(f"invalid blank mode {self.blank_mode}")
if net_output["padding_mask"] is not None and net_output["padding_mask"].any():
number_of_classes = logits.size(-1)
            masking_tensor = torch.ones(number_of_classes) * float("-inf")
            # keep the blank logit finite so softmax/log_softmax over padded
            # frames stays well-defined (blank then gets probability 1)
            masking_tensor[0] = 0
logits[net_output["padding_mask"].T] = masking_tensor.type_as(logits)
if normalize:
logits = utils.log_softmax(logits.float(), dim=-1)
return logits
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = self.get_logits(net_output)
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
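# --- Illustrative sketch (editor's addition, not part of upstream fairseq) ---
# Shows how the framewise logits produced by Wav2VecCtc.get_logits (T x B x V
# when the encoder runs with tbc=True) are typically collapsed into token ids
# by greedy CTC decoding. The helper name and the blank_id=0 convention are
# assumptions for illustration only.
def _example_greedy_ctc_decode(logits, blank_id=0):
    """Framewise argmax, collapse repeats, then drop blanks."""
    preds = logits.argmax(dim=-1).transpose(0, 1)  # T x B -> B x T
    decoded = []
    for seq in preds.tolist():
        out, prev = [], None
        for tok in seq:
            if tok != blank_id and tok != prev:
                out.append(tok)
            prev = tok
        decoded.append(out)
    return decoded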
@dataclass
class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
autoregressive: bool = II("task.autoregressive")
@register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig)
class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask):
"""Build a new model instance."""
assert (
cfg.autoregressive
), "Please set task.autoregressive=true for seq2seq asr models"
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
encoder = cls.build_encoder(cfg)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
return Wav2Vec2Seq2SeqModel(encoder, decoder)
@classmethod
def build_encoder(cls, cfg: Wav2Vec2AsrConfig):
return Wav2VecEncoder(cfg)
@classmethod
def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens):
return TransformerDecoder(cfg, tgt_dict, embed_tokens)
def forward(self, **kwargs):
encoder_out = self.encoder(**kwargs)
decoder_out = self.decoder(encoder_out=encoder_out, **kwargs)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
class Wav2VecEncoder(FairseqEncoder):
def __init__(self, cfg: Wav2Vec2AsrConfig, output_size=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_before": cfg.mask_channel_before,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
w2v_args.criterion = None
w2v_args.lr_scheduler = None
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for both pre-training and here"
)
if hasattr(cfg, "checkpoint_activations") and cfg.checkpoint_activations:
with open_dict(w2v_args):
w2v_args.model.checkpoint_activations = cfg.checkpoint_activations
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
self.load_model_weights(state, model, cfg)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
targ_d = None
self.proj = None
if output_size is not None:
targ_d = output_size
elif getattr(cfg, "decoder_embed_dim", d) != d:
targ_d = cfg.decoder_embed_dim
if targ_d is not None:
self.proj = Linear(d, targ_d)
def load_model_weights(self, state, model, cfg):
if cfg.ddp_backend == "fully_sharded":
from fairseq.distributed import FullyShardedDataParallel
for name, module in model.named_modules():
if "encoder.layers" in name and len(name.split(".")) == 3:
                    # Only for layers, we do special handling and load the weights
                    # one by one. We don't load all weights together, as that
                    # wouldn't be memory efficient and could cause OOM.
new_dict = {
k.replace(name + ".", ""): v
for (k, v) in state["model"].items()
if name + "." in k
}
assert isinstance(module, FullyShardedDataParallel)
with module.summon_full_params():
module.load_state_dict(new_dict, strict=True)
module._reset_lazy_init()
# Once layers are loaded, filter them out and load everything else.
            # raw string with escaped dots; \d+ also covers layer indices >= 10
            r = re.compile(r"encoder\.layers\.\d+\.")
filtered_list = list(filter(r.match, state["model"].keys()))
new_big_dict = {
k: v for (k, v) in state["model"].items() if k not in filtered_list
}
model.load_state_dict(new_big_dict, strict=False)
else:
model.load_state_dict(state["model"], strict=True)
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
res = self.w2v_model.extract_features(**w2v_args)
x = res["x"]
padding_mask = res["padding_mask"]
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
encoder_raw_out = x
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_raw_out": encoder_raw_out, # B x T x C
"encoder_padding_mask": padding_mask
if padding_mask is not None
else None, # B x T
"padding_mask": padding_mask, # B x T,
"layer_results": res["layer_results"],
}
def forward_torchscript(self, net_input):
if torch.jit.is_scripting():
return self.forward(net_input["source"], net_input["padding_mask"])
else:
return self.forward_non_torchscript(net_input)
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["padding_mask"] is not None:
encoder_out["padding_mask"] = encoder_out["padding_mask"].index_select(
0, new_order
)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
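# --- Illustrative sketch (editor's addition) ---
# Wav2VecEncoder.forward above freezes the wav2vec backbone for the first
# freeze_finetune_updates steps by swapping context managers: torch.no_grad()
# while frozen, a no-op contextlib.ExitStack() once fine-tuning starts. A
# minimal standalone version of that pattern (names are hypothetical):
def _example_frozen_forward(module, inputs, num_updates, freeze_updates):
    ft = freeze_updates <= num_updates
    with torch.no_grad() if not ft else contextlib.ExitStack():
        return module(inputs)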
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg: Wav2Vec2Seq2SeqConfig,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim**-0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
self_attn_padding_mask = None
if prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["padding_mask"] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
self_attn_padding_mask=self_attn_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
if self.share_input_output_embed:
return F.linear(features, self.embed_tokens.weight)
else:
return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
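# --- Illustrative sketch (editor's addition) ---
# buffered_future_mask above caches an upper-triangular -inf mask so that
# position t can only attend to positions <= t. For dim=3 the mask is
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# A minimal uncached equivalent (hypothetical helper, illustration only):
def _example_causal_mask(dim, device=None):
    return torch.triu(
        torch.full((dim, dim), float("-inf"), device=device), diagonal=1
    )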
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
# ==== CIF-HieraDist-main/fairseq/models/wav2vec/wav2vec2_nar_cif_asr.py ====
# @Time : 2021/11/1
# @Author : Minglun Han
# @File : wav2vec2_nar_cif_asr.py
from argparse import Namespace
import logging
import sys
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any, Optional
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqDecoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
from fairseq.models.wav2vec.wav2vec2 import (
MASKING_DISTRIBUTION_CHOICES,
TransformerSentenceEncoderLayer,
)
from fairseq.modules import (
LayerNorm,
PositionalEmbedding,
TransformerDecoderLayer,
)
from fairseq.models.wav2vec.wav2vec2_asr import (
Wav2VecEncoder,
Wav2Vec2AsrConfig,
Embedding,
Linear,
)
@dataclass
class Wav2Vec2NarCIFConfig(Wav2Vec2AsrConfig):
# Decoder settings
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
autoregressive: bool = II("task.autoregressive")
decoder_mode: str = field(
default="proj",
metadata={
"help": "the mode of decoder, there are three options: ar_decoder, nar_decoder, proj"
},
)
pre_final_proj_dim: int = field(default=768)
# CIF settings
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder output embedding dimension"}
)
cif_embedding_dim: int = field(
default=768, metadata={"help": "cif output embedding dimension"}
)
produce_weight_type: str = field(
default="conv",
metadata={"help": "the style of weight generation from encoder outputs"},
)
cif_threshold: float = field(
default=0.99,
metadata={"help": "the threshold of accumulated weight for firing"},
)
conv_cif_layer_num: int = field(
default=1, metadata={"help": "the number of cif convolution layers"}
)
conv_cif_width: int = field(
default=3, metadata={"help": "the width of kernel of CIF convolution layer"}
)
conv_cif_output_channels_num: int = field(
default=768, metadata={"help": "the number of CIF convolution output channels"}
)
conv_cif_dropout: float = field(
default=0.0,
metadata={"help": "the dropout rate of the final convolutional layer"},
)
dense_cif_units_num: int = field(
default=768,
metadata={"help": "the projection size of dense cif weight projection"},
)
apply_scaling: bool = field(
default=True, metadata={"help": "scale the summation of all weights"}
)
apply_tail_handling: bool = field(
default=True,
metadata={"help": "handle the tails of cif weights with special strategy"},
)
tail_handling_firing_threshold: float = field(
default=0.5, metadata={"help": "the firing threshold of tail handling"}
)
add_cif_ctxt_layers: bool = field(
default=False,
metadata={
"help": "whether use extra encoding layers to contextualize cif outputs"
},
)
cif_ctxt_layers: int = field(default=2)
cif_ctxt_embed_dim: int = field(default=768)
cif_ctxt_ffn_embed_dim: int = field(default=3072)
cif_ctxt_attention_heads: int = field(default=8)
cif_ctxt_dropout: float = field(default=0.1)
cif_ctxt_activation_dropout: float = field(default=0.0)
cif_ctxt_attention_dropout: float = field(default=0.1)
cif_ctxt_normalize_before: bool = field(default=True)
class CifMiddleware(nn.Module):
def __init__(self, cfg):
super().__init__()
# Get configurations related to continuous integrate-and-fire
self.cif_threshold = cfg.cif_threshold
self.cif_output_dim = cfg.cif_embedding_dim
self.encoder_embed_dim = cfg.encoder_embed_dim
self.produce_weight_type = cfg.produce_weight_type
self.apply_scaling = cfg.apply_scaling
self.apply_tail_handling = cfg.apply_tail_handling
self.tail_handling_firing_threshold = cfg.tail_handling_firing_threshold
self.add_cif_ctxt_layers = cfg.add_cif_ctxt_layers
# Build weight projection layer to compute weight from encoder outputs
if self.produce_weight_type == "dense":
self.dense_proj = Linear(
self.encoder_embed_dim, cfg.dense_cif_units_num
).cuda()
self.weight_proj = Linear(cfg.dense_cif_units_num, 1).cuda()
elif self.produce_weight_type == "conv":
self.cif_conv_layer_num = cfg.conv_cif_layer_num
self.conv = torch.nn.Conv1d(
self.encoder_embed_dim,
cfg.conv_cif_output_channels_num,
cfg.conv_cif_width,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
).cuda()
self.conv_dropout = torch.nn.Dropout(p=cfg.conv_cif_dropout).cuda()
self.weight_proj = Linear(cfg.conv_cif_output_channels_num, 1).cuda()
else:
self.weight_proj = Linear(self.encoder_embed_dim, 1).cuda()
# Build the final projection layer for cif outputs
if self.cif_output_dim != self.encoder_embed_dim:
self.cif_output_proj = Linear(
self.encoder_embed_dim, self.cif_output_dim, bias=False
).cuda()
# Build cif contextual layers
if self.add_cif_ctxt_layers:
self.cif_ctxt_embed_dim = cfg.cif_ctxt_embed_dim
self.cif_ctxt_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=cfg.cif_ctxt_embed_dim,
ffn_embedding_dim=cfg.cif_ctxt_ffn_embed_dim,
num_attention_heads=cfg.cif_ctxt_attention_heads,
dropout=cfg.cif_ctxt_dropout,
activation_dropout=cfg.cif_ctxt_activation_dropout,
attention_dropout=cfg.cif_ctxt_attention_dropout,
layer_norm_first=cfg.cif_ctxt_normalize_before,
)
for _ in range(cfg.cif_ctxt_layers)
]
)
def forward(self, encoder_outputs, target_lengths, **kwargs):
# Collect inputs
encoder_raw_outputs = encoder_outputs["encoder_raw_out"] # B x T x C
encoder_padding_mask = encoder_outputs["encoder_padding_mask"] # B x T
        # Expected shapes:
        #   encoder_raw_outputs: B x T x C
        #   target_lengths: B
        #   encoder_padding_mask: B x T
# print(encoder_raw_outputs.size())
# print(encoder_padding_mask.size())
# Produce weights
if self.produce_weight_type == "dense":
proj_out = self.dense_proj(encoder_raw_outputs)
act_proj_out = torch.relu(proj_out)
sig_input = self.weight_proj(act_proj_out)
weight = torch.sigmoid(sig_input)
# weight has shape [batch_size, length, 1]
elif self.produce_weight_type == "conv":
conv_input = encoder_raw_outputs.permute(0, 2, 1)
# Adjust the shape of convolution layer input [B, C_in, T]
conv_out = self.conv(conv_input)
# conv_out has shape [B, C_out, T]
proj_input = conv_out.permute(0, 2, 1)
proj_input = self.conv_dropout(proj_input)
# Adjust conv output to shape [B, T, C_cif]
sig_input = self.weight_proj(proj_input)
weight = torch.sigmoid(sig_input)
else:
sig_input = self.weight_proj(encoder_raw_outputs)
weight = torch.sigmoid(sig_input)
not_padding_mask = ~encoder_padding_mask
# print(not_padding_mask.size())
# print(torch.squeeze(weight, dim=-1).size())
weight = (
torch.squeeze(weight, dim=-1) * not_padding_mask.int()
) # weight has shape B x T
org_weight = weight
# Sum weights
if self.training and self.apply_scaling and target_lengths is not None:
# if self.apply_scaling and target_lengths is not None: # For validation debugging
            # Conduct scaling during training; target_lengths here is the
            # eos-inclusive length (passed in as target_lengths_with_eos)
weight_sum = weight.sum(-1) # weight_sum has shape [batch_size]
normalize_scalar = torch.unsqueeze(
target_lengths / weight_sum, -1
) # normalize_scalar has shape [batch_size, 1]
weight = weight * normalize_scalar
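            # Editor's note (illustrative): e.g. weights summing to 7.5 for a
            # 6-token target are rescaled by 6 / 7.5 = 0.8, so the scaled
            # weights sum exactly to the target length and the module fires
            # (approximately) once per target token during training.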
# Integrate and fire
batch_size = encoder_raw_outputs.size(0)
max_length = encoder_raw_outputs.size(1)
encoder_embed_dim = encoder_raw_outputs.size(2)
padding_start_id = not_padding_mask.sum(-1) # shape B
# Initialize
accumulated_weights = torch.zeros(batch_size, 0).cuda()
accumulated_states = torch.zeros(batch_size, 0, encoder_embed_dim).cuda()
fired_states = torch.zeros(batch_size, 0, encoder_embed_dim).cuda()
# Begin integrate and fire
for i in range(max_length):
# Get previous states from the recorded tensor
prev_accumulated_weight = (
torch.zeros([batch_size]).cuda()
if i == 0
else accumulated_weights[:, i - 1]
)
prev_accumulated_state = (
torch.zeros([batch_size, encoder_embed_dim]).cuda()
if i == 0
else accumulated_states[:, i - 1, :]
)
            # Decide whether to place a boundary (i.e., fire) at this step
cur_is_fired = (
(prev_accumulated_weight + weight[:, i]) >= self.cif_threshold
).unsqueeze(dim=-1)
# cur_is_fired with shape [batch_size, 1]
            # Update the accumulated weight depending on whether a boundary was placed
cur_weight = torch.unsqueeze(weight[:, i], -1)
# cur_weight has shape [batch_size, 1]
prev_accumulated_weight = torch.unsqueeze(prev_accumulated_weight, -1)
# prev_accumulated_weight also has shape [batch_size ,1]
remained_weight = (
torch.ones_like(prev_accumulated_weight).cuda()
- prev_accumulated_weight
)
# remained_weight with shape [batch_size ,1]
# Obtain the accumulated weight of current step
cur_accumulated_weight = torch.where(
cur_is_fired,
cur_weight - remained_weight,
cur_weight + prev_accumulated_weight,
) # [batch_size, 1]
# Obtain accumulated state of current step
cur_accumulated_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
(cur_weight - remained_weight) * encoder_raw_outputs[:, i, :],
prev_accumulated_state + cur_weight * encoder_raw_outputs[:, i, :],
) # [batch_size, encoder_embed_dim]
# Obtain fired state of current step:
            # firing locations have meaningful representations, while non-firing locations are all-zero embeddings
cur_fired_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
prev_accumulated_state + remained_weight * encoder_raw_outputs[:, i, :],
torch.zeros([batch_size, encoder_embed_dim]).cuda(),
) # shape = [batch_size, encoder_embed_dim]
# Handling the speech tail by rounding up and down
if (not self.training) and self.apply_tail_handling:
                # When the encoder output position exceeds the last valid position,
                # if the accumulated weight is greater than
                # tail_handling_firing_threshold, the current state is kept,
                # otherwise it is discarded.
cur_fired_state = torch.where(
i
== padding_start_id.unsqueeze(dim=-1).repeat(
[1, encoder_embed_dim]
),
# shape = [batch_size, encoder_embed_dim]
torch.where(
cur_accumulated_weight.repeat([1, encoder_embed_dim])
<= self.tail_handling_firing_threshold,
# shape = [batch_size, encoder_embed_dim]
torch.zeros([batch_size, encoder_embed_dim]).cuda(),
                    # less than or equal to tail_handling_firing_threshold: discarded.
cur_accumulated_state / (cur_accumulated_weight + 1e-10)
                    # greater than tail_handling_firing_threshold: normalized and kept.
),
cur_fired_state,
)
# shape = [batch_size, encoder_embed_dim]
            # In all cases (both training and evaluation),
            # mask padded locations with all-zero embeddings
cur_fired_state = torch.where(
torch.full([batch_size, encoder_embed_dim], i).cuda()
> padding_start_id.unsqueeze(dim=-1).repeat([1, encoder_embed_dim]),
torch.zeros([batch_size, encoder_embed_dim]).cuda(),
cur_fired_state,
)
# Update accumulated arguments
accumulated_weights = torch.cat(
(accumulated_weights, cur_accumulated_weight), 1
) # shape = [batch_size, Len]
accumulated_states = torch.cat(
(accumulated_states, torch.unsqueeze(cur_accumulated_state, 1)), 1
) # shape = [B, L, D]
fired_states = torch.cat(
(fired_states, torch.unsqueeze(cur_fired_state, 1)), 1
) # shape = [B, L, D]
        # Extract cif_outputs for each utterance
fired_marks = (
torch.abs(fired_states).sum(-1) != 0.0
).int() # [batch_size, max_length]
fired_utt_length = fired_marks.sum(-1) # [batch_size]
fired_max_length = (
fired_utt_length.max().int()
) # The maximum of fired times in current batch
cif_outputs = torch.zeros(
[0, fired_max_length, encoder_embed_dim]
).cuda() # Initialize cif outputs
def dynamic_partition(
data: torch.Tensor, partitions: torch.Tensor, num_partitions=None
):
assert (
len(partitions.shape) == 1
), "Only one dimensional partitions supported"
assert (
data.shape[0] == partitions.shape[0]
), "Partitions requires the same size as data"
            if num_partitions is None:
                # +1 so the highest partition id is included in the range below
                num_partitions = int(partitions.max()) + 1
return [data[partitions == i] for i in range(num_partitions)]
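        # Editor's note (illustrative): dynamic_partition mirrors
        # tf.dynamic_partition; e.g. partitioning rows by marks [0, 1, 0, 1]
        # yields [rows 0 and 2] and [rows 1 and 3], so taking index 1 below
        # selects exactly the fired (integrated) states.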
for j in range(batch_size):
# Get information of j-th sample
cur_utt_fired_mark = fired_marks[j, :]
cur_utt_fired_state = fired_states[j, :, :]
cur_utt_outputs = dynamic_partition(
cur_utt_fired_state, cur_utt_fired_mark, 2
)
cur_utt_output = cur_utt_outputs[1] # Get integrated representations
cur_utt_length = cur_utt_output.size(0) # The total number of firing
pad_length = fired_max_length - cur_utt_length # Calculate padding length
cur_utt_output = torch.cat(
(
cur_utt_output,
torch.full([pad_length, encoder_embed_dim], 0.0).cuda(),
),
dim=0,
) # Pad current utterance cif outputs to fired_max_length
cur_utt_output = torch.unsqueeze(cur_utt_output, 0)
# Reshape to [1, fired_max_length, encoder_embed_dim]
# Concatenate cur_utt_output and cif_outputs along batch axis
cif_outputs = torch.cat([cif_outputs, cur_utt_output], 0)
cif_out_padding_mask = (torch.abs(cif_outputs).sum(-1) != 0.0).int()
# cif_out_padding_mask shape = [batch_size, fired_max_length], where locations with value 0 is False.
if self.training:
# In training phase, use the sum of original weights as quantity out for quantity loss
quantity_out = org_weight.sum(-1)
else:
quantity_out = weight.sum(-1)
if self.cif_output_dim != encoder_embed_dim:
cif_outputs = self.cif_output_proj(cif_outputs)
ctxt_cif_outputs = None
if self.add_cif_ctxt_layers and self.cif_output_dim == self.cif_ctxt_embed_dim:
x = cif_outputs.transpose(0, 1)
padding_mask = ~cif_out_padding_mask.bool()
for layer in self.cif_ctxt_stacks:
x, _ = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
ctxt_cif_outputs = x.transpose(0, 1)
return {
"cif_out": cif_outputs, # shape = [batch_size, fired_max_length, cif_output_dim]
"ctxt_cif_out": ctxt_cif_outputs, # shape = [batch_size, fired_max_length, cif_ctxt_embed_dim]
"quantity_out": quantity_out, # shape = [batch_size]
"cif_out_padding_mask": cif_out_padding_mask, # shape = [batch_size, fired_max_length]
}
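# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A tiny worked example of the integrate-and-fire recurrence implemented in
# CifMiddleware.forward above. The real module fires when the accumulator
# crosses cif_threshold (0.99 by default) and measures the remainder against
# 1.0; this simplified single-utterance sketch uses one threshold and skips
# padding, scaling, and tail handling. The helper name is hypothetical.
def _example_cif_boundaries(weights, threshold=1.0):
    """Return frame indices where CIF would fire for the given weights."""
    acc, fire_positions = 0.0, []
    for i, w in enumerate(weights):
        if acc + w >= threshold:
            fire_positions.append(i)
            acc = acc + w - threshold  # carry the remainder into the next token
        else:
            acc += w
    return fire_positions
# e.g. _example_cif_boundaries([0.4, 0.7, 0.5, 0.6]) -> fires at frames [1, 3]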
@register_model("wav2vec_nar_cif", dataclass=Wav2Vec2NarCIFConfig)
class Wav2Vec2NarCIF(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder, cif):
# Build encoder and decoder part
super().__init__(encoder, decoder)
# Build continuous integrate and fire module
self.cif = cif
@classmethod
def build_model(cls, cfg: Wav2Vec2NarCIFConfig, task: FairseqTask):
"""Build a new model instance."""
assert (
cfg.autoregressive
), "Please set task.autoregressive=true for seq2seq asr models"
# Obtain dictionary
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
# Register important tokens, such as [CLS] and [SEP]
cls.bos_id = tgt_dict.bos() # <bos>
cls.pad_id = tgt_dict.pad() # <pad>
cls.eos_id = tgt_dict.eos() # <eos>
cls.unk_id = tgt_dict.unk() # <unk>
# Build the whole model
encoder = cls.build_encoder(cfg, vocab_size=len(tgt_dict))
cif = cls.build_cif_middleware(cfg)
decoder = cls.build_decoder(cfg, tgt_dict)
return Wav2Vec2NarCIF(encoder, decoder, cif)
@classmethod
def build_encoder(cls, cfg: Wav2Vec2AsrConfig, vocab_size=None):
return Wav2VecEncoder(cfg, vocab_size)
@classmethod
def build_decoder(cls, cfg: Wav2Vec2NarCIFConfig, tgt_dict):
cls.decoder_mode = cfg.decoder_mode
if cfg.decoder_mode == "nar_decoder":
return CifNarTransformerDecoder(cfg, tgt_dict)
elif cfg.decoder_mode == "proj":
return CifProjDecoder(cfg, tgt_dict)
else:
# Default Settings: proj option
return CifProjDecoder(cfg, tgt_dict)
@classmethod
def build_cif_middleware(cls, cfg: Wav2Vec2NarCIFConfig):
return CifMiddleware(cfg)
def forward(self, target_lengths_with_eos=None, **kwargs):
        # Forward ASR model (for speech recognition)
encoder_out = self.encoder(tbc=False, **kwargs)
cif_out = self.cif(
encoder_out, target_lengths_with_eos if self.training else None, **kwargs
)
# cif_out = self.cif(encoder_out, target_lengths_with_eos, **kwargs) # For validation debugging
decoder_out = self.decoder(cif_out=cif_out, **kwargs)
model_outputs = {
# Encoder outputs
"encoder_out": encoder_out[
"encoder_out"
], # Encoder out for CTC calculation
"encoder_raw_out": encoder_out[
"encoder_raw_out"
], # Encoder raw outputs without projection
"encoder_padding_mask": encoder_out["padding_mask"], # B x T
"padding_mask": encoder_out["padding_mask"], # B x T
# Cif outputs
"quantity_out": cif_out[
"quantity_out"
], # Quantity out for quantity loss calculation
"cif_out": cif_out["cif_out"], # CIF out for decoder prediction, B x T x C
"ctxt_cif_out": cif_out[
"ctxt_cif_out"
], # Contextualized cif outputs, B x T x C
"cif_out_padding_mask": cif_out["cif_out_padding_mask"], # B x T
# Decoder outputs
"decoder_out": decoder_out, # Decoder outputs
}
return model_outputs
def get_ctc_output(self, **kwargs):
encoder_outputs = self.encoder(tbc=False, **kwargs)
ctc_outputs = encoder_outputs["encoder_out"]
encoder_outputs_padding_mask = encoder_outputs["encoder_padding_mask"]
return ctc_outputs, encoder_outputs_padding_mask
def get_cif_output(self, target_lengths_with_eos=None, **kwargs):
# Fetch the outputs of CifMiddleware
encoder_outputs = self.encoder(tbc=False, **kwargs)
cif_out = self.cif(
encoder_outputs,
target_lengths_with_eos if self.training else None,
**kwargs
)
return cif_out
def step_forward_decoder(
self,
prev_decoded_tokens=None,
cif_outputs=None,
incremental_state=None,
**kwargs
):
"""
        Run the decoder for a single step.
"""
step_decoder_out, extra_outputs = self.decoder(
prev_output_tokens=prev_decoded_tokens,
cif_out=cif_outputs,
incremental_state=incremental_state,
**kwargs
)
return step_decoder_out, extra_outputs
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@staticmethod
def get_probs_from_logits(logits, log_probs=False):
"""
Get normalized probabilities (or log probs) from logits.
"""
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
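# --- Illustrative sketch (editor's addition) ---
# The "quantity_out" returned by the model above (the per-utterance sum of
# CIF weights) is typically trained to match the target length with an L1
# penalty, which keeps the number of firings close to the number of target
# tokens. The loss form below is an assumption for illustration; the actual
# criterion is defined elsewhere in the repo:
def _example_quantity_loss(quantity_out, target_lengths):
    return (quantity_out - target_lengths.type_as(quantity_out)).abs().mean()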
class CifProjDecoder(FairseqDecoder):
def __init__(self, cfg, dictionary):
super().__init__(dictionary)
# Load parameters and build model
self.pre_final_proj_dim = cfg.pre_final_proj_dim
self.output_dim = len(self.dictionary)
self.output_proj = Linear(self.pre_final_proj_dim, self.output_dim).cuda()
def forward(self, prev_output_tokens=None, cif_out=None, **kwargs):
x = (
cif_out["ctxt_cif_out"]
if cif_out["ctxt_cif_out"] is not None
else cif_out["cif_out"]
)
# Collect shape information
batch_size, cif_len, cif_embed_dim = x.size()
prev_output_tokens_len = prev_output_tokens.size(1)
# Handle exception of No Elements in cif_outputs
if cif_len == 0 and not self.training:
cif_len = 1
x = torch.zeros([batch_size, cif_len, cif_embed_dim]).cuda()
        # Align the lengths of input tokens and cif outputs
min_len = min(prev_output_tokens_len, cif_len)
x = x[:, :min_len, :] # B x min_len x C
        # Forward decoder
final_logits = self.output_proj(x)
return final_logits, None
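# Editor's note (illustrative): during training the number of CIF firings can
# differ slightly from the target length, so CifProjDecoder above (and the
# NAR decoder below) truncate both streams to min(prev_output_tokens_len,
# cif_len) before projecting, keeping logits and targets aligned.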
class CifNarTransformerDecoder(CifProjDecoder):
def __init__(self, cfg, dictionary):
super().__init__(cfg, dictionary)
# Load decoder parameters
self.decoder_layers = cfg.decoder_layers
self.decoder_embed_dim = cfg.decoder_embed_dim
self.decoder_ffn_embed_dim = cfg.decoder_ffn_embed_dim
self.decoder_attention_heads = cfg.decoder_attention_heads
self.decoder_normalize_before = cfg.decoder_normalize_before
self.decoder_dropout = cfg.decoder_dropout
self.decoder_attention_dropout = cfg.decoder_attention_dropout
self.decoder_activation_dropout = cfg.decoder_activation_dropout
assert (
self.decoder_embed_dim == self.pre_final_proj_dim
), "ensure that the dimension of decoder outputs is equal to pre_final_proj_dim"
# Build decoder stacks
self.decoder_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.decoder_embed_dim,
ffn_embedding_dim=self.decoder_ffn_embed_dim,
num_attention_heads=self.decoder_attention_heads,
dropout=self.decoder_dropout,
activation_dropout=self.decoder_activation_dropout,
attention_dropout=self.decoder_attention_dropout,
layer_norm_first=self.decoder_normalize_before,
)
for _ in range(cfg.decoder_layers)
]
)
def forward(self, prev_output_tokens=None, cif_out=None, **kwargs):
x = (
cif_out["ctxt_cif_out"]
if cif_out["ctxt_cif_out"] is not None
else cif_out["cif_out"]
)
padding_mask = ~cif_out["cif_out_padding_mask"].bool()
# Collect shape information
batch_size, cif_len, cif_embed_dim = x.size()
prev_output_tokens_len = prev_output_tokens.size(1)
        # Handle the edge case of empty cif_outputs at inference
if cif_len == 0 and not self.training:
cif_len = 1
x = torch.zeros([batch_size, cif_len, cif_embed_dim]).cuda() # B x 1 x C
            padding_mask = torch.zeros([batch_size, cif_len], dtype=torch.bool).cuda()  # B x 1
        # Align the lengths of input tokens, cif outputs, and padding_mask
min_len = min(prev_output_tokens_len, cif_len)
x = x[:, :min_len, :] # B x min_len x C
padding_mask = padding_mask[:, :min_len] # B x min_len
# Forward decoder
x = x.transpose(0, 1) # T x B x C
for layer in self.decoder_stacks:
x, _ = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
x = x.transpose(0, 1) # B x T x C
final_logits = self.output_proj(x)
return final_logits, None
# ==== CIF-HieraDist-main/fairseq/models/wav2vec/wav2vec2.py ====
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
LayerNorm,
MultiheadAttention,
SamePad,
TransposeLast,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import buffered_arange, index_put, is_xla_tensor
from fairseq.distributed import fsdp_wrap
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class Wav2Vec2Config(FairseqDataclass):
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group norm with d "
"groups in the first conv block, whereas layer_norm has layer norms in "
"every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1, metadata={"help": "dropout probability for the transformer"}
)
attention_dropout: float = field(
default=0.1, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
encoder_layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many dimensions."
"set to encoder_embed_dim is <= 0"
},
)
layer_norm_first: bool = field(
default=False, metadata={"help": "apply layernorm first in the transformer"}
)
conv_feature_layers: str = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
quantize_targets: bool = field(
default=False, metadata={"help": "use quantized targets"}
)
quantize_input: bool = field(
default=False, metadata={"help": "use quantized inputs"}
)
same_quantizer: bool = field(
default=False, metadata={"help": "use same quantizer for inputs and targets"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
)
quantizer_depth: int = field(
default=1,
metadata={"help": "number of quantizer layers"},
)
quantizer_factor: int = field(
default=3,
metadata={
"help": "dimensionality increase for inner quantizer layers (if depth > 1)"
},
)
latent_vars: int = field(
default=320,
metadata={"help": "number of latent variables V in each group of the codebook"},
)
latent_groups: int = field(
default=2,
metadata={"help": "number of groups G of latent variables in the codebook"},
)
latent_dim: int = field(
default=0,
metadata={
"help": "if > 0, uses this dimensionality for latent variables. "
"otherwise uses final_dim / latent_groups"
},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65, metadata={"help": "probability of replacing a token with mask"}
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_before: bool = False
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# negative selection
num_negatives: int = field(
default=100,
metadata={"help": "number of negative examples from the same sample"},
)
negatives_from_everywhere: bool = field(
default=False,
metadata={"help": "sample negatives from everywhere, not just masked states"},
)
cross_sample_negatives: int = field(
default=0, metadata={"help": "number of negative examples from the any sample"}
)
codebook_negatives: int = field(
default=0, metadata={"help": "number of negative examples codebook"}
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling. "
"can be tuple of 3 values (start, end, decay)"
},
)
checkpoint_activations: bool = field(
default=False,
metadata={"help": "recompute activations and save memory for extra compute"},
)
@register_model("wav2vec2", dataclass=Wav2Vec2Config)
class Wav2Vec2Model(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2Config):
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.input_quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
if cfg.quantize_targets:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
self.quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
if cfg.quantize_input:
if cfg.same_quantizer and self.quantizer is not None:
vq_dim = final_dim
self.input_quantizer = self.quantizer
else:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
self.input_quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars,
temp=cfg.latent_temp,
groups=cfg.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
@classmethod
def build_model(cls, cfg: Wav2Vec2Config, task=None):
"""Build a new model instance."""
return cls(cfg)
def apply_mask(
self,
x,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb)
else:
mask_indices = None
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel_indices, 0)
return x, mask_indices
def sample_negatives(self, y, num, padding_count=None):
if self.n_negatives == 0 and self.cross_sample_negatives == 0:
return y.new(0)
bsz, tsz, fsz = y.shape
y = y.view(-1, fsz) # BTC => (BxT)C
# FIXME: what happens if padding_count is specified?
cross_high = tsz * bsz
high = tsz - (padding_count or 0)
with torch.no_grad():
assert high > 1, f"{bsz,tsz,fsz}"
if self.n_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * num)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * num),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[neg_idxs.view(-1)]
negs = negs.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
return negs, neg_idxs
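    # Editor's note (illustrative): the `neg_idxs[neg_idxs >= tszs] += 1`
    # shift above draws indices from high - 1 candidates and skips past the
    # positive's own timestep, so a frame is never sampled as its own negative.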
def compute_preds(self, x, y, negatives):
neg_is_pos = (y == negatives).all(-1)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits = logits / self.logit_temp
if is_xla_tensor(logits) or neg_is_pos.any():
fillval = -float(2**30)
if not hasattr(self, "_inftensor"):
self._inftensor = (
torch.tensor(fillval).to(x.device)
if is_xla_tensor(logits)
else float("-inf")
)
logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
return logits
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(
input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
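    # Editor's note (illustrative): with the default conv_feature_layers
    # "[(512,10,5)] + [(512,3,2)]*4 + [(512,2,2)] + [(512,2,2)]", an input of
    # 16000 samples (1 s at 16 kHz) maps to floor((16000-10)/5)+1 = 3199
    # frames, the four stride-2 width-3 layers give 1599 -> 799 -> 399 -> 199,
    # and the two width-2 stride-2 layers give 99 -> 49 frames, i.e. roughly
    # 320x downsampling (one frame per 20 ms).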
def forward(
self,
source,
padding_mask=None,
mask=True,
features_only=False,
layer=None,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
):
# print("batch size: ")
# print(source.size(0))
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
# if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
            # these two operations make sure that all values
            # before the output length indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
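            # Editor's note (illustrative): the flip/cumsum/flip above turns the
            # one-hot marker at the last valid frame into a suffix mask, e.g.
            # output_length 2 of 5 frames: [0,1,0,0,0] -> flip [0,0,0,1,0] ->
            # cumsum [0,0,0,1,1] -> flip [1,1,0,0,0] -> 1 - (...) = [0,0,1,1,1],
            # i.e. True exactly on the padded tail.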
else:
padding_mask = None
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
if self.input_quantizer:
q = self.input_quantizer(features, produce_targets=False)
features = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
features = self.project_inp(features)
if mask:
x, mask_indices = self.apply_mask(
features,
padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
if not is_xla_tensor(x) and mask_indices is not None:
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
y = unmasked_features[mask_indices].view(
unmasked_features.size(0), -1, unmasked_features.size(-1)
)
else:
y = unmasked_features
else:
x = features
y = unmasked_features
mask_indices = None
x, layer_results = self.encoder(x, padding_mask=padding_mask, layer=layer)
if features_only:
return {
"x": x,
"padding_mask": padding_mask,
"features": unmasked_features,
"layer_results": layer_results,
}
if self.quantizer:
q = self.quantizer(y, produce_targets=False)
y = q["x"]
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
y = self.project_q(y)
if self.negatives_from_everywhere:
neg_cands = self.quantizer(unmasked_features, produce_targets=False)[
"x"
]
negs, _ = self.sample_negatives(
neg_cands,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if self.codebook_negatives > 0:
cb_negs = self.quantizer.sample_from_codebook(
y.size(0) * y.size(1), self.codebook_negatives
)
cb_negs = cb_negs.view(
self.codebook_negatives, y.size(0), y.size(1), -1
                )  # order doesn't matter
cb_negs = self.project_q(cb_negs)
negs = torch.cat([negs, cb_negs], dim=0)
else:
y = self.project_q(y)
if self.negatives_from_everywhere:
negs, _ = self.sample_negatives(
unmasked_features,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if not is_xla_tensor(x):
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
x = x[mask_indices].view(x.size(0), -1, x.size(-1))
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
x = self.final_proj(x)
x = self.compute_preds(x, y, negs)
result = {
"x": x,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
if prob_ppl is not None:
result["prob_perplexity"] = prob_ppl
result["code_perplexity"] = code_ppl
result["num_vars"] = num_vars
result["temp"] = curr_temp
return result
def quantize(self, x):
assert self.quantizer is not None
x = self.feature_extractor(x)
x = x.transpose(1, 2)
x = self.layer_norm(x)
return self.quantizer.forward_idx(x)
def extract_features(self, source, padding_mask, mask=False, layer=None):
res = self.forward(
source, padding_mask, mask=mask, features_only=True, layer=layer
)
return res
def get_logits(self, net_output):
logits = net_output["x"]
logits = logits.transpose(0, 2)
logits = logits.reshape(-1, logits.size(-1))
return logits
def get_targets(self, sample, net_output, expand_steps=True):
x = net_output["x"]
return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
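    # Note on the layout above: compute_preds stacks the true target at index
    # 0 of the candidate dimension, so after get_logits transposes and
    # flattens to (T*B) x (1 + num_negatives), the correct class for every
    # position is 0 -- hence get_targets returns all zeros and the
    # contrastive task reduces to ordinary cross-entropy.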
def get_extra_losses(self, net_output):
pen = []
if "prob_perplexity" in net_output:
pen.append(
(net_output["num_vars"] - net_output["prob_perplexity"])
/ net_output["num_vars"]
)
if "features_pen" in net_output:
pen.append(net_output["features_pen"])
return pen
def remove_pretraining_modules(self):
self.quantizer = None
self.project_q = None
self.target_glu = None
self.final_proj = None
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
            assert not (
                is_layer_norm and is_group_norm
            ), "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
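# A minimal, illustrative sketch (not part of the original model) of how the
# conv stack downsamples raw audio. The layer spec below is an assumption
# matching the default conv_feature_layers used in this repo; its total stride
# is 5 * 2**6 = 320, i.e. roughly one frame per 20 ms at 16 kHz.
def _demo_conv_feature_downsampling():
    layers = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
    extractor = ConvFeatureExtractionModel(conv_layers=layers)
    wav = torch.randn(2, 16000)  # two 1-second utterances at 16 kHz
    feats = extractor(wav)  # B x T -> B x C x T'
    return feats.shape  # torch.Size([2, 512, 49])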
class TransformerEncoder(nn.Module):
def __init__(self, args):
        super().__init__()
        self.args = args  # kept so max_positions() below can read args.max_positions
        self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
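        # convolutional relative positional embedding: a grouped temporal conv
        # whose output is added to the inputs, used in place of fixed
        # sinusoidal/absolute position embeddings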
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
layers = []
for _ in range(args.encoder_layers):
layer = TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
)
if args.checkpoint_activations:
layer = fsdp_wrap(layer)
layer = checkpoint_wrapper(layer)
layers.append(layer)
self.layers = nn.ModuleList(layers)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, layer=None):
x, layer_results = self.extract_features(x, padding_mask, layer)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results
def extract_features(self, x, padding_mask=None, tgt_layer=None):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
layer_results = []
r = None
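        # LayerDrop: during training each layer is skipped independently with
        # probability self.layerdrop; at inference every layer always runs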
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
if tgt_layer is not None:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, layer_results
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
):
"""
        LayerNorm is applied either before or after the self-attention/ffn
        modules, similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
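# Minimal usage sketch (illustrative only, not part of the original file): the
# layer operates on time-first tensors, matching the T x B x C convention of
# MultiheadAttention used throughout the encoder above.
def _demo_sentence_encoder_layer():
    layer = TransformerSentenceEncoderLayer(embedding_dim=768, num_attention_heads=8)
    x = torch.randn(50, 2, 768)  # T x B x C
    out, _attn = layer(x)
    return out.shape  # torch.Size([50, 2, 768])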
| 34,747
| 32.768707
| 119
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/wav2vec/speech_text_pretraining_model.py
|
# @Time : 2021/11/24
# @Author : Minglun Han
# @File : speech_text_pretraining_model.py
import math
import random
import logging
from dataclasses import dataclass, field
from typing import List, Tuple, Dict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
LayerNorm,
MultiheadAttention,
SamePad,
TransposeLast,
)
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import buffered_arange, index_put, is_xla_tensor
from fairseq.distributed import fsdp_wrap
from fairseq.tasks import FairseqTask
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class Wav2Vec2Config(FairseqDataclass):
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group norm with d "
"groups in the first conv block, whereas layer_norm has layer norms in "
"every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1, metadata={"help": "dropout probability for the transformer"}
)
attention_dropout: float = field(
default=0.1, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
encoder_layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many dimensions."
"set to encoder_embed_dim is <= 0"
},
)
layer_norm_first: bool = field(
default=False, metadata={"help": "apply layernorm first in the transformer"}
)
conv_feature_layers: str = field(
default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]",
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
quantize_targets: bool = field(
default=True, metadata={"help": "use quantized targets"}
)
quantize_input: bool = field(
default=False, metadata={"help": "use quantized inputs"}
)
same_quantizer: bool = field(
default=False, metadata={"help": "use same quantizer for inputs and targets"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
)
quantizer_depth: int = field(
default=1,
metadata={"help": "number of quantizer layers"},
)
quantizer_factor: int = field(
default=3,
metadata={
"help": "dimensionality increase for inner quantizer layers (if depth > 1)"
},
)
latent_vars: int = field(
default=320,
metadata={"help": "number of latent variables V in each group of the codebook"},
)
latent_groups: int = field(
default=2,
metadata={"help": "number of groups G of latent variables in the codebook"},
)
latent_dim: int = field(
default=0,
metadata={
"help": "if > 0, uses this dimensionality for latent variables. "
"otherwise uses final_dim / latent_groups"
},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65, metadata={"help": "probability of replacing a token with mask"}
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_before: bool = False
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# negative selection
num_negatives: int = field(
default=100,
metadata={"help": "number of negative examples from the same sample"},
)
negatives_from_everywhere: bool = field(
default=False,
metadata={"help": "sample negatives from everywhere, not just masked states"},
)
cross_sample_negatives: int = field(
default=0, metadata={"help": "number of negative examples from the any sample"}
)
codebook_negatives: int = field(
default=0, metadata={"help": "number of negative examples codebook"}
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling. "
"can be tuple of 3 values (start, end, decay)"
},
)
checkpoint_activations: bool = field(
default=False,
metadata={"help": "recompute activations and save memory for extra compute"},
)
@dataclass
class SpeechTextPretrainingConfig(Wav2Vec2Config):
# Add configurations of cross_modal_encoder
cross_modal_encoder_layers: int = field(
default=6, metadata={"help": "the number of layers of the cross modal encoder."}
)
cross_modal_encoder_embed_dim: int = field(
default=768,
metadata={"help": "the embedding dimension of the cross modal encoder."},
)
cross_modal_encoder_ffn_dim: int = field(
default=3072,
metadata={"help": "the feed forward dimension of the cross modal encoder."},
)
cross_modal_encoder_num_heads: int = field(
default=12, metadata={"help": "the number of heads of the cross modal encoder."}
)
ce_encoder_layer_norm_first: bool = field(default=False)
ce_encoder_inputs_dropout: float = field(default=0.1)
disable_ce_encoder: bool = field(
default=False, metadata={"help": "whether to disable cross modal encoder"}
)
# Add configurations of text encoder module
no_scale_text_embedding: bool = field(
default=True, metadata={"help": "whether to scale text embeddings."}
)
max_text_seq_positions: int = field(
default=1024,
metadata={"help": "Maximum input length supported by the positional encoding."},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={"help": "whether to use position embeddings for token inputs"},
)
learned_pos: bool = field(
default=False,
metadata={
"help": "whether to employ positional embedding, otherwise employ positional encoding."
},
)
text_input_dropout: float = field(
default=0.1,
metadata={
"help": "the dropout rate for the input text embeddings of text encoder."
},
)
    text_encoder_layers: int = field(
        default=6, metadata={"help": "the number of layers of the text encoder."}
    )
    text_encoder_embed_dim: int = field(
        default=768,
        metadata={"help": "the embedding dimension of the text encoder."},
    )
    text_encoder_ffn_dim: int = field(
        default=3072,
        metadata={"help": "the feed forward dimension of the text encoder."},
    )
    text_encoder_num_heads: int = field(
        default=12, metadata={"help": "the number of heads of the text encoder."}
    )
text_encoder_layer_norm_first: bool = field(default=False)
# cif settings
cif_input_embed_dim: int = field(
default=768, metadata={"help": "encoder output embedding dimension"}
)
cif_embedding_dim: int = field(
default=512, metadata={"help": "cif output embedding dimension"}
)
produce_weight_type: str = field(
default="conv",
metadata={"help": "the style of weight generation from encoder outputs"},
)
cif_threshold: float = field(
default=0.99,
metadata={"help": "the threshold of accumulated weight for firing"},
)
conv_cif_layer_num: int = field(
default=1, metadata={"help": "the number of cif convolution layers"}
)
conv_cif_width: int = field(
default=3, metadata={"help": "the width of kernel of CIF convolution layer"}
)
conv_cif_output_channels_num: int = field(
default=768, metadata={"help": "the number of CIF convolution output channels"}
)
conv_cif_dropout: float = field(
default=0.0,
metadata={"help": "the dropout rate of the final convolutional layer"},
)
dense_cif_units_num: int = field(
default=768,
metadata={"help": "the projection size of dense cif weight projection"},
)
apply_scaling: bool = field(
default=True, metadata={"help": "scale the summation of all weights"}
)
apply_tail_handling: bool = field(
default=True,
metadata={"help": "handle the tails of cif weights with special strategy"},
)
tail_handling_firing_threshold: float = field(
default=0.5, metadata={"help": "the firing threshold of tail handling"}
)
add_cif_ctxt_layers: bool = field(
default=False,
metadata={
"help": "whether use extra encoding layers to contextualize cif outputs"
},
)
cif_ctxt_layers: int = field(
default=2, metadata={"help": "the number of context layers for cif outputs"}
)
cif_ctxt_embed_dim: int = field(
default=768,
metadata={"help": "the embedding dimension of context layers for cif outputs"},
)
cif_ctxt_ffn_embed_dim: int = field(
default=3072,
metadata={
"help": "the feed forward network dimension of context layers for cif outputs"
},
)
cif_ctxt_attention_heads: int = field(
default=8,
metadata={
"help": "the number of attention heads of context layers for cif outputs"
},
)
cif_ctxt_dropout: float = field(
default=0.1, metadata={"help": "the dropout of context layers for cif outputs"}
)
cif_ctxt_activation_dropout: float = field(
default=0.0,
metadata={"help": "the actiavtion dropout of context layers for cif outputs"},
)
cif_ctxt_attention_dropout: float = field(
default=0.1,
metadata={"help": "the attention dropout of context layers for cif outputs"},
)
cif_ctxt_normalize_before: bool = field(
default=True,
metadata={
"help": "whether to conduct nromalization before get into next sub-block"
},
)
# nar decoder settings
nar_asr_decoder_mode: str = field(
default="nar_decoder",
metadata={
"help": "the mode of decoder, there are three options: ar_decoder, nar_decoder, proj"
},
)
pre_final_proj_dim: int = field(default=768)
nar_decoder_layers: int = field(default=2)
nar_decoder_embed_dim: int = field(default=512)
nar_decoder_ffn_dim: int = field(default=2048)
nar_decoder_num_heads: int = field(default=8)
nar_decoder_dropout: float = field(default=0.1)
nar_decoder_activation_dropout: float = field(default=0.1)
nar_decoder_attention_dropout: float = field(default=0.1)
    # settings about the masking strategies for text inputs
learned_text_mask_emb: bool = field(
default=True,
metadata={
"help": "whether to employ a trainable mask embedding for mask positions."
},
)
mlm_text_mask_prob: float = field(
default=0.15,
metadata={
"help": "the masking probability for masked language modeling over text."
},
)
mlm_text_mask_span_length: int = field(
default=1, metadata={"help": "the length of masked span for mlm."}
)
tlm_text_mask_prob: float = field(
default=0.70,
metadata={
"help": "the masking probability for translation language modeling over text."
},
)
tlm_text_mask_span_length: int = field(
default=1, metadata={"help": "the length of masked span for tlm."}
)
tlm_spec_mask_prob: float = field(
default=0.70,
metadata={
"help": "the masking probability for translation language modeling over speech."
},
)
tlm_spec_mask_span_length: int = field(
default=10, metadata={"help": "the length of masked span for tlm over speech."}
)
@register_model("speech_text_pretraining", dataclass=SpeechTextPretrainingConfig)
class SpeechTextPretrainingModel(BaseFairseqModel):
def __init__(self, cfg: SpeechTextPretrainingConfig, task: FairseqTask):
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
# Obtain default_dictionary
self.default_dict = task.default_dictionary
# build conv feat extractor
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim and not cfg.quantize_input
else None
)
# Register mask configurations for audios
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
# Register mask configurations for text inputs
self.learned_text_mask_emb = cfg.learned_text_mask_emb
self.mlm_text_mask_prob = cfg.mlm_text_mask_prob
self.mlm_text_mask_span_length = cfg.mlm_text_mask_span_length
self.tlm_text_mask_prob = cfg.tlm_text_mask_prob
self.tlm_text_mask_span_length = cfg.tlm_text_mask_span_length
# Register mask configuration for acoustics in TLM
self.tlm_spec_mask_prob = cfg.tlm_spec_mask_prob
self.tlm_spec_mask_span_length = cfg.tlm_spec_mask_span_length
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
if cfg.quantize_targets:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
self.quantizer = GumbelVectorQuantizer(
dim=self.embed,
num_vars=cfg.latent_vars, # V
temp=cfg.latent_temp,
groups=cfg.latent_groups, # G
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
# A shared trainable vector for the replacement of mask locations
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
if cfg.learned_text_mask_emb:
self.text_mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
        else:
            # registered as a buffer (not a Parameter) so the fixed all-zero
            # mask vector moves with the module across devices
            self.register_buffer(
                "text_mask_emb", torch.zeros(cfg.encoder_embed_dim)
            )
self.speech_encoder = TransformerEncoder(cfg)
self.conv_layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
## build position encoding or embedding for text encoder layers
self.text_encoder_layers = cfg.text_encoder_layers
self.text_encoder_embed_dim = cfg.text_encoder_embed_dim
self.text_encoder_ffn_dim = cfg.text_encoder_ffn_dim
self.text_encoder_num_heads = cfg.text_encoder_num_heads
self.text_encoder_layer_norm_first = cfg.text_encoder_layer_norm_first
self.embed_scale = (
1.0
if cfg.no_scale_text_embedding
else math.sqrt(self.text_encoder_embed_dim)
)
self.text_embedding_layer = self.build_embedding(
self.default_dict, self.text_encoder_embed_dim, path=None
)
self.text_embed_positions = (
PositionalEmbedding(
cfg.max_text_seq_positions,
self.text_encoder_embed_dim,
self.default_dict.pad(),
learned=cfg.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
self.dropout_text_inputs = nn.Dropout(cfg.text_input_dropout)
self.cls_emb = nn.Parameter(
torch.FloatTensor(cfg.text_encoder_embed_dim).uniform_()
)
self.sep_emb = nn.Parameter(
torch.FloatTensor(cfg.text_encoder_embed_dim).uniform_()
)
self.text_encoder_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.text_encoder_embed_dim,
ffn_embedding_dim=self.text_encoder_ffn_dim,
num_attention_heads=self.text_encoder_num_heads,
dropout=cfg.dropout,
activation_dropout=cfg.activation_dropout,
attention_dropout=cfg.attention_dropout,
layer_norm_first=cfg.text_encoder_layer_norm_first,
)
for _ in range(self.text_encoder_layers)
]
)
self.text_encoder_layer_norm = LayerNorm(self.text_encoder_embed_dim)
## build cross-modal encoder (CE encoder)
self.disable_ce_encoder = cfg.disable_ce_encoder
self.cross_modal_encoder_layers = cfg.cross_modal_encoder_layers
self.cross_modal_encoder_embed_dim = cfg.cross_modal_encoder_embed_dim
self.cross_modal_encoder_ffn_dim = cfg.cross_modal_encoder_ffn_dim
self.cross_modal_encoder_num_heads = cfg.cross_modal_encoder_num_heads
self.ce_encoder_layer_norm_first = cfg.ce_encoder_layer_norm_first
self.dropout_ce_inputs = nn.Dropout(cfg.ce_encoder_inputs_dropout)
self.cross_modal_encoder_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.cross_modal_encoder_embed_dim,
ffn_embedding_dim=self.cross_modal_encoder_ffn_dim,
num_attention_heads=self.cross_modal_encoder_num_heads,
dropout=cfg.dropout,
activation_dropout=cfg.activation_dropout,
attention_dropout=cfg.attention_dropout,
layer_norm_first=cfg.ce_encoder_layer_norm_first,
)
for _ in range(self.cross_modal_encoder_layers)
]
)
self.ce_encoder_layer_norm = LayerNorm(self.cross_modal_encoder_embed_dim)
# build cls projection for speech-text matching
self.cls_proj = nn.Linear(self.cross_modal_encoder_embed_dim, 1)
# build masked language modeling projection for masked prediction
self.latent_vars = cfg.latent_vars
self.latent_groups = cfg.latent_groups
self.quantized_vocab_size = self.latent_vars**self.latent_groups
self.text_proj = nn.Linear(
self.cross_modal_encoder_embed_dim, len(self.default_dict)
)
self.spec_proj = nn.Linear(
self.cross_modal_encoder_embed_dim, self.latent_vars**self.latent_groups
)
# build asr decoder part
self.cif = CifMiddleware(cfg=cfg)
if cfg.nar_asr_decoder_mode == "proj":
self.nar_asr_decoder = NarProjAsrDecoder(cfg, self.default_dict)
elif cfg.nar_asr_decoder_mode == "nar_decoder":
self.nar_asr_decoder = NarTransformerAsrDecoder(cfg, self.default_dict)
else:
self.nar_asr_decoder = NarProjAsrDecoder(cfg, self.default_dict)
# build tts decoder part
# TODO: build tts decoder part
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        super().upgrade_state_dict_named(state_dict, name)
        return state_dict
def build_embedding(self, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_model(cls, cfg: SpeechTextPretrainingConfig, task: FairseqTask = None):
"""Build a new model instance."""
return cls(cfg, task)
def apply_spec_channel_temporal_mask(
self,
x,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x.shape
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb)
else:
mask_indices = None
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x = index_put(x, mask_channel_indices, 0)
return x, mask_indices
def apply_spec_temporal_mask(
self,
x,
padding_mask,
):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.tlm_spec_mask_prob,
self.tlm_spec_mask_span_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.mask_emb)
else:
mask_indices = None
return x, mask_indices
def apply_text_temporal_mask(
self,
x, # x is the text embeddings after text embedding layer
padding_mask,
text_mask_prob=None,
text_mask_length=None,
):
B, T, C = x.shape
mask_indices = None
if text_mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
text_mask_prob,
text_mask_length,
min_masks=1,
no_overlap=self.no_mask_overlap,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = index_put(x, mask_indices, self.text_mask_emb)
return x, mask_indices
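    # compute_mask_indices returns a (B, T) boolean numpy array marking the
    # masked positions; masked embeddings are overwritten with the shared
    # (learned or all-zero) text mask vector, mirroring the mask_emb
    # replacement used on the speech side.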
def sample_negatives(self, y, num, padding_count=None):
if self.n_negatives == 0 and self.cross_sample_negatives == 0:
return y.new(0)
bsz, tsz, fsz = y.shape
y = y.view(-1, fsz) # BTC => (BxT)C
# FIXME: what happens if padding_count is specified?
cross_high = tsz * bsz
high = tsz - (padding_count or 0)
with torch.no_grad():
assert high > 1, f"{bsz, tsz, fsz}"
if self.n_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * num)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * num),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs = y[neg_idxs.view(-1)]
negs = negs.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
return negs, neg_idxs
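    # Sampling detail: same-sample indices are drawn from [0, high - 2] and
    # every index >= the positive's own time step is shifted up by one
    # (neg_idxs[neg_idxs >= tszs] += 1), so a negative can never be the
    # positive frame itself; `neg_idxs[i] += i * high` then offsets each
    # batch element into the flattened (B*T) x C view of y.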
def compute_preds(self, x, y, negatives):
neg_is_pos = (y == negatives).all(-1)
y = y.unsqueeze(0)
targets = torch.cat(
[y, negatives], dim=0
) # combine target with negative distractors
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits = logits / self.logit_temp
if is_xla_tensor(logits) or neg_is_pos.any():
fillval = -float(2**30)
if not hasattr(self, "_inftensor"):
self._inftensor = (
torch.tensor(fillval).to(x.device)
if is_xla_tensor(logits)
else float("-inf")
)
logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
return logits
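    # The resulting logits are (num candidates) x B x T, where the candidates
    # are the true target at index 0 followed by all sampled negatives.
    # Cosine similarities are sharpened by logit_temp, and any negative that
    # happens to equal the positive is masked to -inf so the downstream
    # cross-entropy (with all-zero targets) ignores it.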
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(
input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
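    # Worked example (assuming the default conv_feature_layers
    # "[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]"):
    # an input of 16000 samples shrinks layer by layer as
    # 16000 -> 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49 frames,
    # i.e. roughly one frame per 320 samples (20 ms at 16 kHz).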
def select_samples(self, samples, indices):
selected_samples = dict()
for key in samples.keys():
            if isinstance(samples[key], torch.Tensor) and samples[key].numel() > 1:
selected_samples[key] = torch.index_select(samples[key], 0, indices)
else:
continue
return selected_samples
def combine_samples(self, items, ordered_keys, input_samples_dict):
"""
        :param items: the item keys to combine;
        :param ordered_keys: the ordered list of data labels; all samples are
            concatenated following this order;
        :param input_samples_dict: the input samples dictionary keyed by data label;
        :return: combined samples
        """
        # Recombine all samples for convenience
recom_sample = dict()
for item in items:
if item == "ntokens":
continue
if item == "id":
continue
recom_sample[item] = torch.cat(
[input_samples_dict[data_label][item] for data_label in ordered_keys],
dim=0,
)
return recom_sample
def forward(
self,
sample,
mask_audio=True,
mask_text=True,
features_only=False,
mode=1,
):
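        # The `mode` flag selects which sample groups take part (see the
        # branches below): 1 = unpaired text + unpaired speech; 2 = unpaired
        # text + unpaired speech + paired data (with sampled negative pairs
        # for speech-text matching); 3 = paired data only; 4 = unpaired
        # speech only; 5 = unpaired text only.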
        # Collect all key names
items = sample.keys()
# Split samples according to data labels
data_labels = sample["data_labels"]
onehot_text_indices = (data_labels == 0).int()
onehot_speech_indices = (data_labels == 1).int()
onehot_paired_indices = (data_labels == 2).int()
text_indices = torch.nonzero(onehot_text_indices).squeeze(-1)
speech_indices = torch.nonzero(onehot_speech_indices).squeeze(-1)
paired_indices = torch.nonzero(onehot_paired_indices).squeeze(-1)
num_text_samples = text_indices.size(0)
num_spec_samples = speech_indices.size(0)
num_pair_samples = paired_indices.size(0)
text_samples = self.select_samples(sample, text_indices)
speech_samples = self.select_samples(sample, speech_indices)
paired_samples = self.select_samples(sample, paired_indices)
stm_labels = None
num_pos_paired_samples = None
num_total_paired_samples = None
if mode == 2:
num_pos_paired_samples = num_pair_samples
num_total_paired_samples = num_pos_paired_samples * 2
assert (
num_pair_samples == num_spec_samples
), "please ensure that the number of speech samples equals that of paired samples"
negative_paired_samples = dict()
negative_paired_samples["data_labels"] = (
2
* torch.ones([num_total_paired_samples - num_pos_paired_samples])
.int()
.cuda()
)
for i in range(num_pos_paired_samples):
spec_sample_id = torch.tensor(
[random.randint(0, num_spec_samples - 1)]
).cuda()
text_sample_id = torch.tensor(
[random.randint(0, num_text_samples - 1)]
).cuda()
spec_sample = self.select_samples(speech_samples, spec_sample_id)
text_sample = self.select_samples(text_samples, text_sample_id)
# Combine samples with negative match label
for key in items:
if key == "ntokens":
continue
if key == "data_labels":
continue
if key == "id":
continue
if key == "text" or key == "target_lengths" or key == "target":
if key not in negative_paired_samples.keys():
negative_paired_samples[key] = [text_sample[key]]
else:
negative_paired_samples[key].append(text_sample[key])
else:
if key not in negative_paired_samples.keys():
negative_paired_samples[key] = [spec_sample[key]]
else:
negative_paired_samples[key].append(spec_sample[key])
# Combine all single samples
for key in negative_paired_samples.keys():
if key == "data_labels":
continue
negative_paired_samples[key] = torch.cat(
negative_paired_samples[key], dim=0
)
# Combine positive and negative samples
for key in items:
if key == "ntokens":
continue
if key == "id":
continue
paired_samples[key] = torch.cat(
[paired_samples[key], negative_paired_samples[key]], dim=0
)
# Produce speech and text matching labels
stm_labels = torch.cat(
[
torch.ones([num_pos_paired_samples]),
torch.zeros([num_total_paired_samples - num_pos_paired_samples]),
],
dim=0,
).cuda()
num_pair_samples = num_total_paired_samples
all_samples = None
data_borders = None
if mode == 1:
all_samples = {
"text": text_samples,
"spec": speech_samples,
}
data_borders = {
"text": (0, num_text_samples),
"spec": (num_text_samples, num_text_samples + num_spec_samples),
}
del paired_samples
elif mode == 2:
all_samples = {
"text": text_samples,
"spec": speech_samples,
"pair": paired_samples,
}
data_borders = {
"text": (0, num_text_samples),
"spec": (num_text_samples, num_text_samples + num_spec_samples),
"pair": (
num_text_samples + num_spec_samples,
num_text_samples + num_spec_samples + num_pair_samples,
),
}
elif mode == 3:
all_samples = {
"pair": paired_samples,
}
data_borders = {
"pair": (0, num_pair_samples),
}
del speech_samples
del text_samples
elif mode == 4:
all_samples = {
"spec": speech_samples,
}
data_borders = {
"spec": (0, num_spec_samples),
}
elif mode == 5:
all_samples = {
"text": text_samples,
}
data_borders = {
"text": (0, num_text_samples),
}
else:
all_samples = None
data_borders = None
# Release some space for future forward
del data_labels
del onehot_text_indices
del onehot_speech_indices
del onehot_paired_indices
del text_indices
del speech_indices
del paired_indices
# Forward speech encoder part
spec_enc_data_borders = None
spec_enc_samples = None
if mode == 1 or mode == 4: # Single modal training
spec_enc_data_borders = {"spec": (0, num_spec_samples)}
spec_enc_samples = all_samples["spec"]
elif mode == 2: # Paired and unpaired training
spec_enc_data_borders = {
"spec": (0, num_spec_samples),
"pair": (num_spec_samples, num_spec_samples + num_pair_samples),
}
spec_enc_samples = self.combine_samples(
items, ["spec", "pair"], all_samples
)
elif mode == 3: # Only paired trainings
spec_enc_data_borders = {"pair": (0, num_pair_samples)}
spec_enc_samples = all_samples["pair"]
else:
spec_enc_data_borders = None
spec_enc_samples = None
speech_encoder_outputs = None
if spec_enc_samples is not None:
speech_encoder_outputs = self.forward_speech_encoder(
spec_enc_samples,
spec_enc_data_borders,
mask_audio=mask_audio,
)
# Forward text encoder part
text_enc_samples = None
text_enc_data_borders = None
if mode == 1 or mode == 5: # Single modal training
text_enc_data_borders = {"text": (0, num_text_samples)}
text_enc_samples = all_samples["text"]
elif mode == 2: # Paired and unpaired training
text_enc_data_borders = {
"text": (0, num_text_samples),
"pair": (num_text_samples, num_text_samples + num_pair_samples),
}
text_enc_samples = self.combine_samples(
items, ["text", "pair"], all_samples
)
elif mode == 3: # Only paired trainings
text_enc_data_borders = {"pair": (0, num_pair_samples)}
text_enc_samples = all_samples["pair"]
else:
text_enc_samples = None
text_enc_data_borders = None
text_encoder_outputs = None
if text_enc_samples is not None:
text_encoder_outputs = self.forward_text_encoder(
text_enc_samples, text_enc_data_borders, mask_text
)
# Prepare inputs for cross modal encoder
(
cse_inputs,
cse_padding_mask,
text_max_len,
spec_max_len,
) = self.prepare_cse_inputs(
text_encoder_outputs,
speech_encoder_outputs,
text_enc_data_borders,
spec_enc_data_borders,
data_borders,
)
# Forward the cross-modal encoder part
# Forward the pure text inputs
joint_outputs = self.forward_cross_modal_encoder(
cse_inputs, cse_padding_mask, text_max_len, spec_max_len, data_borders
)
cls_outputs = joint_outputs[:, 0, :].squeeze(1) # B x C
final_text_outputs = joint_outputs[:, 1 : (text_max_len + 1), :] # B x T_t x C
final_speech_outputs = joint_outputs[:, (text_max_len + 2) :, :] # B x T_s x C
result = {
"text_outputs": text_encoder_outputs["text_outputs"]
if text_encoder_outputs is not None
else None,
"text_enc_padding_mask": text_encoder_outputs["text_padding_mask"]
if text_encoder_outputs is not None
else None,
"speech_outputs": speech_encoder_outputs["speech_outputs"]
if speech_encoder_outputs is not None
else None,
"spec_enc_padding_mask": speech_encoder_outputs["speech_padding_mask"]
if speech_encoder_outputs is not None
else None,
"final_text_outputs": final_text_outputs,
"final_speech_outputs": final_speech_outputs,
"text_max_len": text_max_len,
"spec_max_len": spec_max_len,
"cse_padding_mask": cse_padding_mask,
"joint_outputs": joint_outputs,
"data_borders": data_borders,
}
if "pair" in data_borders.keys():
# Speech-text matching (STM)
cls_outputs = cls_outputs[
data_borders["pair"][0] : data_borders["pair"][1]
] # B x C
stm_logits = self.cls_proj(cls_outputs).squeeze(-1) # B x C --> B x 1 --> B
result["stm_logits"] = stm_logits
result["stm_labels"] = stm_labels
# Translation Language Modeling (TLM) for text part
paired_text_mask_indices = text_encoder_outputs[
"paired_text_mask_indices"
] # B x T
pos_paired_text_mask_indices = paired_text_mask_indices[
:num_pos_paired_samples, :
]
paired_text_tlm_outputs = final_text_outputs[
data_borders["pair"][0] : data_borders["pair"][1]
] # B x T x C
paired_text_tlm_outputs = paired_text_tlm_outputs[
:num_pos_paired_samples, :, :
][
pos_paired_text_mask_indices
] # B x T x C
paired_text_tlm_logits = self.text_proj(paired_text_tlm_outputs)
paired_text_tlm_targets = text_encoder_outputs["tlm_targets"]
paired_text = text_encoder_outputs["paired_text"]
result["paired_text_tlm_logits"] = paired_text_tlm_logits
result["paired_text_tlm_targets"] = paired_text[:num_pos_paired_samples, :][
pos_paired_text_mask_indices
]
# Translation Language Modeling (TLM) for speech part
paired_spec_mask_indices = speech_encoder_outputs[
"paired_spec_mask_indices"
]
paired_spec_tlm_outputs = final_speech_outputs[
data_borders["pair"][0] : data_borders["pair"][1]
] # B x T x C
paired_spec_tlm_outputs = paired_spec_tlm_outputs[
:num_pos_paired_samples, :, :
] # B/2 x T x C
paired_spec_tlm_outputs = paired_spec_tlm_outputs[
paired_spec_mask_indices[:num_pos_paired_samples, :]
]
paired_spec_tlm_logits = self.spec_proj(
paired_spec_tlm_outputs
) # B x T x V
pair_quantized_target_ids = speech_encoder_outputs[
"pair_quantized_target_ids"
]
if self.latent_groups == 1:
paired_spec_tlm_targets = pair_quantized_target_ids.squeeze(
-1
) # B x T_mask
paired_spec_tlm_targets = paired_spec_tlm_targets[
:num_pos_paired_samples
]
elif self.latent_groups == 2:
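                # With G=2 groups, a frame's pair of codebook ids (i, j) is
                # collapsed into a single class id i * V + j: the two one-hot
                # vectors form an outer-product indicator over a V x V grid,
                # which is then reduced against a lookup table holding
                # 0..V**2-1 laid out as a V x V matrix.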
one_hot_raw_ids = (
torch.nn.functional.one_hot(
pair_quantized_target_ids[:, :, 0], self.latent_vars
)
.unsqueeze(2)
.repeat(1, 1, self.latent_vars, 1)
)
one_hot_col_ids = (
torch.nn.functional.one_hot(
pair_quantized_target_ids[:, :, -1], self.latent_vars
)
.unsqueeze(2)
.repeat(1, 1, self.latent_vars, 1)
)
ind = (
one_hot_raw_ids.transpose(-1, -2) * one_hot_col_ids
) # B x T x V x V
                targets_id_pool = (
                    torch.tensor(list(range(0, self.quantized_vocab_size)))
                    .view([self.latent_vars] * self.latent_groups)
                    .unsqueeze(0)
                    .unsqueeze(0)
                    .cuda()  # keep on the same device as `ind`, as in the MLM branch below
                )  # 1 x 1 x V x V
paired_spec_tlm_targets = (
(ind * targets_id_pool).sum(-1).sum(-1)
) # B x T
paired_spec_tlm_targets = paired_spec_tlm_targets[
:num_pos_paired_samples
]
else:
                raise NotImplementedError("only latent_groups in {1, 2} is supported")
result["paired_spec_tlm_logits"] = paired_spec_tlm_logits
result["paired_spec_tlm_targets"] = paired_spec_tlm_targets
result["num_pair_samples"] = num_pair_samples
result["num_pos_paired_samples"] = num_pos_paired_samples
if "text" in data_borders.keys():
# Masked Language modeling (MLM)
unpaired_text_mask_indices = text_encoder_outputs[
"unpaired_text_mask_indices"
]
text_mlm_outputs = final_text_outputs[
data_borders["text"][0] : data_borders["text"][1]
] # B x T x C
text_mlm_outputs = text_mlm_outputs[unpaired_text_mask_indices]
text_mlm_logits = self.text_proj(text_mlm_outputs) # B x T x V
text_mlm_targets = text_encoder_outputs["mlm_targets"]
result["text_mlm_logits"] = text_mlm_logits
result["text_mlm_targets"] = text_mlm_targets
result["num_text_samples"] = num_text_samples
if "spec" in data_borders.keys():
# W2v-BERT speech masked language modeling (MLM)
unpaired_spec_mask_indices = speech_encoder_outputs[
"unpaired_spec_mask_indices"
]
spec_mlm_outputs = final_speech_outputs[
data_borders["spec"][0] : data_borders["spec"][1]
] # B x T x C
# Obtain w2v-bert speech mlm logits
spec_mlm_outputs = spec_mlm_outputs[
unpaired_spec_mask_indices
] # (B x T_mask) x V
spec_mlm_logits = self.spec_proj(spec_mlm_outputs) # (B x T_mask) x V
result["spec_mlm_logits"] = spec_mlm_logits # (B x T_mask) x V
# Obtain w2v-bert speech mlm targets
spec_quantized_target_ids = speech_encoder_outputs[
"spec_quantized_target_ids"
] # B x T_mask x G
if self.latent_groups == 1:
result["spec_mlm_targets"] = spec_quantized_target_ids.squeeze(
-1
) # B x T_mask
elif self.latent_groups == 2:
one_hot_raw_ids = (
torch.nn.functional.one_hot(
spec_quantized_target_ids[:, :, 0], self.latent_vars
)
.unsqueeze(2)
.repeat(1, 1, self.latent_vars, 1)
)
one_hot_col_ids = (
torch.nn.functional.one_hot(
spec_quantized_target_ids[:, :, -1], self.latent_vars
)
.unsqueeze(2)
.repeat(1, 1, self.latent_vars, 1)
)
ind = (
one_hot_raw_ids.transpose(-1, -2) * one_hot_col_ids
) # B x T_mask x V x V
targets_id_pool = (
torch.tensor(list(range(0, self.quantized_vocab_size)))
.view([self.latent_vars] * self.latent_groups)
.unsqueeze(0)
.unsqueeze(0)
.cuda()
) # 1 x 1 x V x V
spec_mlm_targets = (ind * targets_id_pool).sum(-1).sum(-1) # B x T_mask
result["spec_mlm_targets"] = spec_mlm_targets # B x T_mask
else:
                raise NotImplementedError("only latent_groups in {1, 2} is supported")
# Contrastive Loss
contrastive_spec_logits = speech_encoder_outputs["contrastive_spec_logits"]
result["contrastive_spec_logits"] = contrastive_spec_logits
# Diversity Loss and L1 Loss for feature encoder outputs
result["features_pen"] = speech_encoder_outputs["features_pen"]
result["prob_perplexity"] = speech_encoder_outputs["prob_perplexity"]
result["code_perplexity"] = speech_encoder_outputs["code_perplexity"]
result["num_vars"] = speech_encoder_outputs["num_vars"]
result["temp"] = speech_encoder_outputs["temp"]
result["num_spec_samples"] = num_spec_samples
## Forward ASR decoder
# cif_inputs = {
# "encoder_raw_out": speech_outputs,
# "encoder_padding_mask": padding_mask,
# }
# target_lengths = (text_padding_mask != 0).int().sum(-1)
# cif_outputs = self.cif(encoder_outputs=cif_inputs, target_lengths=target_lengths)
# decoder_outputs = self.nar_asr_decoder(prev_output_tokens=text, cif_out=cif_outputs)
return result
def prepare_cse_inputs(
self,
text_encoder_outputs=None,
speech_encoder_outputs=None,
text_enc_data_borders=None,
spec_enc_data_borders=None,
data_borders=None,
):
all_keys = data_borders.keys()
text_max_len = 0
if text_encoder_outputs is not None:
text_outputs = text_encoder_outputs["text_outputs"]
text_padding_mask = text_encoder_outputs["text_padding_mask"]
text_max_len = text_padding_mask.size(-1)
spec_max_len = 0
if speech_encoder_outputs is not None:
speech_outputs = speech_encoder_outputs["speech_outputs"]
spec_padding_mask = speech_encoder_outputs["speech_padding_mask"]
spec_max_len = spec_padding_mask.size(-1)
text_part_list = []
text_padding_mask_list = []
spec_part_list = []
spec_padding_mask_list = []
# Prepare the text part of cse inputs
if "text" in all_keys and text_encoder_outputs is not None:
text_text_outputs = text_outputs[
text_enc_data_borders["text"][0] : text_enc_data_borders["text"][1]
]
text_spec_outputs = torch.randn(
[text_text_outputs.size(0), spec_max_len, text_outputs.size(-1)]
).cuda()
text_text_padding_mask = text_padding_mask[
text_enc_data_borders["text"][0] : text_enc_data_borders["text"][1]
]
text_spec_padding_mask = (
torch.ones([text_text_outputs.size(0), spec_max_len]).cuda().bool()
)
text_part_list.append(text_text_outputs)
text_padding_mask_list.append(text_text_padding_mask)
spec_part_list.append(text_spec_outputs)
spec_padding_mask_list.append(text_spec_padding_mask)
# Prepare the spec part of cse inputs
if "spec" in all_keys and speech_encoder_outputs is not None:
spec_spec_outputs = speech_outputs[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
spec_text_outputs = torch.randn(
[spec_spec_outputs.size(0), text_max_len, speech_outputs.size(-1)]
).cuda()
spec_spec_padding_mask = spec_padding_mask[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
spec_text_padding_mask = (
torch.ones([spec_spec_outputs.size(0), text_max_len]).cuda().bool()
)
text_part_list.append(spec_text_outputs)
text_padding_mask_list.append(spec_text_padding_mask)
spec_part_list.append(spec_spec_outputs)
spec_padding_mask_list.append(spec_spec_padding_mask)
# Prepare the pair part of cse inputs
if (
"pair" in all_keys
and speech_encoder_outputs is not None
and text_encoder_outputs is not None
):
paired_text_outputs = text_outputs[
text_enc_data_borders["pair"][0] : text_enc_data_borders["pair"][1]
]
paired_spec_outputs = speech_outputs[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
]
paired_text_padding_mask = text_padding_mask[
text_enc_data_borders["pair"][0] : text_enc_data_borders["pair"][1]
]
paired_spec_padding_mask = spec_padding_mask[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
]
text_part_list.append(paired_text_outputs)
text_padding_mask_list.append(paired_text_padding_mask)
spec_part_list.append(paired_spec_outputs)
spec_padding_mask_list.append(paired_spec_padding_mask)
text_inputs = torch.cat(text_part_list, dim=0)
modified_text_padding_mask = torch.cat(text_padding_mask_list, dim=0)
spec_inputs = torch.cat(spec_part_list, dim=0)
modified_spec_padding_mask = torch.cat(spec_padding_mask_list, dim=0)
assert text_inputs.size(0) == spec_inputs.size(0)
total_bsz = text_inputs.size(0)
        # bool to match the text/spec padding masks concatenated below
        cls_padding_mask = torch.zeros([total_bsz, 1]).bool().cuda()
        sep_padding_mask = torch.zeros([total_bsz, 1]).bool().cuda()
joint_inputs = torch.cat(
[
self.cls_emb.unsqueeze(0).repeat([total_bsz, 1]).unsqueeze(1),
text_inputs,
self.sep_emb.unsqueeze(0).repeat([total_bsz, 1]).unsqueeze(1),
spec_inputs,
],
dim=1,
)
joint_padding_mask = torch.cat(
[
cls_padding_mask,
modified_text_padding_mask,
sep_padding_mask,
modified_spec_padding_mask,
],
dim=1,
)
return joint_inputs, joint_padding_mask, text_max_len, spec_max_len
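    # The joint sequence is laid out as [CLS] + text (text_max_len) + [SEP] +
    # speech (spec_max_len), with True/1 marking padded positions. Unimodal
    # samples get random placeholder features plus an all-True padding mask
    # for the missing modality, so the cross-modal encoder never attends to
    # them.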
def forward_cross_modal_encoder(
self, inputs, input_padding_mask, text_max_len, spec_max_len, data_borders
):
inputs = inputs.half()
if self.disable_ce_encoder or self.cross_modal_encoder_layers == 0:
return inputs
if not self.ce_encoder_layer_norm_first:
inputs = self.ce_encoder_layer_norm(inputs)
inputs = self.dropout_ce_inputs(inputs)
if "text" in data_borders and (
"pair" in data_borders or "spec" in data_borders
):
## 1.Forward pure text inputs
pure_textual_inputs = inputs[
data_borders["text"][0] : data_borders["text"][1]
] # B x T x C
pure_textual_inputs = pure_textual_inputs[:, : (text_max_len + 2), :]
            # (text_max_len + 2) because of the prepended [CLS] and appended [SEP]
pure_textual_input_padding_mask = input_padding_mask[
data_borders["text"][0] : data_borders["text"][1]
]
pure_textual_input_padding_mask = pure_textual_input_padding_mask[
:, : (text_max_len + 2)
]
pure_textual_inputs = pure_textual_inputs.transpose(
0, 1
).half() # T x B x C
            for layer in self.cross_modal_encoder_stacks:
pure_textual_inputs, _ = layer(
pure_textual_inputs,
self_attn_padding_mask=pure_textual_input_padding_mask,
need_weights=False,
)
pure_textual_inputs = pure_textual_inputs.transpose(0, 1) # B x T x C
## 2.Forward other parts with only speech or paired data
num_text_samples = pure_textual_inputs.size(0)
other_inputs = inputs[num_text_samples:, :, :]
other_input_padding_mask = input_padding_mask[num_text_samples:, :]
other_inputs = other_inputs.transpose(0, 1).half()
            for layer in self.cross_modal_encoder_stacks:
other_inputs, _ = layer(
other_inputs,
self_attn_padding_mask=other_input_padding_mask,
need_weights=False,
)
other_inputs = other_inputs.transpose(0, 1) # B x T x C
## 3.Combine all of them
pure_textual_inputs = torch.cat(
[
pure_textual_inputs, # num_text_samples x (text_max_len + 2) x C
torch.zeros(
num_text_samples, spec_max_len, pure_textual_inputs.size(-1)
)
.half()
.cuda(),
],
dim=1,
)
outputs = torch.cat([pure_textual_inputs, other_inputs], dim=0)
else:
# Forward cross-modal encoder
inputs = inputs.transpose(0, 1).half()
            for layer in self.cross_modal_encoder_stacks:
inputs, _ = layer(
inputs,
self_attn_padding_mask=input_padding_mask,
need_weights=False,
)
outputs = inputs.transpose(0, 1) # B x T x C
if self.ce_encoder_layer_norm_first:
outputs = self.ce_encoder_layer_norm(outputs)
return outputs
def forward_text_embedding_module(self, text):
x = self.text_embedding_layer(text)
x = self.embed_scale * x
if self.text_embed_positions is not None:
x = x + self.text_embed_positions(text)
if not self.text_encoder_layer_norm_first:
x = self.text_encoder_layer_norm(x)
x = self.dropout_text_inputs(x)
return x
def forward_text_encoder(self, text_enc_samples, text_enc_data_borders, mask_text):
text = text_enc_samples["text"]
text_padding_mask = (text == self.default_dict.pad()).bool()
# Forward text embedding layers
text_embeds = self.forward_text_embedding_module(text)
# Forward masking
unpaired_text_mask_indices = None
paired_text_mask_indices = None
if mask_text:
masked_text_embeds_list = []
text_mask_indices_list = []
if "text" in text_enc_data_borders.keys():
# For unpaired text
unpaired_text_embeds = text_embeds[
text_enc_data_borders["text"][0] : text_enc_data_borders["text"][1]
]
unpaired_text_padding_mask = text_padding_mask[
text_enc_data_borders["text"][0] : text_enc_data_borders["text"][1]
]
(
unpaired_masked_text_embeds,
unpaired_text_mask_indices,
) = self.apply_text_temporal_mask(
unpaired_text_embeds,
unpaired_text_padding_mask,
text_mask_prob=self.mlm_text_mask_prob,
text_mask_length=self.mlm_text_mask_span_length,
)
masked_text_embeds_list.append(unpaired_masked_text_embeds)
text_mask_indices_list.append(unpaired_text_mask_indices)
                if unpaired_text_mask_indices.numel() == 0:
                    raise ValueError(
                        "unpaired_text_mask_indices has no elements "
                        f"(size: {unpaired_text_mask_indices.size()})."
                    )
if "pair" in text_enc_data_borders.keys():
# For paired text
paired_text_embeds = text_embeds[
text_enc_data_borders["pair"][0] : text_enc_data_borders["pair"][1]
]
paired_text_padding_mask = text_padding_mask[
text_enc_data_borders["pair"][0] : text_enc_data_borders["pair"][1]
]
(
paired_masked_text_embeds,
paired_text_mask_indices,
) = self.apply_text_temporal_mask(
paired_text_embeds,
paired_text_padding_mask,
text_mask_prob=self.tlm_text_mask_prob,
text_mask_length=self.tlm_text_mask_span_length,
)
masked_text_embeds_list.append(paired_masked_text_embeds)
text_mask_indices_list.append(paired_text_mask_indices)
# Combine each outputs
masked_text_embeds = torch.cat(masked_text_embeds_list, dim=0)
masked_text_indices = torch.cat(text_mask_indices_list, dim=0)
else:
masked_text_embeds = text_embeds
masked_text_indices = None
# Forward transformer layers
x = masked_text_embeds.transpose(0, 1) # T x B x C
for layer in self.text_encoder_stacks:
x, _ = layer(
x, self_attn_padding_mask=text_padding_mask, need_weights=False
)
x = x.transpose(0, 1) # B x T x C
if self.text_encoder_layer_norm_first:
x = self.text_encoder_layer_norm(x) # B x T x C
result = {
"text_outputs": x,
"text_padding_mask": text_padding_mask,
"text_mask_indices": masked_text_indices,
}
if "text" in text_enc_data_borders.keys():
if unpaired_text_mask_indices is not None:
unpaired_text = text[
text_enc_data_borders["text"][0] : text_enc_data_borders["text"][1]
]
result["unpaired_text"] = unpaired_text
result["mlm_targets"] = unpaired_text[unpaired_text_mask_indices]
result["unpaired_text_mask_indices"] = unpaired_text_mask_indices
if "pair" in text_enc_data_borders.keys():
if paired_text_mask_indices is not None:
paired_text = text[
text_enc_data_borders["pair"][0] : text_enc_data_borders["pair"][1]
]
result["paired_text"] = paired_text
result["tlm_targets"] = paired_text[paired_text_mask_indices]
result["paired_text_mask_indices"] = paired_text_mask_indices
return result
def forward_speech_encoder(
self, spec_enc_samples, spec_enc_data_borders, mask_audio=False
):
# Get all speech samples
source = spec_enc_samples["source"]
padding_mask = spec_enc_samples["padding_mask"]
# Forward conv feat extractor
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
features_pen = (
features[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
.float()
.pow(2)
.mean()
if "spec" in spec_enc_data_borders.keys()
else None
)
features = features.transpose(1, 2)
features = self.conv_layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
features.shape[:2], dtype=features.dtype, device=features.device
)
            # these two operations make sure that all values
            # before the output length indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
else:
padding_mask = None
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
# Forward masking operation
y_spec = None
unpaired_spec_mask_indices = None
unpaired_spec_unmasked_features = None
y_pair = None
paired_mask_indices = None
pair_unmasked_features = None
if mask_audio:
masked_spec_feats_list = []
spec_mask_indices_list = []
if "spec" in spec_enc_data_borders.keys():
# For unpaired speech
unpaired_spec_feats = features[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
unpaired_spec_padding_mask = padding_mask[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
(
unpaired_masked_spec_feats,
unpaired_spec_mask_indices,
) = self.apply_spec_channel_temporal_mask(
unpaired_spec_feats, unpaired_spec_padding_mask
)
unpaired_spec_unmasked_features = unmasked_features[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
masked_spec_feats_list.append(unpaired_masked_spec_feats)
spec_mask_indices_list.append(unpaired_spec_mask_indices)
if (
not is_xla_tensor(unpaired_masked_spec_feats)
and unpaired_spec_mask_indices is not None
):
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
y_spec = unpaired_spec_unmasked_features[
unpaired_spec_mask_indices
].view(
unpaired_spec_unmasked_features.size(0),
-1,
unpaired_spec_unmasked_features.size(-1),
                    )  # y holds the true values at the masked locations
else:
y_spec = unpaired_spec_unmasked_features
if "pair" in spec_enc_data_borders.keys():
# Paired data
paired_spec_feats = features[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
]
paired_spec_padding_mask = padding_mask[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
]
(
paired_masked_spec_feats,
paired_mask_indices,
) = self.apply_spec_temporal_mask(
paired_spec_feats, paired_spec_padding_mask
)
pair_unmasked_features = unmasked_features[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
]
masked_spec_feats_list.append(paired_masked_spec_feats)
spec_mask_indices_list.append(paired_mask_indices)
if (
not is_xla_tensor(paired_masked_spec_feats)
and paired_mask_indices is not None
):
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
y_pair = pair_unmasked_features[paired_mask_indices].view(
pair_unmasked_features.size(0),
-1,
pair_unmasked_features.size(-1),
                    )  # y holds the true values at the masked locations
else:
y_pair = pair_unmasked_features
masked_spec_feats = torch.cat(masked_spec_feats_list, dim=0)
spec_mask_indices = torch.cat(spec_mask_indices_list, dim=0)
        else:
            # No masking applied; use the features as-is
            masked_spec_feats = features
            spec_mask_indices = None
# For contrastive learning
if "spec" in spec_enc_data_borders.keys():
y_spec = unmasked_features[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
unpaired_spec_unmasked_features = unmasked_features[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
]
if "pair" in spec_enc_data_borders.keys():
y_pair = unmasked_features[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
]
pair_unmasked_features = unmasked_features[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
]
# Forward contrastive module of speech encoder
x, layer_results = self.speech_encoder(
masked_spec_feats, padding_mask=padding_mask
)
# Forward quantizer module
def forward_quantizer(x, y, unmasked_features, mask_indices, return_all=False):
# Forward quantizer part with convolutional layers outputs
if self.quantizer:
q = self.quantizer(y, produce_targets=True)
y = q["x"] # B x T x C
num_vars = q["num_vars"]
code_ppl = q["code_perplexity"]
prob_ppl = q["prob_perplexity"]
curr_temp = q["temp"]
quantized_target_ids = q["targets"] # B x T x G
y = self.project_q(y)
                # Obtain negative samples for the contrastive loss
if self.negatives_from_everywhere:
neg_cands = self.quantizer(
unmasked_features, produce_targets=False
)["x"]
negs, _ = self.sample_negatives(
neg_cands, y.size(1), padding_count=None
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=None,
) # N_negs x B x T x C
                # Obtain additional negative samples from the codebooks
if self.codebook_negatives > 0:
cb_negs = self.quantizer.sample_from_codebook(
y.size(0) * y.size(1), self.codebook_negatives
)
cb_negs = cb_negs.view(
self.codebook_negatives, y.size(0), y.size(1), -1
                    )  # order doesn't matter
cb_negs = self.project_q(cb_negs)
negs = torch.cat([negs, cb_negs], dim=0)
else:
y = self.project_q(y)
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
quantized_target_ids = None # B x T x G
if self.negatives_from_everywhere:
negs, _ = self.sample_negatives(
unmasked_features, y.size(1), padding_count=None
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(y, y.size(1), padding_count=None)
            # Keep only the final outputs at the masked locations
if not is_xla_tensor(x):
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
x = x[mask_indices].view(x.size(0), -1, x.size(-1)) # B x T_mask x C
            # Optional GLU over targets (unused unless target_glu is configured)
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
# y shape = B x T_mask x C
# negs shape = n_negs x B x T x C
x = self.final_proj(
x
) # Project x to the dimension of latent variables, B x T_mask x C_final
x = self.compute_preds(x, y, negs)
if return_all:
return x, quantized_target_ids, num_vars, code_ppl, prob_ppl, curr_temp
else:
return x, quantized_target_ids
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
logits_spec = None
spec_quantized_target_ids = None
if "spec" in spec_enc_data_borders.keys():
(
logits_spec,
spec_quantized_target_ids,
num_vars,
code_ppl,
prob_ppl,
curr_temp,
) = forward_quantizer(
x=x[
spec_enc_data_borders["spec"][0] : spec_enc_data_borders["spec"][1]
],
y=y_spec,
unmasked_features=unpaired_spec_unmasked_features,
mask_indices=unpaired_spec_mask_indices,
return_all=True,
)
logits_pair = None
pair_quantized_target_ids = None
if "pair" in spec_enc_data_borders.keys():
logits_pair, pair_quantized_target_ids = forward_quantizer(
x=x[
spec_enc_data_borders["pair"][0] : spec_enc_data_borders["pair"][1]
],
y=y_pair,
unmasked_features=pair_unmasked_features,
mask_indices=paired_mask_indices,
return_all=False,
)
# General outputs
result = {
"speech_outputs": x,
"speech_padding_mask": padding_mask,
"spec_mask_indices": spec_mask_indices,
"features_pen": features_pen,
}
if "spec" in spec_enc_data_borders.keys():
if unpaired_spec_mask_indices is not None:
result["unpaired_spec_mask_indices"] = unpaired_spec_mask_indices
if spec_quantized_target_ids is not None:
result["spec_quantized_target_ids"] = spec_quantized_target_ids
if logits_spec is not None:
result["contrastive_spec_logits"] = logits_spec
# print("logits_spec: ")
# print(logits_spec.size())
if prob_ppl is not None:
result["prob_perplexity"] = prob_ppl
result["code_perplexity"] = code_ppl
result["num_vars"] = num_vars
result["temp"] = curr_temp
if "pair" in spec_enc_data_borders.keys():
if paired_mask_indices is not None:
result["paired_spec_mask_indices"] = paired_mask_indices
if pair_quantized_target_ids is not None:
result["pair_quantized_target_ids"] = pair_quantized_target_ids
if logits_pair is not None:
result["tlm_spec_logits"] = logits_pair
return result
def quantize(self, x):
assert self.quantizer is not None
x = self.feature_extractor(x)
x = x.transpose(1, 2)
        x = self.conv_layer_norm(x)  # same feature norm as in forward_speech_encoder
return self.quantizer.forward_idx(x)
def extract_features(self, sample, padding_mask, mask=False, layer=None):
res = self.forward(
sample, mask_audio=mask, mask_text=mask, features_only=True, mode=1
)
return res
def get_infonce_logits(self, net_output):
logits = net_output["contrastive_spec_logits"]
logits = logits.transpose(0, 2)
logits = logits.reshape(-1, logits.size(-1))
return logits
def get_infonce_targets(self, net_output):
x = net_output["contrastive_spec_logits"]
return x.new_zeros(x.size(1) * x.size(2), dtype=torch.long)
# noinspection PyStatementEffect
def get_extra_losses(self, net_output):
pen = {}
if "prob_perplexity" in net_output.keys():
pen["prob_perplexity"] = (
net_output["num_vars"] - net_output["prob_perplexity"]
) / net_output["num_vars"]
# (net_output["num_vars"] - net_output["prob_perplexity"]) / net_output["num_vars"]
if "features_pen" in net_output.keys():
pen["features_pen"] = net_output["features_pen"]
return pen
def remove_pretraining_modules(self):
self.quantizer = None
self.project_q = None
self.target_glu = None
self.final_proj = None
@staticmethod
def get_probs_from_logits(logits, log_probs=False):
"""Get normalized probabilities (or log probs) from logits."""
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
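# Minimal sketch (illustrative, not part of the training code) of how the
# InfoNCE helpers above are typically consumed by a criterion. The flattened
# logits have shape (B * T_mask) x (1 + n_negatives) with the positive sample
# scored at index 0, so the matching targets are all-zero class indices.
def _infonce_loss_sketch(model, net_output):
    logits = model.get_infonce_logits(net_output)  # (B*T_mask) x (1 + n_neg)
    targets = model.get_infonce_targets(net_output)  # (B*T_mask,) of zeros
    return F.cross_entropy(logits, targets, reduction="sum")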
class CifMiddleware(nn.Module):
def __init__(self, cfg):
super().__init__()
# Get configurations related to continuous integrate-and-fire
self.cif_threshold = cfg.cif_threshold
self.cif_output_dim = cfg.cif_embedding_dim
self.encoder_embed_dim = cfg.cif_input_embed_dim
self.produce_weight_type = cfg.produce_weight_type
self.apply_scaling = cfg.apply_scaling
self.apply_tail_handling = cfg.apply_tail_handling
self.tail_handling_firing_threshold = cfg.tail_handling_firing_threshold
# Build weight projection layer to compute weight from encoder outputs
if self.produce_weight_type == "dense":
self.dense_proj = Linear(
self.encoder_embed_dim, cfg.dense_cif_units_num
).cuda()
self.weight_proj = Linear(cfg.dense_cif_units_num, 1).cuda()
elif self.produce_weight_type == "conv":
self.cif_conv_layer_num = cfg.conv_cif_layer_num
self.conv = torch.nn.Conv1d(
self.encoder_embed_dim,
cfg.conv_cif_output_channels_num,
cfg.conv_cif_width,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
).cuda()
self.conv_dropout = torch.nn.Dropout(p=cfg.conv_cif_dropout).cuda()
self.weight_proj = Linear(cfg.conv_cif_output_channels_num, 1).cuda()
else:
self.weight_proj = Linear(self.encoder_embed_dim, 1).cuda()
# Build the final projection layer for cif outputs
if self.cif_output_dim != self.encoder_embed_dim:
self.cif_output_proj = Linear(
self.encoder_embed_dim, self.cif_output_dim, bias=False
).cuda()
def forward(self, encoder_outputs, target_lengths, **kwargs):
# Collect inputs
encoder_raw_outputs = encoder_outputs["encoder_raw_out"] # B x T x C
encoder_padding_mask = encoder_outputs["encoder_padding_mask"] # B x T
        # encoder_raw_outputs has shape [batch_size, length, encoder_embed_dim]
        # target_lengths has shape [batch_size]
        # encoder_padding_mask has shape [batch_size, length] (True at padding)
        not_padding_mask = ~encoder_padding_mask  # not_padding_mask has shape B x T
# Produce weights
if self.produce_weight_type == "dense":
x = self.dense_proj(encoder_raw_outputs)
x = torch.relu(x)
x = self.weight_proj(x)
        elif self.produce_weight_type == "conv":
            x = encoder_raw_outputs.permute(0, 2, 1)
            # Conv1d expects inputs of shape [B, C_in, T]
            x = self.conv(x)  # conv output has shape [B, C_out, T]
            x = x.permute(0, 2, 1)  # back to [B, T, C_out]
            x = self.conv_dropout(x)
            x = self.weight_proj(x)
else:
x = self.weight_proj(encoder_raw_outputs)
# Calculate weights
weight = torch.sigmoid(x) # weight has shape B x T x 1
weight = weight.squeeze(-1) * not_padding_mask.int() # weight has shape B x T
org_weight = weight
        # Scale weights
        if self.training and self.apply_scaling and target_lengths is not None:
            # During training, scale the weights so that they sum to target_lengths
            weight_sum = weight.sum(-1)  # weight_sum has shape [batch_size]
            normalize_scalar = torch.unsqueeze(target_lengths / weight_sum, -1)
            weight = weight * normalize_scalar
# Integrate and fire
batch_size = encoder_raw_outputs.size(0)
max_length = encoder_raw_outputs.size(1)
encoder_embed_dim = encoder_raw_outputs.size(2)
padding_start_id = not_padding_mask.sum(-1) # shape B
        # Initialize accumulators (allocated directly on GPU; this module assumes CUDA)
        accumulated_weights = torch.zeros(batch_size, 0).cuda()
        accumulated_states = torch.zeros(batch_size, 0, encoder_embed_dim).cuda()
        fired_states = torch.zeros(batch_size, 0, encoder_embed_dim).cuda()
# Begin integrate and fire
for i in range(max_length):
# Get previous states from the recorded tensor
prev_accumulated_weight = (
torch.zeros([batch_size]).cuda()
if i == 0
else accumulated_weights[:, i - 1]
)
prev_accumulated_state = (
torch.zeros([batch_size, encoder_embed_dim]).cuda()
if i == 0
else accumulated_states[:, i - 1, :]
)
            # Decide whether to fire a boundary at this step
            cur_is_fired = (
                (prev_accumulated_weight + weight[:, i]) >= self.cif_threshold
            ).unsqueeze(dim=-1)
            # cur_is_fired has shape [batch_size, 1]
            # Update the accumulated weight depending on whether a boundary fires
            cur_weight = weight[:, i].unsqueeze(-1)
            # cur_weight has shape [batch_size, 1]
            prev_accumulated_weight = prev_accumulated_weight.unsqueeze(-1)
            # prev_accumulated_weight also has shape [batch_size, 1]
            remained_weight = (
                torch.ones_like(prev_accumulated_weight).cuda()
                - prev_accumulated_weight
            )
            # remained_weight has shape [batch_size, 1]
# Obtain the accumulated weight of current step
cur_accumulated_weight = torch.where(
cur_is_fired,
cur_weight - remained_weight,
cur_weight + prev_accumulated_weight,
) # [batch_size, 1]
# Obtain accumulated state of current step
cur_accumulated_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
(cur_weight - remained_weight) * encoder_raw_outputs[:, i, :],
prev_accumulated_state + cur_weight * encoder_raw_outputs[:, i, :],
) # [batch_size, encoder_embed_dim]
            # Obtain fired state of current step:
            # firing locations hold meaningful representations, while non-firing
            # locations are all-zero embeddings
cur_fired_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
prev_accumulated_state + remained_weight * encoder_raw_outputs[:, i, :],
torch.zeros([batch_size, encoder_embed_dim]).cuda(),
) # shape = [batch_size, encoder_embed_dim]
            # Handle the utterance tail by rounding up or down
            if (not self.training) and self.apply_tail_handling:
                # At the first position beyond the last valid encoder output,
                # keep the residual state if its accumulated weight exceeds the
                # tail-handling firing threshold; otherwise discard it.
cur_fired_state = torch.where(
i
== padding_start_id.unsqueeze(dim=-1).repeat(
[1, encoder_embed_dim]
),
# shape = [batch_size, encoder_embed_dim]
torch.where(
cur_accumulated_weight.repeat([1, encoder_embed_dim])
<= self.tail_handling_firing_threshold,
# shape = [batch_size, encoder_embed_dim]
                        torch.zeros([batch_size, encoder_embed_dim]).cuda(),
                        # at or below the threshold: discarded
                        cur_accumulated_state / (cur_accumulated_weight + 1e-10),
                        # above the threshold: normalized and kept
                    ),
cur_fired_state,
)
# shape = [batch_size, encoder_embed_dim]
            # In all conditions (both training and evaluation),
            # mask padded locations with all-zero embeddings
cur_fired_state = torch.where(
torch.full([batch_size, encoder_embed_dim], i).cuda()
> padding_start_id.unsqueeze(dim=-1).repeat([1, encoder_embed_dim]),
torch.zeros([batch_size, encoder_embed_dim]).cuda(),
cur_fired_state,
)
# Update accumulated arguments
accumulated_weights = torch.cat(
(accumulated_weights, cur_accumulated_weight), 1
) # shape = [batch_size, Len]
accumulated_states = torch.cat(
(accumulated_states, cur_accumulated_state.unsqueeze(1)), 1
) # shape = [B, L, D]
fired_states = torch.cat(
(fired_states, cur_fired_state.unsqueeze(1)), 1
) # shape = [B, L, D]
        # Extract cif_outputs for each utterance
fired_marks = (
torch.abs(fired_states).sum(-1) != 0.0
).int() # [batch_size, max_length]
fired_utt_length = fired_marks.sum(-1) # [batch_size]
        fired_max_length = (
            fired_utt_length.max().int()
        )  # Maximum number of firings in the current batch
cif_outputs = torch.zeros(
[0, fired_max_length, encoder_embed_dim]
).cuda() # Initialize cif outputs
def dynamic_partition(
data: torch.Tensor, partitions: torch.Tensor, num_partitions=None
):
assert (
len(partitions.shape) == 1
), "Only one dimensional partitions supported"
assert (
data.shape[0] == partitions.shape[0]
), "Partitions requires the same size as data"
            if num_partitions is None:
                # partitions are 0-indexed, so the partition count is max + 1
                num_partitions = int(partitions.max().item()) + 1
            return [data[partitions == i] for i in range(num_partitions)]
for j in range(batch_size):
# Get information of j-th sample
cur_utt_fired_mark = fired_marks[j, :]
cur_utt_fired_state = fired_states[j, :, :]
cur_utt_outputs = dynamic_partition(
cur_utt_fired_state, cur_utt_fired_mark, 2
)
            cur_utt_output = cur_utt_outputs[1]  # Keep the integrated (fired) states
            cur_utt_length = cur_utt_output.size(0)  # Number of firings in this utterance
            pad_length = fired_max_length - cur_utt_length  # Required padding length
cur_utt_output = torch.cat(
(
cur_utt_output,
torch.full([pad_length, encoder_embed_dim], 0.0).cuda(),
),
dim=0,
) # Pad current utterance cif outputs to fired_max_length
cur_utt_output = cur_utt_output.unsqueeze(0)
# Reshape to [1, fired_max_length, encoder_embed_dim]
# Concatenate cur_utt_output and cif_outputs along batch axis
cif_outputs = torch.cat([cif_outputs, cur_utt_output], 0)
cif_out_padding_mask = (torch.abs(cif_outputs).sum(-1) != 0.0).int()
        # cif_out_padding_mask has shape [batch_size, fired_max_length]; 0 marks a padded position
if self.training:
            # In training, use the sum of the original (unscaled) weights as the quantity output for the quantity loss
quantity_out = org_weight.sum(-1)
else:
quantity_out = weight.sum(-1)
if self.cif_output_dim != encoder_embed_dim:
cif_outputs = self.cif_output_proj(cif_outputs)
return {
"cif_out": cif_outputs, # shape = [batch_size, fired_max_length, encoder_embed_dim]
"quantity_out": quantity_out, # shape = [batch_size]
"cif_out_padding_mask": cif_out_padding_mask, # shape = [batch_size, fired_max_length]
}
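# Minimal usage sketch for CifMiddleware (illustrative; assumes a CUDA device,
# since the module allocates its buffers with .cuda(), and a `cif` instance
# built from a config carrying the fields read in __init__):
def _cif_middleware_sketch(cif):
    encoder_outputs = {
        "encoder_raw_out": torch.randn(2, 50, 256).cuda(),  # B x T x C
        "encoder_padding_mask": torch.zeros(2, 50, dtype=torch.bool).cuda(),
    }
    target_lengths = torch.tensor([7.0, 5.0]).cuda()  # drives weight scaling in training
    out = cif(encoder_outputs, target_lengths)
    # out["cif_out"]: B x T_fired x C_cif, one vector per fired boundary
    # out["quantity_out"]: B, summed weights consumed by the quantity loss
    # out["cif_out_padding_mask"]: B x T_fired, 1 at valid fired positions
    return out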
class NarProjAsrDecoder(nn.Module):
def __init__(self, cfg, dictionary):
super().__init__()
# Load parameters and build model
self.dictionary = dictionary
self.pre_final_proj_dim = cfg.pre_final_proj_dim
self.output_dim = len(self.dictionary)
self.output_proj = Linear(self.pre_final_proj_dim, self.output_dim).cuda()
def forward(self, prev_output_tokens=None, cif_out=None, **kwargs):
x = cif_out["cif_out"]
# Collect shape information
batch_size, cif_len, cif_embed_dim = x.size()
prev_output_tokens_len = prev_output_tokens.size(1)
        # Handle the edge case of empty cif_outputs (no fired boundaries)
if cif_len == 0 and not self.training:
cif_len = 1
x = torch.zeros([batch_size, cif_len, cif_embed_dim]).cuda()
        # Align the lengths of the target tokens and CIF outputs
min_len = min(prev_output_tokens_len, cif_len)
x = x[:, :min_len, :] # B x min_len x C
        # Forward the decoder
x = self.output_proj(x)
return x, None
class NarTransformerAsrDecoder(NarProjAsrDecoder):
def __init__(self, cfg, dictionary):
super().__init__(cfg, dictionary)
# Load decoder parameters
self.decoder_layers = cfg.nar_decoder_layers
self.decoder_embed_dim = cfg.nar_decoder_embed_dim
self.decoder_ffn_embed_dim = cfg.nar_decoder_ffn_dim
self.decoder_attention_heads = cfg.nar_decoder_num_heads
self.decoder_normalize_before = cfg.layer_norm_first
self.decoder_dropout = cfg.nar_decoder_dropout
self.decoder_attention_dropout = cfg.nar_decoder_attention_dropout
self.decoder_activation_dropout = cfg.nar_decoder_activation_dropout
assert (
self.decoder_embed_dim == self.pre_final_proj_dim
), "ensure that the dimension of decoder outputs is equal to pre_final_proj_dim"
# Build decoder stacks
self.decoder_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.decoder_embed_dim,
ffn_embedding_dim=self.decoder_ffn_embed_dim,
num_attention_heads=self.decoder_attention_heads,
dropout=self.decoder_dropout,
activation_dropout=self.decoder_activation_dropout,
attention_dropout=self.decoder_attention_dropout,
layer_norm_first=self.decoder_normalize_before,
)
                for _ in range(self.decoder_layers)  # i.e. cfg.nar_decoder_layers
]
)
def forward(self, prev_output_tokens=None, cif_out=None, **kwargs):
x = cif_out["cif_out"]
padding_mask = ~cif_out["cif_out_padding_mask"].bool()
        # Collect shape information
        batch_size, cif_len, cif_embed_dim = x.size()
        prev_output_tokens_len = prev_output_tokens.size(1)
        # Handle the edge case of empty cif_outputs (no fired boundaries)
        if cif_len == 0 and not self.training:
            cif_len = 1
            x = torch.zeros([batch_size, cif_len, cif_embed_dim]).cuda()  # B x 1 x C
            padding_mask = torch.zeros([batch_size, cif_len], dtype=torch.bool).cuda()  # B x 1
        # Align the lengths of the target tokens, CIF outputs, and padding mask
min_len = min(prev_output_tokens_len, cif_len)
x = x[:, :min_len, :] # B x min_len x C
padding_mask = padding_mask[:, :min_len] # B x min_len
# Forward decoder
x = x.transpose(0, 1)
for layer in self.decoder_stacks:
x, _ = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
x = x.transpose(0, 1)
final_logits = self.output_proj(x)
return final_logits, None
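# Worked example of the length regularization above (illustrative numbers):
# with cif_len = 12 fired states but prev_output_tokens_len = 10 target tokens,
# min_len = 10, so the CIF outputs and padding mask are truncated to 10 steps
# before the decoder stacks run; the resulting logits then align one-to-one
# with the (possibly truncated) target sequence for the non-autoregressive loss.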
class TtsVocoder(nn.Module):
def __init__(self):
super().__init__()
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
            assert not (
                is_layer_norm and is_group_norm
            ), "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
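# Sketch of the downsampling such a conv stack performs; the spec below is the
# wav2vec 2.0 default and is assumed here purely for illustration. Each block
# maps L_out = floor((L_in - kernel) / stride) + 1, so strides
# (5, 2, 2, 2, 2, 2, 2) turn 16000 raw samples (1 s at 16 kHz) into 49 frames.
def _conv_output_length_sketch(num_samples=16000):
    spec = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
    length = num_samples
    for _, kernel, stride in spec:
        length = (length - kernel) // stride + 1
    return length  # 49 for num_samples=16000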
class TransformerEncoder(nn.Module):
def __init__(self, args):
        super().__init__()
        self.args = args  # retained for max_positions()
        self.dropout = args.dropout
        self.embedding_dim = args.encoder_embed_dim
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
layers = []
for _ in range(args.encoder_layers):
layer = TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
)
if args.checkpoint_activations:
layer = fsdp_wrap(layer)
layer = checkpoint_wrapper(layer)
layers.append(layer)
self.layers = nn.ModuleList(layers)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, layer=None):
x, layer_results = self.extract_features(x, padding_mask, layer)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results
def extract_features(self, x, padding_mask=None, tgt_layer=None):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
layer_results = []
r = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
if tgt_layer is not None:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, layer_results
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer imlementation.
"""
residual = x
        # Force fp16 activations (this fork assumes float16 training)
        x = x.half()
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x = x.half()
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
)
x = self.dropout1(x)
x = residual + x
            # LayerNorm may upcast to fp32; cast around it for float16 training
            x = x.half()
x = self.self_attn_layer_norm(x)
x = x.half()
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
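# Note (illustrative): with layer_norm_first=True the block runs in "pre-norm"
# order (LayerNorm -> attention/FFN -> residual add), while
# layer_norm_first=False gives the post-norm ordering of the original
# Transformer (residual add -> LayerNorm).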
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
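# Minimal sketch (illustrative) of the initialization these helpers apply:
def _init_helpers_sketch():
    emb = Embedding(num_embeddings=100, embedding_dim=64, padding_idx=1)
    # emb.weight ~ N(0, 64 ** -0.5) elementwise, with the padding row zeroed
    assert emb.weight[1].abs().sum().item() == 0.0
    proj = Linear(64, 32)
    # proj.weight is Xavier-uniform initialized and proj.bias is zeroed
    assert proj.bias.abs().sum().item() == 0.0
    return emb, proj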
| 106,389 | 38.056535 | 119 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/wav2vec/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .wav2vec import * # noqa
from .wav2vec2 import * # noqa
from .wav2vec2_asr import * # noqa
from .wav2vec2_nar_cif_asr import *  # noqa
from .speech_text_pretraining_model import *  # noqa
| 358 | 31.636364 | 65 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/bart/hub_interface.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
from fairseq.hub_utils import GeneratorHubInterface
from omegaconf import open_dict
logger = logging.getLogger(__name__)
class BARTHubInterface(GeneratorHubInterface):
"""A simple PyTorch Hub interface to BART.
Usage: https://github.com/pytorch/fairseq/tree/main/examples/bart
"""
def __init__(self, cfg, task, model):
super().__init__(cfg, task, [model])
self.model = self.models[0]
def encode(
self, sentence: str, *addl_sentences, no_separator=True
) -> torch.LongTensor:
"""
BPE-encode a sentence (or multiple sentences).
Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
Every sentence ends with an end-of-sentence (`</s>`).
Example (single sentence): `<s> a b c </s>`
Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`
The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
requires leading spaces. For example::
>>> bart.encode('Hello world').tolist()
[0, 31414, 232, 2]
>>> bart.encode(' world').tolist()
[0, 232, 2]
>>> bart.encode('world').tolist()
[0, 8331, 2]
"""
tokens = self.bpe.encode(sentence)
if len(tokens.split(" ")) > min(self.max_positions) - 2:
tokens = " ".join(tokens.split(" ")[: min(self.max_positions) - 2])
bpe_sentence = "<s> " + tokens + " </s>"
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator else ""
bpe_sentence += " " + self.bpe.encode(s) + " </s>"
tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False)
return tokens.long()
def decode(self, tokens: torch.LongTensor):
assert tokens.dim() == 1
tokens = tokens.cpu().numpy()
if tokens[0] == self.task.source_dictionary.bos():
tokens = tokens[1:] # remove <s>
eos_mask = tokens == self.task.source_dictionary.eos()
doc_mask = eos_mask[1:] & eos_mask[:-1]
sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
sentences = [
self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
]
if len(sentences) == 1:
return sentences[0]
return sentences
def _build_sample(self, src_tokens: List[torch.LongTensor]):
dataset = self.task.build_dataset_for_inference(
src_tokens,
[x.numel() for x in src_tokens],
)
sample = dataset.collater(dataset)
sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample)
return sample
def generate(
self,
tokenized_sentences: List[torch.LongTensor],
*args,
inference_step_args=None,
skip_invalid_size_inputs=False,
**kwargs
) -> List[List[Dict[str, torch.Tensor]]]:
inference_step_args = inference_step_args or {}
if "prefix_tokens" in inference_step_args:
raise NotImplementedError("prefix generation not implemented for BART")
res = []
for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
src_tokens = batch["net_input"]["src_tokens"]
inference_step_args["prefix_tokens"] = src_tokens.new_full(
(src_tokens.size(0), 1), fill_value=self.task.source_dictionary.bos()
).to(device=self.device)
results = super().generate(
src_tokens,
*args,
inference_step_args=inference_step_args,
skip_invalid_size_inputs=skip_invalid_size_inputs,
**kwargs
)
for id, hypos in zip(batch["id"].tolist(), results):
res.append((id, hypos))
res = [hypos for _, hypos in sorted(res, key=lambda x: x[0])]
return res
def extract_features(
self, tokens: torch.LongTensor, return_all_hiddens: bool = False
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > min(self.model.max_positions()):
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
        tokens = tokens.to(device=self.device)
prev_output_tokens = tokens.clone()
prev_output_tokens[:, 0] = tokens.gather(
1,
(tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
).squeeze()
prev_output_tokens[:, 1:] = tokens[:, :-1]
features, extra = self.model(
src_tokens=tokens,
src_lengths=None,
prev_output_tokens=prev_output_tokens,
features_only=True,
return_all_hiddens=return_all_hiddens,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def register_classification_head(
self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
):
self.model.register_classification_head(
name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
)
def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
features = self.extract_features(tokens.to(device=self.device))
sentence_representation = features[
tokens.eq(self.task.source_dictionary.eos()), :
].view(features.size(0), -1, features.size(-1))[:, -1, :]
logits = self.model.classification_heads[head](sentence_representation)
if return_logits:
return logits
return F.log_softmax(logits, dim=-1)
def fill_mask(
self,
masked_inputs: List[str],
topk: int = 5,
match_source_len: bool = True,
**generate_kwargs
):
masked_token = "<mask>"
batch_tokens = []
for masked_input in masked_inputs:
assert (
masked_token in masked_input
), "please add one {} token for the input".format(masked_token)
text_spans = masked_input.split(masked_token)
text_spans_bpe = (
(" {0} ".format(masked_token))
.join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
.strip()
)
tokens = self.task.source_dictionary.encode_line(
"<s> " + text_spans_bpe + " </s>",
append_eos=False,
add_if_not_exist=False,
).long()
batch_tokens.append(tokens)
# ensure beam size is at least as big as topk
generate_kwargs["beam"] = max(
topk,
generate_kwargs.get("beam", -1),
)
generate_kwargs["match_source_len"] = match_source_len
batch_hypos = self.generate(batch_tokens, **generate_kwargs)
return [
[(self.decode(hypo["tokens"]), hypo["score"]) for hypo in hypos[:topk]]
for hypos in batch_hypos
]
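# Minimal usage sketch for fill_mask (illustrative; the checkpoint path is
# hypothetical, and beam is raised to topk internally if set lower):
#
#   bart = BARTModel.from_pretrained("checkpoints/bart.large", checkpoint_file="model.pt")
#   hypos = bart.fill_mask(["The cat <mask> on the mat."], topk=3)
#   # hypos[0] is a list of up to 3 (filled_sentence, score) tuples for input 0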
| 7,863 | 36.09434 | 88 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/bart/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
from typing import Optional
import logging
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
logger = logging.getLogger(__name__)
@register_model("bart")
class BARTModel(TransformerModel):
__jit_unused_properties__ = ["supported_targets"]
@classmethod
def hub_models(cls):
return {
"bart.base": "http://dl.fbaipublicfiles.com/fairseq/models/bart.base.tar.gz",
"bart.large": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz",
"bart.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz",
"bart.large.cnn": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.cnn.tar.gz",
"bart.large.xsum": "http://dl.fbaipublicfiles.com/fairseq/models/bart.large.xsum.tar.gz",
}
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
if hasattr(self.encoder, "dictionary"):
self.eos: int = self.encoder.dictionary.eos()
@staticmethod
def add_args(parser):
super(BARTModel, BARTModel).add_args(parser)
parser.add_argument(
"--pooler-dropout",
type=float,
metavar="D",
help="dropout probability in the masked_lm pooler layers",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use for pooler layer",
)
parser.add_argument(
"--spectral-norm-classification-head",
action="store_true",
help="Apply spectral normalization on the classification head",
)
@property
def supported_targets(self):
return {"self"}
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
features_only: bool = False,
classification_head_name: Optional[str] = None,
token_embeddings: Optional[torch.Tensor] = None,
return_all_hiddens: bool = True,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
token_embeddings=token_embeddings,
return_all_hiddens=return_all_hiddens,
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
eos: int = self.eos
if classification_head_name is not None:
sentence_representation = x[src_tokens.eq(eos), :].view(
x.size(0), -1, x.size(-1)
)[:, -1, :]
for k, head in self.classification_heads.items():
                # TorchScript only supports iteration over ModuleDict, so find the head by name
if k == classification_head_name:
x = head(sentence_representation)
break
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="gpt2",
sample_break_mode="eos",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
sample_break_mode=sample_break_mode,
**kwargs,
)
return BARTHubInterface(x["args"], x["task"], x["models"][0])
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
logger.info("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
input_dim=self.args.encoder_embed_dim,
inner_dim=inner_dim or self.args.encoder_embed_dim,
num_classes=num_classes,
activation_fn=self.args.pooler_activation_fn,
pooler_dropout=self.args.pooler_dropout,
do_spectral_norm=getattr(
self.args, "spectral_norm_classification_head", False
),
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + "." if name != "" else ""
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
def truncate_emb(key):
if key in state_dict:
state_dict[key] = state_dict[key][:-1, :]
# When finetuning on translation task, remove last row of
# embedding matrix that corresponds to mask_idx token.
loaded_dict_size = state_dict["encoder.embed_tokens.weight"].size(0)
if (
loaded_dict_size == len(self.encoder.dictionary) + 1
and "<mask>" not in self.encoder.dictionary
):
truncate_emb("encoder.embed_tokens.weight")
truncate_emb("decoder.embed_tokens.weight")
truncate_emb("encoder.output_projection.weight")
truncate_emb("decoder.output_projection.weight")
# When continued pretraining on new set of languages for mbart,
# add extra lang embeddings at the end of embed_tokens.
# Note: newly added languages are assumed to have been added at the end.
if self.args.task == "multilingual_denoising" and loaded_dict_size < len(
self.encoder.dictionary
):
logger.info(
"Adding extra language embeddings not found in pretrained model for "
"continued pretraining of MBART on new set of languages."
)
loaded_mask_token_embedding = state_dict["encoder.embed_tokens.weight"][
-1, :
]
num_langids_to_add = len(self.encoder.dictionary) - loaded_dict_size
embed_dim = state_dict["encoder.embed_tokens.weight"].size(1)
new_lang_embed_to_add = torch.zeros(num_langids_to_add, embed_dim)
nn.init.normal_(new_lang_embed_to_add, mean=0, std=embed_dim**-0.5)
new_lang_embed_to_add = new_lang_embed_to_add.to(
dtype=state_dict["encoder.embed_tokens.weight"].dtype,
)
state_dict["encoder.embed_tokens.weight"] = torch.cat(
[
state_dict["encoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
state_dict["decoder.embed_tokens.weight"] = torch.cat(
[
state_dict["decoder.embed_tokens.weight"][
: loaded_dict_size - 1, :
],
new_lang_embed_to_add,
loaded_mask_token_embedding.unsqueeze(0),
]
)
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
class BARTClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
do_spectral_norm=False,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
if do_spectral_norm:
self.out_proj = torch.nn.utils.spectral_norm(self.out_proj)
def forward(self, features, **kwargs):
x = features
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@register_model_architecture("bart", "bart_large")
def bart_large_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 1024)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.relu_dropout = getattr(args, "relu_dropout", 0.0)
args.dropout = getattr(args, "dropout", 0.1)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.max_source_positions = getattr(args, "max_source_positions", 1024)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", True)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
@register_model_architecture("bart", "bart_base")
def bart_base_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * 768)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
bart_large_architecture(args)
@register_model_architecture("bart", "mbart_large")
def mbart_large_architecture(args):
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
bart_large_architecture(args)
@register_model_architecture("bart", "mbart_base")
def mbart_base_architecture(args):
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
bart_base_architecture(args)
@register_model_architecture("bart", "mbart_base_wmt20")
def mbart_base_wmt20_architecture(args):
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
mbart_base_architecture(args)
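# Sketch of how the registered architectures above cascade (illustrative):
# mbart_base_wmt20 -> mbart_base -> bart_base -> bart_large, where each level
# only pins the fields it overrides via getattr(args, name, default) before
# delegating. For "bart_base", encoder_embed_dim resolves to 768 here, and any
# field untouched at this level (dropout, activation_fn, ...) falls through to
# the bart_large defaults.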
| 15,517 | 39.306494 | 101 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/bart/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hub_interface import * # noqa
from .model import * # noqa
| 244 | 29.625 | 65 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/nat/levenshtein_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.utils import new_arange
# -------------- Helper Functions --------------------------------------------------- #
def load_libnat():
try:
from fairseq import libnat_cuda
return libnat_cuda, True
except ImportError as e:
print(str(e) + "... fall back to CPU version")
try:
from fairseq import libnat
return libnat, False
except ImportError as e:
import sys
sys.stderr.write(
"ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n"
)
raise e
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx):
libnat, use_cuda = load_libnat()
def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels(
out_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
masked_tgt_masks = masked_tgt_masks.bool() & out_masks
mask_ins_targets = mask_ins_targets.type_as(in_tokens)[
:, 1 : in_masks.size(1)
].masked_fill_(~in_masks[:, 1:], 0)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx):
in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1)
in_tokens_list = [
[t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
mask_inputs = [
[len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels
]
# generate labels
masked_tgt_masks = []
for mask_input in mask_inputs:
mask_label = []
for beam_size in mask_input[1:-1]: # HACK 1:-1
mask_label += [0] + [1 for _ in range(beam_size)]
masked_tgt_masks.append(
mask_label + [0 for _ in range(out_seq_len - len(mask_label))]
)
mask_ins_targets = [
mask_input[1:-1]
+ [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))]
for mask_input in mask_inputs
]
# transform to tensor
masked_tgt_masks = torch.tensor(
masked_tgt_masks, device=out_tokens.device
).bool()
mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device)
masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx)
return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
if use_cuda:
return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx)
return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx)
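# Worked example (illustrative): aligning in_tokens = <s> a c </s> against
# out_tokens = <s> a b c </s> gives mask_ins_targets = [0, 1, 0] (one token to
# insert between "a" and "c"), masked_tgt_tokens = <s> a <unk> c </s>, and
# masked_tgt_masks marking only the <unk> slot, which the word-insertion head
# is then trained to fill.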
def _get_del_targets(in_tokens, out_tokens, padding_idx):
libnat, use_cuda = load_libnat()
def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx):
in_masks = in_tokens.ne(padding_idx)
out_masks = out_tokens.ne(padding_idx)
word_del_targets = libnat.generate_deletion_labels(
in_tokens.int(),
libnat.levenshtein_distance(
in_tokens.int(),
out_tokens.int(),
in_masks.sum(1).int(),
out_masks.sum(1).int(),
),
)
word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_(
~in_masks, 0
)
return word_del_targets
def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx):
out_seq_len = out_tokens.size(1)
with torch.cuda.device_of(in_tokens):
in_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
word_del_targets = [b[-1] for b in full_labels]
word_del_targets = [
labels + [0 for _ in range(out_seq_len - len(labels))]
for labels in word_del_targets
]
# transform to tensor
word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device)
return word_del_targets
if use_cuda:
return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx)
return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx)
def _apply_ins_masks(
in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx
):
in_masks = in_tokens.ne(padding_idx)
in_lengths = in_masks.sum(1)
# HACK: hacky way to shift all the paddings to eos first.
in_tokens.masked_fill_(~in_masks, eos_idx)
mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0)
out_lengths = in_lengths + mask_ins_pred.sum(1)
out_max_len = out_lengths.max()
out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None]
reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1)
out_tokens = (
in_tokens.new_zeros(in_tokens.size(0), out_max_len)
.fill_(padding_idx)
.masked_fill_(out_masks, unk_idx)
)
out_tokens[:, 0] = in_tokens[:, 0]
out_tokens.scatter_(1, reordering, in_tokens[:, 1:])
out_scores = None
if in_scores is not None:
in_scores.masked_fill_(~in_masks, 0)
out_scores = in_scores.new_zeros(*out_tokens.size())
out_scores[:, 0] = in_scores[:, 0]
out_scores.scatter_(1, reordering, in_scores[:, 1:])
return out_tokens, out_scores
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx):
word_ins_masks = in_tokens.eq(unk_idx)
out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks])
if in_scores is not None:
out_scores = in_scores.masked_scatter(
word_ins_masks, word_ins_scores[word_ins_masks]
)
else:
out_scores = None
return out_tokens, out_scores
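# Worked example (illustrative): with in_tokens = <s> a <unk> c </s> and
# word_ins_pred predicting "b" at the <unk> slot, the masked_scatter above
# yields <s> a b c </s>; scores are replaced only at that slot, so all
# non-placeholder positions keep their previous scores.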
def _apply_del_words(
in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx
):
# apply deletion to a tensor
in_masks = in_tokens.ne(padding_idx)
bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx)
max_len = in_tokens.size(1)
word_del_pred.masked_fill_(~in_masks, 1)
word_del_pred.masked_fill_(bos_eos_masks, 0)
reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1]
out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering)
out_scores = None
if in_scores is not None:
out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering)
out_attn = None
if in_attn is not None:
_mask = word_del_pred[:, :, None].expand_as(in_attn)
_reordering = reordering[:, :, None].expand_as(in_attn)
out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering)
return out_tokens, out_scores, out_attn
def _skip(x, mask):
"""
Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors.
"""
if isinstance(x, int):
return x
if x is None:
return None
if isinstance(x, torch.Tensor):
if x.size(0) == mask.size(0):
return x[mask]
elif x.size(1) == mask.size(0):
return x[:, mask]
if isinstance(x, list):
return [_skip(x_i, mask) for x_i in x]
if isinstance(x, dict):
return {k: _skip(v, mask) for k, v in x.items()}
raise NotImplementedError
def _skip_encoder_out(encoder, encoder_out, mask):
if not mask.any():
return encoder_out
else:
return encoder.reorder_encoder_out(
encoder_out, mask.nonzero(as_tuple=False).squeeze()
)
def _fill(x, mask, y, padding_idx):
"""
Filling tensor x with y at masked positions (dim=0).
"""
if x is None:
return y
assert x.dim() == y.dim() and mask.size(0) == x.size(0)
assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2))
n_selected = mask.sum()
assert n_selected == y.size(0)
if n_selected == x.size(0):
return y
if x.size(1) < y.size(1):
dims = [x.size(0), y.size(1) - x.size(1)]
if x.dim() == 3:
dims.append(x.size(2))
x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1)
x[mask] = y
elif x.size(1) > y.size(1):
x[mask] = padding_idx
if x.dim() == 2:
x[mask, : y.size(1)] = y
else:
x[mask, : y.size(1), :] = y
else:
x[mask] = y
return x
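# Worked example (illustrative) of _fill's padding semantics: if x is a 2 x 3
# token batch, mask selects row 0, and y is 1 x 5, x is first right-padded
# with padding_idx to 2 x 5 and y overwrites row 0. If y were instead shorter
# than x, row 0 would be reset to padding_idx before its first y.size(1)
# positions are overwritten.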
| 9,508 | 31.343537 | 89 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/nat/levenshtein_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder
from fairseq.models.transformer import Embedding
from fairseq.modules import TransformerDecoderLayer
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .levenshtein_utils import (
_apply_del_words,
_apply_ins_masks,
_apply_ins_words,
_fill,
_get_del_targets,
_get_ins_targets,
_skip,
_skip_encoder_out,
)
@register_model("levenshtein_transformer")
class LevenshteinTransformerModel(FairseqNATModel):
@property
def allow_length_beam(self):
return False
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
parser.add_argument(
"--early-exit",
default="6,6,6",
type=str,
help="number of decoder layers before word_del, mask_ins, word_ins",
)
parser.add_argument(
"--no-share-discriminator",
action="store_true",
help="separate parameters for discriminator",
)
parser.add_argument(
"--no-share-maskpredictor",
action="store_true",
help="separate parameters for mask-predictor",
)
parser.add_argument(
"--share-discriminator-maskpredictor",
action="store_true",
help="share the parameters for both mask-predictor and discriminator",
)
parser.add_argument(
"--sampling-for-deletion",
action="store_true",
help="instead of argmax, use sampling to predict the tokens",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = LevenshteinTransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert tgt_tokens is not None, "forward function only supports training."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# generate training labels for insertion
masked_tgt_masks, masked_tgt_tokens, mask_ins_targets = _get_ins_targets(
prev_output_tokens, tgt_tokens, self.pad, self.unk
)
        mask_ins_targets = mask_ins_targets.clamp(min=0, max=255)  # keep targets within the 256-way insertion classifier's range
mask_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
mask_ins_out, _ = self.decoder.forward_mask_ins(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_out, _ = self.decoder.forward_word_ins(
normalize=False,
prev_output_tokens=masked_tgt_tokens,
encoder_out=encoder_out,
)
# make online prediction
if self.decoder.sampling_for_deletion:
word_predictions = torch.multinomial(
F.softmax(word_ins_out, -1).view(-1, word_ins_out.size(-1)), 1
).view(word_ins_out.size(0), -1)
else:
word_predictions = F.log_softmax(word_ins_out, dim=-1).max(2)[1]
word_predictions.masked_scatter_(
~masked_tgt_masks, tgt_tokens[~masked_tgt_masks]
)
# generate training labels for deletion
word_del_targets = _get_del_targets(word_predictions, tgt_tokens, self.pad)
word_del_out, _ = self.decoder.forward_word_del(
normalize=False,
prev_output_tokens=word_predictions,
encoder_out=encoder_out,
)
word_del_masks = word_predictions.ne(self.pad)
return {
"mask_ins": {
"out": mask_ins_out,
"tgt": mask_ins_targets,
"mask": mask_ins_masks,
"ls": 0.01,
},
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": masked_tgt_masks,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"word_del": {
"out": word_del_out,
"tgt": word_del_targets,
"mask": word_del_masks,
},
}
def forward_decoder(
self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
):
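        # one refinement iteration: delete words, insert placeholders, then fill
        # the placeholders with predicted words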
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
attn = decoder_out.attn
history = decoder_out.history
bsz = output_tokens.size(0)
if max_ratio is None:
max_lens = torch.zeros_like(output_tokens).fill_(255)
else:
if not encoder_out["encoder_padding_mask"]:
max_src_len = encoder_out["encoder_out"].size(0)
src_lens = encoder_out["encoder_out"].new(bsz).fill_(max_src_len)
else:
src_lens = (~encoder_out["encoder_padding_mask"][0]).sum(1)
max_lens = (src_lens * max_ratio).clamp(min=10).long()
# delete words
        # never delete <s> or </s>: only sequences with more than two non-pad tokens may delete
        can_del_word = output_tokens.ne(self.pad).sum(1) > 2
        if can_del_word.sum() != 0:  # skip if nothing can be deleted
word_del_score, word_del_attn = self.decoder.forward_word_del(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_del_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_del_word),
)
word_del_pred = word_del_score.max(-1)[1].bool()
_tokens, _scores, _attn = _apply_del_words(
output_tokens[can_del_word],
output_scores[can_del_word],
word_del_attn,
word_del_pred,
self.pad,
self.bos,
self.eos,
)
output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_del_word, _scores, 0)
attn = _fill(attn, can_del_word, _attn, 0.0)
if history is not None:
history.append(output_tokens.clone())
# insert placeholders
can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
if can_ins_mask.sum() != 0:
mask_ins_score, _ = self.decoder.forward_mask_ins(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_ins_mask),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_mask),
)
if eos_penalty > 0.0:
mask_ins_score[:, :, 0] = mask_ins_score[:, :, 0] - eos_penalty
mask_ins_pred = mask_ins_score.max(-1)[1]
mask_ins_pred = torch.min(
mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
)
_tokens, _scores = _apply_ins_masks(
output_tokens[can_ins_mask],
output_scores[can_ins_mask],
mask_ins_pred,
self.pad,
self.unk,
self.eos,
)
output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
if history is not None:
history.append(output_tokens.clone())
# insert words
can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
if can_ins_word.sum() != 0:
word_ins_score, word_ins_attn = self.decoder.forward_word_ins(
normalize=True,
prev_output_tokens=_skip(output_tokens, can_ins_word),
encoder_out=_skip_encoder_out(self.encoder, encoder_out, can_ins_word),
)
word_ins_score, word_ins_pred = word_ins_score.max(-1)
_tokens, _scores = _apply_ins_words(
output_tokens[can_ins_word],
output_scores[can_ins_word],
word_ins_pred,
word_ins_score,
self.unk,
)
output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_word, _scores, 0)
attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)
if history is not None:
history.append(output_tokens.clone())
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
attn = None if attn is None else attn[:, :cut_off, :]
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=attn,
history=history,
)
def initialize_output_tokens(self, encoder_out, src_tokens):
initial_output_tokens = src_tokens.new_zeros(src_tokens.size(0), 2)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens[:, 1] = self.eos
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out["encoder_out"][0])
return DecoderOut(
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None,
)
class LevenshteinTransformerDecoder(FairseqNATDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
self.embed_mask_ins = Embedding(256, self.output_embed_dim * 2, None)
self.embed_word_del = Embedding(2, self.output_embed_dim, None)
        # early_exit layer counts, in order: del_word, ins_mask, ins_word
self.early_exit = [int(i) for i in args.early_exit.split(",")]
assert len(self.early_exit) == 3
# copy layers for mask-predict/deletion
self.layers_msk = None
if getattr(args, "no_share_maskpredictor", False):
self.layers_msk = nn.ModuleList(
[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[1])
]
)
self.layers_del = None
if getattr(args, "no_share_discriminator", False):
self.layers_del = nn.ModuleList(
[
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(self.early_exit[0])
]
)
if getattr(args, "share_discriminator_maskpredictor", False):
assert getattr(
args, "no_share_discriminator", False
), "must set saperate discriminator"
self.layers_msk = self.layers_del
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
early_exit=None,
layers=None,
**unused
):
"""
Similar to *forward* but only return features.
Inputs:
prev_output_tokens: Tensor(B, T)
encoder_out: a dictionary of hidden states and masks
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
        the LevenshteinTransformer decoder uses full (non-causal) attention over all generated tokens
"""
# embed positions
positions = (
self.embed_positions(prev_output_tokens)
if self.embed_positions is not None
else None
)
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
layers = self.layers if layers is None else layers
early_exit = len(layers) if early_exit is None else early_exit
for _, layer in enumerate(layers[:early_exit]):
x, attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
self_attn_mask=None,
self_attn_padding_mask=decoder_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": attn, "inner_states": inner_states}
@ensemble_decoder
def forward_mask_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[1],
layers=self.layers_msk,
**unused
)
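        # pair adjacent positions: the slot between tokens i and i+1 predicts how
        # many placeholders (0..255) to insert there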
features_cat = torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
decoder_out = F.linear(features_cat, self.embed_mask_ins.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@ensemble_decoder
def forward_word_ins(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[2],
layers=self.layers,
**unused
)
decoder_out = self.output_layer(features)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@ensemble_decoder
def forward_word_del(self, normalize, encoder_out, prev_output_tokens, **unused):
features, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
early_exit=self.early_exit[0],
layers=self.layers_del,
**unused
)
decoder_out = F.linear(features, self.embed_word_del.weight)
if normalize:
return F.log_softmax(decoder_out, -1), extra["attn"]
return decoder_out, extra["attn"]
@register_model_architecture("levenshtein_transformer", "levenshtein_transformer")
def levenshtein_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.early_exit = getattr(args, "early_exit", "6,6,6")
args.no_share_discriminator = getattr(args, "no_share_discriminator", False)
args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False)
args.share_discriminator_maskpredictor = getattr(
args, "share_discriminator_maskpredictor", False
)
args.no_share_last_layer = getattr(args, "no_share_last_layer", False)
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_wmt_en_de"
)
def levenshtein_transformer_wmt_en_de(args):
levenshtein_base_architecture(args)
# parameters similar to those used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_vaswani_wmt_en_de_big"
)
def levenshtein_transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
levenshtein_base_architecture(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture(
"levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big"
)
def levenshtein_transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
levenshtein_transformer_vaswani_wmt_en_de_big(args)
| 20,131
| 38.39726
| 89
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/fairseq_nat_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq.models.transformer import (
TransformerDecoder,
TransformerEncoder,
TransformerModel,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
def ensemble_encoder(func):
def wrapper(self, *args, **kwargs):
if self.ensemble_models is None or len(self.ensemble_models) == 1:
return func(self, *args, **kwargs)
encoder_outs = [
func(model, *args, **kwargs, return_all_hiddens=True)
for model in self.ensemble_models
]
_encoder_out = encoder_outs[0].copy()
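        # stack every model's encoder states along a new trailing dimension so the
        # decoder-side wrapper can slice out one model at a time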
def stack(key):
outs = [e[key][0] for e in encoder_outs]
return [torch.stack(outs, -1) if outs[0] is not None else None]
_encoder_out["encoder_out"] = stack("encoder_out")
_encoder_out["encoder_embedding"] = stack("encoder_embedding")
num_layers = len(_encoder_out["encoder_states"])
if num_layers > 0:
_encoder_out["encoder_states"] = [
torch.stack([e["encoder_states"][i] for e in encoder_outs], -1)
for i in range(num_layers)
]
return _encoder_out
return wrapper
def ensemble_decoder(func):
def wrapper(self, normalize=False, encoder_out=None, *args, **kwargs):
if self.ensemble_models is None or len(self.ensemble_models) == 1:
return func(
self, normalize=normalize, encoder_out=encoder_out, *args, **kwargs
)
def _replace(encoder_out, new_val):
new_encoder_out = encoder_out.copy()
new_encoder_out["encoder_out"] = [new_val]
return new_encoder_out
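        # run each sub-model on its own slice of the stacked encoder states
        # (the trailing dimension indexes ensemble members)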
action_outs = [
func(
model,
normalize=normalize,
encoder_out=_replace(
encoder_out, encoder_out["encoder_out"][0][:, :, :, i]
),
*args,
**kwargs
)
for i, model in enumerate(self.ensemble_models)
]
        if not isinstance(action_outs[0], tuple):  # the function returned a single value
action_outs = [[a] for a in action_outs]
else:
action_outs = [list(a) for a in action_outs]
ensembled_outs = []
for i in range(len(action_outs[0])):
if i == 0 and normalize:
ensembled_outs += [
torch.logsumexp(
torch.stack([a[i] for a in action_outs], -1), dim=-1
)
- math.log(len(self.ensemble_models))
]
elif action_outs[0][i] is not None:
ensembled_outs += [torch.stack([a[i] for a in action_outs], -1)]
else:
ensembled_outs += [None]
if len(ensembled_outs) == 1:
return ensembled_outs[0]
return tuple(ensembled_outs)
return wrapper
class FairseqNATModel(TransformerModel):
"""
Abstract class for all nonautoregressive-based models
"""
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.tgt_dict = decoder.dictionary
self.bos = decoder.dictionary.bos()
self.eos = decoder.dictionary.eos()
self.pad = decoder.dictionary.pad()
self.unk = decoder.dictionary.unk()
self.ensemble_models = None
@property
def allow_length_beam(self):
return False
@property
def allow_ensemble(self):
return True
def enable_ensemble(self, models):
self.encoder.ensemble_models = [m.encoder for m in models]
self.decoder.ensemble_models = [m.decoder for m in models]
@staticmethod
def add_args(parser):
TransformerModel.add_args(parser)
parser.add_argument(
"--apply-bert-init",
action="store_true",
help="use custom param initialization for BERT",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = FairseqNATDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
encoder = FairseqNATEncoder(args, src_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
encoder.apply(init_bert_params)
return encoder
def forward_encoder(self, encoder_inputs):
return self.encoder(*encoder_inputs)
    def forward_decoder(self, *args, **kwargs):
        raise NotImplementedError
    def initialize_output_tokens(self, *args, **kwargs):
        raise NotImplementedError
    def forward(self, *args, **kwargs):
        raise NotImplementedError
class FairseqNATEncoder(TransformerEncoder):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
self.ensemble_models = None
@ensemble_encoder
def forward(self, *args, **kwargs):
return super().forward(*args, **kwargs)
class FairseqNATDecoder(TransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
self.ensemble_models = None
| 5,555
| 31.115607
| 83
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/nonautoregressive_ensembles.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq.models.nat import (
_apply_del_words,
_apply_ins_masks,
_apply_ins_words,
_fill,
_skip,
_skip_encoder_out,
)
class _EnsembleModelEncoder(object):
def __init__(self, models):
self.models = models
def reorder_encoder_out(self, encoder_outs, new_order):
encoder_outs = [
model.encoder.reorder_encoder_out(encoder_out, new_order)
for model, encoder_out in zip(self.models, encoder_outs)
]
return encoder_outs
class BasicEnsembleModel(torch.nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models = torch.nn.ModuleList(models)
self.bos = self.models[0].decoder.dictionary.bos()
self.eos = self.models[0].decoder.dictionary.eos()
self.pad = self.models[0].decoder.dictionary.pad()
self.unk = self.models[0].decoder.dictionary.unk()
self.encoder = _EnsembleModelEncoder(self.models)
def has_encoder(self):
return hasattr(self.models[0], "encoder")
def max_decoder_positions(self):
return min(m.max_decoder_positions() for m in self.models)
@torch.no_grad()
def forward_encoder(self, encoder_input):
if not self.has_encoder():
return None
return [model.forward_encoder(encoder_input) for model in self.models]
@torch.no_grad()
def forward_decoder(self, *inputs):
raise NotImplementedError
def initialize_output_tokens(self, *inputs):
raise NotImplementedError
class EnsembleLevT(BasicEnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
@torch.no_grad()
def forward_decoder(
self, decoder_out, encoder_outs, eos_penalty=0.0, max_ratio=None, **kwargs
):
        # LevT ensembling:
        # a pipeline of three steps: deletion, placeholder insertion, and word insertion.
        # Scores are averaged step by step because each step depends on the previous one.
# deletion
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
attn = decoder_out.attn
bsz = output_tokens.size(0)
if max_ratio is None:
            max_lens = output_tokens.new_zeros(bsz).fill_(255)  # one length bound per sequence
else:
if not encoder_outs[0]["encoder_padding_mask"]:
src_lens = (
encoder_outs[0]["encoder_out"][0]
.new(bsz)
.fill_(encoder_outs[0]["encoder_out"][0].size(1))
)
else:
src_lens = (~encoder_outs[0]["encoder_padding_mask"][0]).sum(1)
max_lens = (src_lens * max_ratio).clamp(min=10).long()
# delete words
        # never delete <s> or </s>: only sequences with more than two non-pad tokens may delete
        can_del_word = output_tokens.ne(self.pad).sum(1) > 2
        if can_del_word.sum() != 0:  # skip if nothing can be deleted
output_tokens, output_scores, attn = self.forward_word_del(
encoder_outs,
output_tokens,
output_scores,
attn,
can_del_word,
)
# insert placeholders
can_ins_mask = output_tokens.ne(self.pad).sum(1) < max_lens
if can_ins_mask.sum() != 0:
output_tokens, output_scores = self.forward_mask_ins(
encoder_outs,
output_tokens,
output_scores,
can_ins_mask,
eos_penalty,
max_lens,
)
# insert words
can_ins_word = output_tokens.eq(self.unk).sum(1) > 0
if can_ins_word.sum() != 0:
output_tokens, output_scores, attn = self.forward_word_ins(
encoder_outs,
output_tokens,
output_scores,
attn,
can_ins_word,
)
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
attn = None if attn is None else attn[:, :cut_off, :]
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=attn,
history=None,
)
def forward_word_del(
self, encoder_outs, output_tokens, output_scores, attn, can_del_word
):
word_del_score_avg = []
word_del_attn_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
word_del_out, word_del_attn = model.decoder.forward_word_del(
_skip(output_tokens, can_del_word),
_skip_encoder_out(model.encoder, encoder_out, can_del_word),
)
word_del_score = F.log_softmax(word_del_out, 2)
word_del_score_avg.append(word_del_score)
word_del_attn_avg.append(word_del_attn)
word_del_score_avg = torch.logsumexp(
torch.stack(word_del_score_avg, dim=0), dim=0
) - math.log(len(self.models))
word_del_pred = word_del_score_avg.max(-1)[1].bool()
if word_del_attn_avg[0] is not None:
word_del_attn_avg = torch.stack(word_del_attn_avg, dim=0) / len(self.models)
else:
word_del_attn_avg = None
_tokens, _scores, _attn = _apply_del_words(
output_tokens[can_del_word],
output_scores[can_del_word],
word_del_attn_avg,
word_del_pred,
self.pad,
self.bos,
self.eos,
)
output_tokens = _fill(output_tokens, can_del_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_del_word, _scores, 0)
attn = _fill(attn, can_del_word, _attn, 0.0)
return output_tokens, output_scores, attn
def forward_mask_ins(
self,
encoder_outs,
output_tokens,
output_scores,
can_ins_mask,
eos_penalty,
max_lens,
):
mask_ins_score_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
mask_ins_out, _ = model.decoder.forward_mask_ins(
_skip(output_tokens, can_ins_mask),
_skip_encoder_out(model.encoder, encoder_out, can_ins_mask),
)
mask_ins_score = F.log_softmax(mask_ins_out, 2)
if eos_penalty > 0.0:
mask_ins_score[:, :, 0] -= eos_penalty
mask_ins_score_avg.append(mask_ins_score)
mask_ins_score_avg = torch.logsumexp(
torch.stack(mask_ins_score_avg, dim=0), dim=0
) - math.log(len(self.models))
mask_ins_pred = mask_ins_score_avg.max(-1)[1]
mask_ins_pred = torch.min(
mask_ins_pred, max_lens[can_ins_mask, None].expand_as(mask_ins_pred)
)
_tokens, _scores = _apply_ins_masks(
output_tokens[can_ins_mask],
output_scores[can_ins_mask],
mask_ins_pred,
self.pad,
self.unk,
self.eos,
)
output_tokens = _fill(output_tokens, can_ins_mask, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_mask, _scores, 0)
return output_tokens, output_scores
def forward_word_ins(
self, encoder_outs, output_tokens, output_scores, attn, can_ins_word
):
word_ins_score_avg = []
word_ins_attn_avg = []
for model, encoder_out in zip(self.models, encoder_outs):
word_ins_out, word_ins_attn = model.decoder.forward_word_ins(
_skip(output_tokens, can_ins_word),
_skip_encoder_out(model.encoder, encoder_out, can_ins_word),
)
word_ins_score = F.log_softmax(word_ins_out, 2)
word_ins_score_avg.append(word_ins_score)
word_ins_attn_avg.append(word_ins_attn)
word_ins_score_avg = torch.logsumexp(
torch.stack(word_ins_score_avg, dim=0), dim=0
) - math.log(len(self.models))
if word_ins_attn_avg[0] is not None:
word_ins_attn_avg = torch.stack(word_ins_attn_avg, dim=0) / len(self.models)
else:
word_ins_attn_avg = None
word_ins_score_max, word_ins_pred = word_ins_score_avg.max(-1)
_tokens, _scores = _apply_ins_words(
output_tokens[can_ins_word],
output_scores[can_ins_word],
word_ins_pred,
word_ins_score_max,
self.unk,
)
output_tokens = _fill(output_tokens, can_ins_word, _tokens, self.pad)
output_scores = _fill(output_scores, can_ins_word, _scores, 0)
attn = _fill(attn, can_ins_word, word_ins_attn, 0.0)
return output_tokens, output_scores, attn
def initialize_output_tokens(self, encoder_outs, src_tokens):
# LevT doesn't do length prediction.
return self.models[0].initialize_output_tokens(encoder_outs[0], src_tokens)
| 9,289
| 35.431373
| 89
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/insertion_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import (
FairseqNATModel,
LevenshteinTransformerDecoder,
LevenshteinTransformerModel,
ensemble_decoder,
)
from fairseq.models.transformer import Linear
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import new_arange
class NegativeDistanceScore(object):
def __init__(self):
# pre-compute some values
self.scores = {}
self.scores[0.5] = self.compute_score_full(50, 0.5)
self.scores[1.0] = self.compute_score_full(50, 1.0)
self.scores[2.0] = self.compute_score_full(50, 2.0)
def __call__(self, i, L, tau):
if (tau is None) or (tau > 1000):
return 1 / L
if tau in self.scores:
if L < self.scores[tau].shape[0]:
return self.scores[tau][L - 1, i]
return self.compute_score(L, tau)[i]
def compute_score(self, L, tau):
s = np.array([-abs(L / 2 - i) / tau for i in range(L)])
s = np.exp(s - s.max())
return s / s.sum()
def compute_score_full(self, L, tau):
s = -abs(np.arange(0, L - 1)[:, None] / 2 - np.arange(L)[None, :]) / tau
s = np.tril(s, 0) + np.triu(s - float("inf"), 1)
s = np.exp(s - s.max(1, keepdims=True))
return s / s.sum(1, keepdims=True)
neg_scorer = NegativeDistanceScore()
def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
try:
from fairseq import libnat
except ImportError as e:
import sys
sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n")
raise e
B = in_tokens.size(0)
T = in_tokens.size(1)
V = vocab_size
with torch.cuda.device_of(in_tokens):
in_tokens_list = [
[t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
]
out_tokens_list = [
[t for t in s if t != padding_idx]
for i, s in enumerate(out_tokens.tolist())
]
full_labels = libnat.suggested_ed2_path(
in_tokens_list, out_tokens_list, padding_idx
)
insert_labels = [a[:-1] for a in full_labels]
        # numericalize the soft labels into a flat B * (T - 1) * V tensor
insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float()
insert_index, insert_labels = zip(
*[
(w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau))
for i, labels in enumerate(insert_labels)
for j, label in enumerate(labels[1:-1])
for k, w in enumerate(label)
]
) # HACK 1:-1
insert_index, insert_labels = [
torch.tensor(list(a), device=in_tokens.device)
for a in [insert_index, insert_labels]
]
insert_label_tensors.scatter_(0, insert_index.long(), insert_labels)
insert_label_tensors = insert_label_tensors.view(B, T - 1, V)
return insert_label_tensors
def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx):
padding_masks = in_tokens[:, 1:].eq(padding_idx)
word_ins_scores.masked_fill_(padding_masks, 0.0)
word_ins_pred.masked_fill_(padding_masks, padding_idx)
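    # coordinate trick: existing token i sits at coordinate i; a word predicted for
    # the slot between tokens i and i+1 sits at i + 0.5, so one sort interleaves them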
in_coords = new_arange(in_tokens).type_as(in_scores)
# shift all padding predictions to infinite
out_coords = (in_coords[:, 1:] - 0.5).masked_fill(
word_ins_pred.eq(padding_idx), float("inf")
)
out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1]
out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords)
out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords)
return out_tokens, out_scores
@register_model("insertion_transformer")
class InsertionTransformerModel(LevenshteinTransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
parser.add_argument("--label-tau", default=None, type=float)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = InsertionTransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
assert tgt_tokens is not None, "forward function only supports training."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# generate training labels for insertion
word_ins_out = self.decoder.forward_word_ins(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt = _get_ins_targets(
prev_output_tokens,
tgt_tokens,
self.pad,
self.unk,
len(self.tgt_dict),
tau=self.decoder.label_tau,
).type_as(word_ins_out)
word_ins_masks = prev_output_tokens[:, 1:].ne(self.pad)
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_masks,
"ls": self.args.label_smoothing,
"nll_loss": True,
}
}
def forward_decoder(
self, decoder_out, encoder_out, eos_penalty=0.0, max_ratio=None, **kwargs
):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# TODO: decoding for InsertionTransformer
word_ins_score = self.decoder.forward_word_ins(
normalize=True, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
if eos_penalty > 0.0:
word_ins_score[:, :, self.pad] -= eos_penalty
word_ins_score, word_ins_pred = word_ins_score.max(-1)
output_tokens, output_scores = _apply_ins_words(
output_tokens, output_scores, word_ins_pred, word_ins_score, self.pad
)
# delete some unnecessary paddings
cut_off = output_tokens.ne(self.pad).sum(1).max()
output_tokens = output_tokens[:, :cut_off]
output_scores = output_scores[:, :cut_off]
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
class InsertionTransformerDecoder(LevenshteinTransformerDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
# use the TransformerDecoder's __init__
super(LevenshteinTransformerDecoder, self).__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.pool_out = Linear(self.output_embed_dim * 2, self.output_embed_dim)
self.label_tau = getattr(args, "label_tau", None)
@ensemble_decoder
def forward_word_ins(self, normalize, encoder_out, prev_output_tokens):
features = self.extract_features(prev_output_tokens, encoder_out=encoder_out)[0]
features = self.pool_out(
torch.cat([features[:, :-1, :], features[:, 1:, :]], 2)
)
decoder_out = self.output_layer(features)
return F.log_softmax(decoder_out, -1) if normalize else decoder_out
def forward_mask_ins(self, *args, **kwargs):
raise NotImplementedError
def forward_word_del(self, *args, **kwargs):
raise NotImplementedError
@register_model_architecture("insertion_transformer", "insertion_transformer")
def insertion_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# special for insertion transformer
args.label_tau = getattr(args, "label_tau", None)
| 10,460
| 36.227758
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/nonautoregressive_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import FairseqNATDecoder, FairseqNATModel, ensemble_decoder
from fairseq.models.transformer import Embedding
from fairseq.modules.transformer_sentence_encoder import init_bert_params
def _mean_pooling(enc_feats, src_masks):
# enc_feats: T x B x C
# src_masks: B x T or None
if src_masks is None:
enc_feats = enc_feats.mean(0)
else:
src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats)
enc_feats = (
(enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None]
).sum(0)
return enc_feats
def _argmax(x, dim):
return (x == x.max(dim, keepdim=True)[0]).type_as(x)
def _uniform_assignment(src_lens, trg_lens):
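    # map each target position to a source position by linear interpolation,
    # e.g. src_len=4, trg_len=2 -> index_t = [0, 3] (endpoints aligned)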
max_trg_len = trg_lens.max()
steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size
    # target position indices 0 .. max_trg_len - 1
index_t = utils.new_arange(trg_lens, max_trg_len).float()
index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len
index_t = torch.round(index_t).long().detach()
return index_t
@register_model("nonautoregressive_transformer")
class NATransformerModel(FairseqNATModel):
@property
def allow_length_beam(self):
return True
@staticmethod
def add_args(parser):
FairseqNATModel.add_args(parser)
# length prediction
parser.add_argument(
"--src-embedding-copy",
action="store_true",
help="copy encoder word embeddings as the initial input of the decoder",
)
parser.add_argument(
"--pred-length-offset",
action="store_true",
help="predicting the length difference between the target and source sentences",
)
parser.add_argument(
"--sg-length-pred",
action="store_true",
help="stop the gradients back-propagated from the length predictor",
)
parser.add_argument(
"--length-loss-factor",
type=float,
help="weights on the length prediction loss",
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
decoder = NATransformerDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "apply_bert_init", False):
decoder.apply(init_bert_params)
return decoder
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
return {
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": tgt_tokens.ne(self.pad),
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder
output_masks = output_tokens.ne(self.pad)
_scores, _tokens = self.decoder(
normalize=True,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
step=step,
).max(-1)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
def initialize_output_tokens(self, encoder_out, src_tokens):
# length prediction
length_tgt = self.decoder.forward_length_prediction(
self.decoder.forward_length(normalize=True, encoder_out=encoder_out),
encoder_out=encoder_out,
)
max_length = length_tgt.clamp_(min=2).max()
idx_length = utils.new_arange(src_tokens, max_length)
initial_output_tokens = src_tokens.new_zeros(
src_tokens.size(0), max_length
).fill_(self.pad)
initial_output_tokens.masked_fill_(
idx_length[None, :] < length_tgt[:, None], self.unk
)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(encoder_out["encoder_out"][0])
return DecoderOut(
output_tokens=initial_output_tokens,
output_scores=initial_output_scores,
attn=None,
step=0,
max_step=0,
history=None,
)
def regenerate_length_beam(self, decoder_out, beam_size):
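        # expand each hypothesis into beam_size candidate lengths centered on the
        # current length, then rebuild the <unk>-filled initial outputs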
output_tokens = decoder_out.output_tokens
length_tgt = output_tokens.ne(self.pad).sum(1)
length_tgt = (
length_tgt[:, None]
+ utils.new_arange(length_tgt, 1, beam_size)
- beam_size // 2
)
length_tgt = length_tgt.view(-1).clamp_(min=2)
max_length = length_tgt.max()
idx_length = utils.new_arange(length_tgt, max_length)
initial_output_tokens = output_tokens.new_zeros(
length_tgt.size(0), max_length
).fill_(self.pad)
initial_output_tokens.masked_fill_(
idx_length[None, :] < length_tgt[:, None], self.unk
)
initial_output_tokens[:, 0] = self.bos
initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
initial_output_scores = initial_output_tokens.new_zeros(
*initial_output_tokens.size()
).type_as(decoder_out.output_scores)
return decoder_out._replace(
output_tokens=initial_output_tokens, output_scores=initial_output_scores
)
class NATransformerDecoder(FairseqNATDecoder):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(
args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
)
self.dictionary = dictionary
self.bos = dictionary.bos()
self.unk = dictionary.unk()
self.eos = dictionary.eos()
self.encoder_embed_dim = args.encoder_embed_dim
self.sg_length_pred = getattr(args, "sg_length_pred", False)
self.pred_length_offset = getattr(args, "pred_length_offset", False)
self.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
self.src_embedding_copy = getattr(args, "src_embedding_copy", False)
self.embed_length = Embedding(256, self.encoder_embed_dim, None)
@ensemble_decoder
def forward(self, normalize, encoder_out, prev_output_tokens, step=0, **unused):
features, _ = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
embedding_copy=(step == 0) & self.src_embedding_copy,
)
decoder_out = self.output_layer(features)
return F.log_softmax(decoder_out, -1) if normalize else decoder_out
@ensemble_decoder
def forward_length(self, normalize, encoder_out):
enc_feats = encoder_out["encoder_out"][0] # T x B x C
if len(encoder_out["encoder_padding_mask"]) > 0:
src_masks = encoder_out["encoder_padding_mask"][0] # B x T
else:
src_masks = None
enc_feats = _mean_pooling(enc_feats, src_masks)
if self.sg_length_pred:
enc_feats = enc_feats.detach()
length_out = F.linear(enc_feats, self.embed_length.weight)
return F.log_softmax(length_out, -1) if normalize else length_out
def extract_features(
self,
prev_output_tokens,
encoder_out=None,
early_exit=None,
embedding_copy=False,
**unused
):
"""
Similar to *forward* but only return features.
Inputs:
prev_output_tokens: Tensor(B, T)
encoder_out: a dictionary of hidden states and masks
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
        the decoder uses full (non-causal) attention over all generated tokens
"""
# embedding
if embedding_copy:
src_embd = encoder_out["encoder_embedding"][0]
if len(encoder_out["encoder_padding_mask"]) > 0:
src_mask = encoder_out["encoder_padding_mask"][0]
else:
src_mask = None
src_mask = (
~src_mask
if src_mask is not None
else prev_output_tokens.new_ones(*src_embd.size()[:2]).bool()
)
x, decoder_padding_mask = self.forward_embedding(
prev_output_tokens,
self.forward_copying_source(
src_embd, src_mask, prev_output_tokens.ne(self.padding_idx)
),
)
else:
x, decoder_padding_mask = self.forward_embedding(prev_output_tokens)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for i, layer in enumerate(self.layers):
# early exit from the decoder.
if (early_exit is not None) and (i >= early_exit):
break
x, attn, _ = layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
self_attn_mask=None,
self_attn_padding_mask=decoder_padding_mask,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": attn, "inner_states": inner_states}
def forward_embedding(self, prev_output_tokens, states=None):
# embed positions
positions = (
self.embed_positions(prev_output_tokens)
if self.embed_positions is not None
else None
)
# embed tokens and positions
if states is None:
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
else:
x = states
if positions is not None:
x += positions
x = self.dropout_module(x)
decoder_padding_mask = prev_output_tokens.eq(self.padding_idx)
return x, decoder_padding_mask
def forward_copying_source(self, src_embeds, src_masks, tgt_masks):
length_sources = src_masks.sum(1)
length_targets = tgt_masks.sum(1)
mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill(
~tgt_masks, 0
)
copied_embedding = torch.gather(
src_embeds,
1,
mapped_inputs.unsqueeze(-1).expand(
*mapped_inputs.size(), src_embeds.size(-1)
),
)
return copied_embedding
def forward_length_prediction(self, length_out, encoder_out, tgt_tokens=None):
enc_feats = encoder_out["encoder_out"][0] # T x B x C
if len(encoder_out["encoder_padding_mask"]) > 0:
src_masks = encoder_out["encoder_padding_mask"][0] # B x T
else:
src_masks = None
if self.pred_length_offset:
if src_masks is None:
src_lengs = enc_feats.new_ones(enc_feats.size(1)).fill_(
enc_feats.size(0)
)
else:
src_lengs = (~src_masks).transpose(0, 1).type_as(enc_feats).sum(0)
src_lengs = src_lengs.long()
if tgt_tokens is not None:
# obtain the length target
tgt_lengs = tgt_tokens.ne(self.padding_idx).sum(1).long()
if self.pred_length_offset:
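                # offset by 128 so length differences in [-128, 127] map onto the
                # 256 length-embedding classes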
length_tgt = tgt_lengs - src_lengs + 128
else:
length_tgt = tgt_lengs
length_tgt = length_tgt.clamp(min=0, max=255)
else:
# predict the length target (greedy for now)
# TODO: implementing length-beam
pred_lengs = length_out.max(-1)[1]
if self.pred_length_offset:
length_tgt = pred_lengs - 128 + src_lengs
else:
length_tgt = pred_lengs
return length_tgt
@register_model_architecture(
"nonautoregressive_transformer", "nonautoregressive_transformer"
)
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
@register_model_architecture(
"nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de"
)
def nonautoregressive_transformer_wmt_en_de(args):
base_architecture(args)
| 16,891
| 35.962801
| 92
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .fairseq_nat_model import *
from .nonautoregressive_transformer import *
from .nat_crf_transformer import *
from .iterative_nonautoregressive_transformer import *
from .cmlm_transformer import *
from .levenshtein_transformer import *
from .insertion_transformer import *
| 476
| 33.071429
| 65
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/cmlm_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file implements:
Ghazvininejad, Marjan, et al.
"Constant-time machine translation with conditional masked language models."
arXiv preprint arXiv:1904.09324 (2019).
"""
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
from fairseq.utils import new_arange
def _skeptical_unmasking(output_scores, output_masks, p):
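    # re-mask the least confident tokens: with n non-pad tokens, the
    # floor((n - 2) * p) lowest-scoring positions are reset to <unk>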
sorted_index = output_scores.sort(-1)[1]
boundary_len = (
(output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p
).long()
skeptical_mask = new_arange(output_masks) < boundary_len
return skeptical_mask.scatter(1, sorted_index, skeptical_mask)
@register_model("cmlm_transformer")
class CMLMNATransformerModel(NATransformerModel):
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
        assert not self.decoder.src_embedding_copy, "embedding copy is not supported."
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_mask = prev_output_tokens.eq(self.unk)
return {
"word_ins": {
"out": word_ins_out,
"tgt": tgt_tokens,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
step = decoder_out.step
max_step = decoder_out.max_step
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder
output_masks = output_tokens.eq(self.unk)
_scores, _tokens = self.decoder(
normalize=True,
prev_output_tokens=output_tokens,
encoder_out=encoder_out,
).max(-1)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
        # skeptical decoding (depends on the maximum number of decoding steps)
if (step + 1) < max_step:
skeptical_mask = _skeptical_unmasking(
output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step
)
output_tokens.masked_fill_(skeptical_mask, self.unk)
output_scores.masked_fill_(skeptical_mask, 0.0)
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
@register_model_architecture("cmlm_transformer", "cmlm_transformer")
def cmlm_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", True)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.ngram_predictor = getattr(args, "ngram_predictor", 1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
@register_model_architecture("cmlm_transformer", "cmlm_transformer_wmt_en_de")
def cmlm_wmt_en_de(args):
cmlm_base_architecture(args)
| 6,453
| 38.595092
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/iterative_nonautoregressive_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
# s: input batch
# V: vocabulary size
rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device)
choices = torch.rand(size=s.size(), device=s.device)
choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1)
replace = choices < beta / 3
repeat = (choices >= beta / 3) & (choices < beta * 2 / 3)
swap = (choices >= beta * 2 / 3) & (choices < beta)
safe = choices >= beta
for i in range(s.size(1) - 1):
rand_word = rand_words[:, i]
next_word = s[:, i + 1]
self_word = s[:, i]
replace_i = replace[:, i]
swap_i = swap[:, i] & (next_word != 3)
repeat_i = repeat[:, i] & (next_word != 3)
safe_i = safe[:, i] | ((next_word == 3) & (~replace_i))
s[:, i] = (
self_word * (safe_i | repeat_i).long()
+ next_word * swap_i.long()
+ rand_word * replace_i.long()
)
s[:, i + 1] = (
next_word * (safe_i | replace_i).long()
+ self_word * (swap_i | repeat_i).long()
)
return s
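# Illustrative sketch (hypothetical inputs, not part of fairseq): with total
# corruption probability ``beta``, each non-special position is randomly
# replaced, repeated, or swapped with its right neighbour (each case with
# probability beta / 3); pad/bos/eos positions are always kept.
def _demo_sequential_poisoning():
    torch.manual_seed(0)
    # <bos> w w w w <eos> <pad>, using the default ids (bos=2, eos=3, pad=1)
    s = torch.tensor([[2, 10, 11, 12, 13, 3, 1]])
    return _sequential_poisoning(s.clone(), V=100, beta=0.9)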
def gumbel_noise(input, TINY=1e-8):
return (
input.new_zeros(*input.size())
.uniform_()
.add_(TINY)
.log_()
.neg_()
.add_(TINY)
.log_()
.neg_()
)
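# Illustrative sketch: -log(-log(U)) is standard Gumbel noise, so adding it to
# unnormalized log-probabilities and taking the argmax draws an exact sample
# from the corresponding softmax distribution (the Gumbel-max trick), as used
# for the stochastic approximation in the forward pass below.
def _demo_gumbel_max_sample(logits: torch.Tensor) -> torch.Tensor:
    return (logits + gumbel_noise(logits)).max(-1)[1]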
@register_model("iterative_nonautoregressive_transformer")
class IterNATransformerModel(NATransformerModel):
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--train-step",
type=int,
help="number of refinement iterations during training",
)
parser.add_argument(
"--dae-ratio",
type=float,
help="the probability of switching to the denoising auto-encoder loss",
)
parser.add_argument(
"--stochastic-approx",
action="store_true",
help="sampling from the decoder as the inputs for next iteration",
)
@classmethod
def build_model(cls, args, task):
model = super().build_model(args, task)
model.train_step = getattr(args, "train_step", 4)
model.dae_ratio = getattr(args, "dae_ratio", 0.5)
model.stochastic_approx = getattr(args, "stochastic_approx", False)
return model
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
B, T = prev_output_tokens.size()
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_outs, word_ins_tgts, word_ins_masks = [], [], []
for t in range(self.train_step):
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
step=t,
)
word_ins_tgt = tgt_tokens
word_ins_mask = word_ins_tgt.ne(self.pad)
word_ins_outs.append(word_ins_out)
word_ins_tgts.append(word_ins_tgt)
word_ins_masks.append(word_ins_mask)
if t < (self.train_step - 1):
# prediction for next iteration
if self.stochastic_approx:
word_ins_prediction = (
word_ins_out + gumbel_noise(word_ins_out)
).max(-1)[1]
else:
word_ins_prediction = word_ins_out.max(-1)[1]
prev_output_tokens = prev_output_tokens.masked_scatter(
word_ins_mask, word_ins_prediction[word_ins_mask]
)
                if self.dae_ratio > 0:
                    # apply the denoising objective to a random subset of
                    # sentences; the input to the first iteration is never
                    # corrupted, since this runs only when t < train_step - 1
                    corrupted = (
                        torch.rand(size=(B,), device=prev_output_tokens.device)
                        < self.dae_ratio
                    )
                    corrupted_tokens = _sequential_poisoning(
                        tgt_tokens[corrupted],
                        len(self.tgt_dict),
                        0.33,
                        self.bos,
                        self.eos,
                        self.pad,
                    )
                    prev_output_tokens[corrupted] = corrupted_tokens
# concat everything
word_ins_out = torch.cat(word_ins_outs, 0)
word_ins_tgt = torch.cat(word_ins_tgts, 0)
word_ins_mask = torch.cat(word_ins_masks, 0)
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
@register_model_architecture(
"iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer"
)
def inat_base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.apply_bert_init = getattr(args, "apply_bert_init", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
# --- special arguments ---
args.sg_length_pred = getattr(args, "sg_length_pred", False)
args.pred_length_offset = getattr(args, "pred_length_offset", False)
args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
args.ngram_predictor = getattr(args, "ngram_predictor", 1)
args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
args.train_step = getattr(args, "train_step", 4)
args.dae_ratio = getattr(args, "dae_ratio", 0.5)
args.stochastic_approx = getattr(args, "stochastic_approx", False)
@register_model_architecture(
"iterative_nonautoregressive_transformer",
"iterative_nonautoregressive_transformer_wmt_en_de",
)
def iter_nat_wmt_en_de(args):
inat_base_architecture(args)
| 8,647
| 36.764192
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/nat/nat_crf_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel, base_architecture
from fairseq.modules import DynamicCRF
@register_model("nacrf_transformer")
class NACRFTransformerModel(NATransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.crf_layer = DynamicCRF(
num_embedding=len(self.tgt_dict),
low_rank=args.crf_lowrank_approx,
beam_size=args.crf_beam_approx,
)
@property
def allow_ensemble(self):
return False
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--crf-lowrank-approx",
type=int,
help="the dimension of low-rank approximation of transition",
)
parser.add_argument(
"--crf-beam-approx",
type=int,
help="the beam size for apporixmating the normalizing factor",
)
parser.add_argument(
"--word-ins-loss-factor",
type=float,
help="weights on NAT loss used to co-training with CRF loss.",
)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)
# compute the log-likelihood of CRF
crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
"factor": self.args.word_ins_loss_factor,
},
"word_crf": {"loss": crf_nll},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder and get emission scores
output_masks = output_tokens.ne(self.pad)
word_ins_out = self.decoder(
normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
# run viterbi decoding through CRF
_scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
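# Illustrative sketch (assumed shapes, mirroring the loss above): DynamicCRF
# returns sentence-level log-likelihoods for emission scores of shape
# (B, T, V), which are normalized per token before batch averaging.
def _demo_crf_token_nll(crf_layer, emissions, targets, mask):
    nll = -crf_layer(emissions, targets, mask)       # (B,)
    return (nll / mask.type_as(nll).sum(-1)).mean()  # scalar per-token NLL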
@register_model_architecture("nacrf_transformer", "nacrf_transformer")
def nacrf_base_architecture(args):
args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
base_architecture(args)
| 4,378
| 34.893443
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/hubert/hubert.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec.wav2vec2 import (
ConvFeatureExtractionModel,
TransformerEncoder,
)
from fairseq.modules import GradMultiply, LayerNorm
from fairseq.tasks.hubert_pretraining import (
HubertPretrainingConfig,
HubertPretrainingTask,
)
from omegaconf import II
logger = logging.getLogger(__name__)
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class HubertConfig(FairseqDataclass):
label_rate: int = II("task.label_rate")
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group "
"norm with d groups in the first conv block, whereas layer_norm "
"has layer norms in every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for the transformer"},
)
attention_dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for attention weights"},
)
activation_dropout: float = field(
default=0.0,
metadata={"help": "dropout probability after activation in FFN"},
)
encoder_layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a tarnsformer layer"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many "
"dimensions. set to encoder_embed_dim is <= 0"
},
)
untie_final_proj: bool = field(
default=False,
metadata={"help": "use separate projection for each target"},
)
layer_norm_first: bool = field(
default=False,
metadata={"help": "apply layernorm first in the transformer"},
)
conv_feature_layers: str = field(
default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2",
metadata={
"help": "string describing convolutional feature extraction "
"layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0,
metadata={"help": "multiply feature extractor var grads by this"},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65,
metadata={"help": "probability of replacing a token with mask"},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={"help": "legacy (to be removed)"},
)
# loss computation
skip_masked: bool = field(
default=False,
metadata={"help": "skip computing losses over masked frames"},
)
skip_nomask: bool = field(
default=False,
metadata={"help": "skip computing losses over unmasked frames"},
)
@register_model("hubert", dataclass=HubertConfig)
class HubertModel(BaseFairseqModel):
def __init__(
self,
cfg: HubertConfig,
task_cfg: HubertPretrainingConfig,
dictionaries: List[Dictionary],
) -> None:
super().__init__()
logger.info(f"HubertModel Config: {cfg}")
feature_enc_layers = eval(cfg.conv_feature_layers) # noqa
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers])
self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / task_cfg.sample_rate
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.logit_temp = cfg.logit_temp
self.skip_masked = cfg.skip_masked
self.skip_nomask = cfg.skip_nomask
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.untie_final_proj = cfg.untie_final_proj
if self.untie_final_proj:
self.final_proj = nn.Linear(
cfg.encoder_embed_dim, final_dim * len(dictionaries)
)
else:
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
# modules below are not needed during fine-tuning
if any([d is None for d in dictionaries]):
logger.info("cannot find dictionary. assume will be used for fine-tuning")
else:
self.num_classes = [len(d) for d in dictionaries]
self.label_embs_concat = nn.Parameter(
torch.FloatTensor(sum(self.num_classes), final_dim)
)
nn.init.uniform_(self.label_embs_concat)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: HubertConfig, task: HubertPretrainingTask):
"""Build a new model instance."""
model = HubertModel(cfg, task.cfg, task.dictionaries)
return model
def apply_mask(self, x, padding_mask, target_list):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def compute_nce(self, x, pos, negs):
neg_is_pos = (pos == negs).all(-1)
pos = pos.unsqueeze(0)
targets = torch.cat([pos, negs], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits /= self.logit_temp
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
logits = logits.transpose(0, 1) # (num_x, num_cls+1)
return logits
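    # Note (illustrative): the positive target occupies column 0 of the logits
    # returned above, so the cross-entropy targets are all-zero index vectors
    # (see get_targets below); duplicated negatives are masked out with -inf.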
def forward_features(self, source: torch.Tensor) -> torch.Tensor:
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
return features
def forward_targets(
self,
features: torch.Tensor,
target_list: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
# Trim features to ensure labels exist and then get aligned labels
feat_tsz = features.size(2)
targ_tsz = min([t.size(1) for t in target_list])
if self.feat2tar_ratio * feat_tsz > targ_tsz:
feat_tsz = int(targ_tsz / self.feat2tar_ratio)
features = features[..., :feat_tsz]
target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
target_list = [t[:, target_inds.long()] for t in target_list]
return features, target_list
def forward_padding_mask(
self,
features: torch.Tensor,
padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1)
padding_mask = padding_mask.all(-1)
return padding_mask
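    # Example (illustrative): with a conv stack that downsamples by 320x, a
    # 16000-sample padding mask is first trimmed to a multiple of the feature
    # length and then reduced with all(-1), so a frame counts as padding only
    # when every raw sample it covers is padding.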
def forward(
self,
source: torch.Tensor,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
"""output layer is 1-based"""
features = self.forward_features(source)
if target_list is not None:
features, target_list = self.forward_targets(features, target_list)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
if mask:
x, mask_indices = self.apply_mask(features, padding_mask, target_list)
else:
x = features
mask_indices = None
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, _ = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1,
)
if features_only:
return {"x": x, "padding_mask": padding_mask, "features": features}
def compute_pred(proj_x, target, label_embs):
# compute logits for the i-th label set
y = torch.index_select(label_embs, 0, target.long())
negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1)
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
# proj_x: (S, D)
# y: (S, D)
# negs: (Neg, S, D)
return self.compute_nce(proj_x, y, negs)
label_embs_list = self.label_embs_concat.split(self.num_classes, 0)
if not self.skip_masked:
masked_indices = torch.logical_and(~padding_mask, mask_indices)
proj_x_m = self.final_proj(x[masked_indices])
if self.untie_final_proj:
proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1)
else:
proj_x_m_list = [proj_x_m for _ in range(len(target_list))]
logit_m_list = [
compute_pred(proj_x_m, t[masked_indices], label_embs_list[i])
for i, (proj_x_m, t) in enumerate(zip(proj_x_m_list, target_list))
]
else:
logit_m_list = [None for _ in target_list]
if not self.skip_nomask:
nomask_indices = torch.logical_and(~padding_mask, ~mask_indices)
proj_x_u = self.final_proj(x[nomask_indices])
if self.untie_final_proj:
proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1)
else:
proj_x_u_list = [proj_x_u for _ in range(len(target_list))]
logit_u_list = [
compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i])
for i, (proj_x_u, t) in enumerate(zip(proj_x_u_list, target_list))
]
else:
logit_u_list = [None for _ in target_list]
result = {
"logit_m_list": logit_m_list,
"logit_u_list": logit_u_list,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
return result
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
res = self.forward(
source,
padding_mask=padding_mask,
mask=mask,
features_only=True,
output_layer=output_layer,
)
feature = res["features"] if ret_conv else res["x"]
return feature, res["padding_mask"]
def get_logits(self, net_output, is_masked=True):
if is_masked:
logits_list = net_output["logit_m_list"]
else:
logits_list = net_output["logit_u_list"]
logits_list = [x.float() for x in logits_list if x is not None]
return logits_list
def get_targets(self, net_output, is_masked=True):
logits_list = self.get_logits(net_output, is_masked)
targets_list = [x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list]
return targets_list
def get_extra_losses(self, net_output):
extra_losses = []
names = []
if "features_pen" in net_output:
extra_losses.append(net_output["features_pen"])
names.append("features_pen")
return extra_losses, names
def remove_pretraining_modules(self):
self.target_glu = None
self.final_proj = None
| 19,029
| 34.371747
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/hubert/hubert_asr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from argparse import Namespace
from typing import Any
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models import BaseFairseqModel, FairseqEncoder, register_model
from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES
from fairseq.tasks import FairseqTask
from omegaconf import II, MISSING
@dataclass
class HubertAsrConfig(FairseqDataclass):
w2v_path: str = field(default=MISSING, metadata={"help": "path to hubert model"})
no_pretrained_weights: bool = field(
default=False,
metadata={"help": "if true, does not load pretrained weights"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={"help": "dropout after transformer and before final projection"},
)
dropout: float = field(
default=0.0,
metadata={"help": "dropout probability inside hubert model"},
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights " "inside hubert model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN " "inside hubert model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "repeat the mask indices multiple times"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask "
"(normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
freeze_finetune_updates: int = field(
default=0,
metadata={"help": "dont finetune hubert for this many updates"},
)
feature_grad_mult: float = field(
default=0.0,
metadata={"help": "reset feature grad mult in hubert to this"},
)
layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a layer in hubert"},
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded hubert args
w2v_args: Any = None
@dataclass
class HubertCtcConfig(HubertAsrConfig):
pass
@register_model("hubert_ctc", dataclass=HubertCtcConfig)
class HubertCtc(BaseFairseqModel):
def __init__(self, cfg: HubertCtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: HubertCtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = HubertEncoder(cfg, task.target_dictionary)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_logits(self, net_output):
logits = net_output["encoder_out"]
padding = net_output["encoder_padding_mask"]
        if padding is not None and padding.any():
            # advanced indexing (logits[padding][..., 0] = 0) writes into a
            # copy and is a silent no-op; build the masked row once and
            # assign it in a single step instead
            masking = logits.new_full((logits.size(-1),), float("-inf"))
            masking[0] = 0
            logits[padding.T] = masking
return logits
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
@dataclass
class HubertSeq2SeqConfig(HubertAsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False,
metadata={"help": "apply layernorm before each decoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings " "(outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights " "inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN " "inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False,
metadata={"help": "share decoder input and output embeddings"},
)
class HubertEncoder(FairseqEncoder):
def __init__(self, cfg: HubertAsrConfig, tgt_dict=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
if state is not None and "task_state" in state:
# This will load the stored "dictionaries" object
task.load_state_dict(state["task_state"])
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
model.load_state_dict(state["model"], strict=False)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if tgt_dict is not None:
self.proj = Linear(d, len(tgt_dict))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_features(**w2v_args)
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
| 12,353
| 33.127072
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/hubert/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hubert import * # noqa
from .hubert_asr import * # noqa
| 242
| 29.375
| 65
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/s2t_cif_transformer.py
|
#!/usr/bin/env python3
import sys
import logging
import argparse
import math
import copy
import edlib
import numpy as np
from typing import Dict, List, Optional, Tuple, Any
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, utils, tasks
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.transformer import Embedding, TransformerDecoder, TransformerConfig
from fairseq.modules.conformer_layer import ConformerEncoderLayer
from fairseq.modules.positional_encoding import RelPositionalEncoding
from fairseq.modules.rotary_positional_embedding import RotaryPositionalEmbedding
from fairseq.models.speech_to_text.cif_transformer import CifMiddleware
from fairseq.modules import (
FairseqDropout,
TransformerEncoderLayer,
AdaptiveSoftmax,
BaseLayer,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.wav2vec.wav2vec2 import TransformerSentenceEncoderLayer
from fairseq.modules import transformer_layer
from torch import Tensor
logger = logging.getLogger(__name__)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerDecoderBase":
return "TransformerDecoder"
else:
return module_name
# Expand the dimension of given tensors
def expand_tensor_dim(x, expand_size, target_dim=1, reduce=False):
assert target_dim == 1, "only the expansion at the second dimension is available."
rank = len(x.size())
unsq_x = x.unsqueeze(target_dim)
if rank == 1:
sz1 = x.size()
x = unsq_x.repeat(1, expand_size)
x = x.view(sz1 * expand_size) if reduce else x
elif rank == 2:
sz1, sz2 = x.size()
x = unsq_x.repeat(1, expand_size, 1)
x = x.view((sz1 * expand_size), sz2) if reduce else x
elif rank == 3:
sz1, sz2, sz3 = x.size()
x = unsq_x.repeat(1, expand_size, 1, 1)
x = x.view((sz1 * expand_size), sz2, sz3) if reduce else x
else:
raise NotImplementedError("Not supported rank %d" % rank)
return x
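# Illustrative sketch: expand a batch tensor along a new second dimension and
# flatten it back, e.g. to score 1 + K sampled hypotheses per utterance.
def _demo_expand_tensor_dim():
    x = torch.arange(6).view(2, 3)                        # B=2, T=3
    y = expand_tensor_dim(x, expand_size=4, reduce=True)  # -> (B * 4, T)
    assert y.shape == (8, 3)
    return y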
class PositionalEncoding(torch.nn.Module):
"""Positional encoding.
Args:
d_model (int): Embedding dimension.
dropout_rate (float): Dropout rate.
max_len (int): Maximum input length.
reverse (bool): Whether to reverse the input position. Only for
the class LegacyRelPositionalEncoding. We remove it in the current
class RelPositionalEncoding.
"""
def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
"""Construct an PositionalEncoding object."""
super(PositionalEncoding, self).__init__()
self.d_model = d_model
self.reverse = reverse
self.xscale = math.sqrt(self.d_model)
self.dropout = torch.nn.Dropout(p=dropout_rate)
self.pe = None
self.extend_pe(torch.tensor(0.0).expand(1, max_len))
def extend_pe(self, x):
"""Reset the positional encodings."""
if self.pe is not None:
if self.pe.size(1) >= x.size(1):
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
pe = torch.zeros(x.size(1), self.d_model)
if self.reverse:
position = torch.arange(
x.size(1) - 1, -1, -1.0, dtype=torch.float32
).unsqueeze(1)
else:
position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, self.d_model, 2, dtype=torch.float32)
* -(math.log(10000.0) / self.d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.pe = pe.to(device=x.device, dtype=x.dtype)
def forward(self, x: torch.Tensor):
"""Add positional encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, `*`).
Returns:
torch.Tensor: Encoded tensor (batch, time, `*`).
"""
self.extend_pe(x)
x = x * self.xscale + self.pe[:, : x.size(1)]
return self.dropout(x)
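# Illustrative usage sketch (hypothetical values): inputs are scaled by
# sqrt(d_model) before the sinusoidal table is added, then dropout is applied.
def _demo_positional_encoding():
    pe = PositionalEncoding(d_model=256, dropout_rate=0.1)
    return pe(torch.zeros(2, 100, 256))  # (batch, time, d_model)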
class Conv1dSubsampler(nn.Module):
"""Convolutional subsampler: a stack of 1D convolution (along temporal
dimension) followed by non-linear activation via gated linear units
(https://arxiv.org/abs/1911.08460)
Args:
in_channels (int): the number of input channels
mid_channels (int): the number of intermediate channels
out_channels (int): the number of output channels
kernel_sizes (List[int]): the kernel size for each convolutional layer
"""
def __init__(
self,
in_channels: int,
mid_channels: int,
out_channels: int,
kernel_sizes: List[int] = (3, 3),
):
super(Conv1dSubsampler, self).__init__()
self.n_layers = len(kernel_sizes)
self.conv_layers = nn.ModuleList(
nn.Conv1d(
in_channels if i == 0 else mid_channels // 2,
mid_channels if i < self.n_layers - 1 else out_channels * 2,
k,
stride=2,
padding=k // 2,
)
for i, k in enumerate(kernel_sizes)
)
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
for _ in range(self.n_layers):
out = ((out.float() - 1) / 2 + 1).floor().long()
return out
def forward(self, src_tokens, src_lengths):
bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D)
x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T
for conv in self.conv_layers:
x = conv(x)
x = nn.functional.glu(x, dim=1)
            # GLU halves the channel dimension by default.
_, _, out_seq_len = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D)
return x, self.get_out_seq_lens_tensor(src_lengths)
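# Illustrative sketch: each stride-2 conv maps a length L to
# floor((L - 1) / 2 + 1), so the default two-layer stack roughly quarters the
# input length, e.g. 100 -> 50 -> 25.
def _demo_conv1d_out_lens():
    sub = Conv1dSubsampler(in_channels=80, mid_channels=512, out_channels=256)
    return sub.get_out_seq_lens_tensor(torch.tensor([100, 37]))  # -> [25, 10]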
class Conv2dSubsampler(torch.nn.Module):
"""Convolutional 2D subsampling (to 1/4 length).
Args:
idim (int): Input dimension.
odim (int): Output dimension.
dropout_rate (float): Dropout rate.
pos_enc (torch.nn.Module): Custom position encoding layer.
"""
def __init__(self, idim, odim, conv_output_channels, kernel_sizes):
"""Construct an Conv2dSubsampling object."""
super().__init__()
assert len(conv_output_channels) == len(kernel_sizes)
self._num_conv_layers = len(kernel_sizes)
self.conv = nn.ModuleList([])
for layer_id, (output_channel, kernel_size) in enumerate(
zip(conv_output_channels, kernel_sizes)
):
if layer_id == 0:
self.conv.append(torch.nn.Conv2d(1, output_channel, kernel_size, 2))
else:
prev_output_channel = conv_output_channels[layer_id - 1]
self.conv.append(
torch.nn.Conv2d(prev_output_channel, output_channel, kernel_size, 2)
)
self.conv.append(torch.nn.ReLU())
conv_final_dim = conv_output_channels[-1]
if self._num_conv_layers == 1:
self.out = torch.nn.Sequential(
torch.nn.Linear(conv_final_dim * ((idim - 1) // 2), odim),
)
elif self._num_conv_layers == 2:
self.out = torch.nn.Sequential(
torch.nn.Linear(conv_final_dim * (((idim - 1) // 2 - 1) // 2), odim),
)
else:
raise NotImplementedError("Not supported value.")
def forward(self, x, src_lens):
"""Subsample x."""
x = x.unsqueeze(1) # B x C x T x F
for module in self.conv:
x = module(x)
b, c, t, f = x.size()
x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) # B x T x C
x = x.permute(1, 0, 2) # T x B x C
out_seq_lens = self.get_out_seq_lens_tensor(src_lens)
return x, out_seq_lens
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
for _ in range(self._num_conv_layers):
out = ((out.float() - 1) / 2).floor().long()
return out
def __getitem__(self, key):
"""Get item.
When reset_parameters() is called, if use_scaled_pos_enc is used,
        return the positional encoding.
"""
if key != -1:
raise NotImplementedError("Support only `-1` (for `reset_parameters`).")
return self.out[key]
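# Note (illustrative): unlike Conv1dSubsampler above, each 2D conv layer here
# maps a length L to floor((L - 1) / 2) (stride-2 kernel without padding), so
# two layers shrink e.g. 100 -> 49 -> 24 frames.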
class CtcConstrainedCifMiddleware(nn.Module):
def __init__(self, args):
super().__init__()
# Get configurations related to continuous integrate-and-fire
self.cif_threshold = args.cif_threshold
self.cif_output_dim = args.cif_embedding_dim
self.encoder_embed_dim = args.encoder_embed_dim
self.produce_weight_type = args.produce_weight_type
self.apply_scaling = args.apply_scaling
self.apply_tail_handling = args.apply_tail_handling
self.tail_handling_firing_threshold = args.tail_handling_firing_threshold
self.add_cif_ctxt_layers = args.add_cif_ctxt_layers
# Build weight projection layer to compute weight from encoder outputs
if self.produce_weight_type == "dense":
self.dense_proj = Linear(
self.encoder_embed_dim, args.dense_cif_units_num
).cuda()
self.weight_proj = Linear(args.dense_cif_units_num, 1).cuda()
elif self.produce_weight_type == "conv":
self.cif_conv_layer_num = args.conv_cif_layer_num
self.conv = torch.nn.Conv1d(
self.encoder_embed_dim,
args.conv_cif_output_channels_num,
args.conv_cif_width,
stride=1,
padding=int(args.conv_cif_width / 2),
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
).cuda()
self.conv_dropout = torch.nn.Dropout(p=args.conv_cif_dropout).cuda()
self.weight_proj = Linear(args.conv_cif_output_channels_num, 1).cuda()
else:
self.weight_proj = Linear(self.encoder_embed_dim, 1).cuda()
# Build the final projection layer for cif outputs
if self.cif_output_dim != self.encoder_embed_dim:
self.cif_output_proj = Linear(
self.encoder_embed_dim, self.cif_output_dim, bias=False
).cuda()
# Build cif contextual layers
if self.add_cif_ctxt_layers:
self.cif_ctxt_embed_dim = args.cif_ctxt_embed_dim
self.cif_ctxt_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=args.cif_ctxt_embed_dim,
ffn_embedding_dim=args.cif_ctxt_ffn_embed_dim,
num_attention_heads=args.cif_ctxt_attention_heads,
dropout=args.cif_ctxt_dropout,
activation_dropout=args.cif_ctxt_dropout
or args.cif_ctxt_activation_dropout,
attention_dropout=args.cif_ctxt_dropout
or args.cif_ctxt_activation_dropout,
layer_norm_first=args.cif_ctxt_normalize_before,
)
for _ in range(args.cif_ctxt_layers)
]
)
# CTC Constrained training settings
self.use_ctc_constraint = args.use_ctc_constraint
if self.use_ctc_constraint:
self.ctc_prob_threshold = args.ctc_prob_threshold
def forward(
self, encoder_outputs, target_lengths=None, input_lengths=None, ctc_logits=None
):
"""
Args:
encoder_out: B x T x C
encoder_padding_mask: B x T
targets_length: B
ctc_logits: B x T x V (including blank_token_id)
"""
# Prepare inputs
encoder_out = encoder_outputs["encoder_out"][0].transpose(0, 1) # B x T x C
if len(encoder_outputs["encoder_padding_mask"]) != 0:
encoder_padding_mask = encoder_outputs["encoder_padding_mask"][0] # B x T
else:
assert (
input_lengths is not None
), "Please ensure that input_lengths is provided."
encoder_padding_mask = lengths_to_padding_mask(input_lengths) # B x T
# Forward weight generation
if self.produce_weight_type == "dense":
proj_out = self.dense_proj(encoder_out)
act_proj_out = torch.relu(proj_out)
sig_input = self.weight_proj(act_proj_out)
weight = torch.sigmoid(sig_input)
# weight has shape [batch_size, length, 1]
elif self.produce_weight_type == "conv":
conv_input = encoder_out.permute(0, 2, 1)
# Adjust the shape of convolution layer input [B, C_in, T]
conv_out = self.conv(conv_input)
# conv_out has shape [B, C_out, T]
proj_input = conv_out.permute(0, 2, 1)
proj_input = self.conv_dropout(proj_input)
# Adjust conv output to shape [B, T, C_cif]
sig_input = self.weight_proj(proj_input)
sig_input = sig_input.float()
weight = torch.sigmoid(sig_input)
weight = weight.type_as(encoder_out)
else:
sig_input = self.weight_proj(encoder_out)
weight = torch.sigmoid(sig_input)
not_padding_mask = ~encoder_padding_mask
weight = (
torch.squeeze(weight, dim=-1) * not_padding_mask.int()
) # weight has shape B x T
org_weight = weight
# Sum weights
if self.training and self.apply_scaling and target_lengths is not None:
            # Scale the weights during training so that they sum to the
            # target length of each utterance.
weight = weight.float()
weight_sum = weight.sum(-1) # weight_sum has shape [batch_size]
normalize_scalar = torch.unsqueeze(
target_lengths / (weight_sum + 1e-8), -1
) # B x 1
weight = weight * normalize_scalar
weight = weight.type_as(org_weight) # B x T
ctc_border_marks = None
if self.use_ctc_constraint and ctc_logits is not None:
ctc_probs = utils.softmax(
ctc_logits.transpose(0, 1).float(), dim=-1
) # B x T x V
            # TODO: note that the default blank id is assumed to be the <bos> id (0)
blank_probs = ctc_probs[:, :, 0] # B x T
non_blank_probs = 1.0 - blank_probs # B x T
ctc_border_marks = (
non_blank_probs > self.ctc_prob_threshold
).int() # B x T
            # e.g. [[0, 0, 0, 0, 1, 0, 1], ...]
# Integrate and fire
batch_size = encoder_out.size(0)
max_length = encoder_out.size(1)
encoder_embed_dim = encoder_out.size(2)
padding_start_id = not_padding_mask.sum(-1) # B
# Initialize
accumulated_weights = torch.zeros(batch_size, 0, dtype=encoder_out.dtype).cuda()
accumulated_states = torch.zeros(
batch_size, 0, encoder_embed_dim, dtype=encoder_out.dtype
).cuda()
fired_states = torch.zeros(
batch_size, 0, encoder_embed_dim, dtype=encoder_out.dtype
).cuda()
ctc_accum_weights = (
torch.zeros(batch_size, 0, dtype=encoder_out.dtype).cuda()
if self.use_ctc_constraint
else None
) # B x T
# Begin integrate and fire
for i in range(max_length):
# Get previous states from the recorded tensor
prev_accumulated_weight = (
torch.zeros([batch_size], dtype=encoder_out.dtype).cuda()
if i == 0
else accumulated_weights[:, i - 1]
)
prev_accumulated_state = (
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda()
if i == 0
else accumulated_states[:, i - 1, :]
)
            # Decide whether to fire a boundary at the current step
cur_is_fired = (
(prev_accumulated_weight + weight[:, i]) >= self.cif_threshold
).unsqueeze(dim=-1)
# cur_is_fired with shape [batch_size, 1]
            # Update the accumulated weights depending on whether a boundary fires
cur_weight = torch.unsqueeze(weight[:, i], -1)
# cur_weight has shape [batch_size, 1]
prev_accumulated_weight = torch.unsqueeze(prev_accumulated_weight, -1)
# prev_accumulated_weight also has shape [batch_size ,1]
remained_weight = (
torch.ones_like(prev_accumulated_weight, dtype=encoder_out.dtype).cuda()
- prev_accumulated_weight
)
# remained_weight with shape [batch_size ,1]
# Obtain the accumulated weight of current step
cur_accumulated_weight = torch.where(
cur_is_fired,
cur_weight - remained_weight,
cur_weight + prev_accumulated_weight,
) # B x 1
cur_ctc_accum_weight = None
if self.use_ctc_constraint and ctc_border_marks is not None:
if i == 0:
prev_ctc_accum_weight = torch.zeros(
[batch_size], dtype=encoder_out.dtype
).cuda() # B
else:
prev_ctc_border_marks = ctc_border_marks[:, i - 1] # B
prev_ctc_accum_weight = torch.where(
prev_ctc_border_marks.float() == 1.0, # B
torch.zeros([batch_size], dtype=encoder_out.dtype).cuda(), # B
ctc_accum_weights[:, i - 1], # B
) # B x 1
cur_ctc_accum_weight = prev_ctc_accum_weight.unsqueeze(-1) + cur_weight
# Obtain accumulated state of current step
cur_accumulated_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
(cur_weight - remained_weight) * encoder_out[:, i, :],
prev_accumulated_state + cur_weight * encoder_out[:, i, :],
) # B x C
# Obtain fired state of current step
            # fired locations hold meaningful representations; non-fired locations are all-zero embeddings
cur_fired_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
prev_accumulated_state + remained_weight * encoder_out[:, i, :],
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda(),
) # B x C
# Handling the speech tail by rounding up and down
if (not self.training) and self.apply_tail_handling:
                # When the encoder output position exceeds the last valid
                # position, the current state is kept if the accumulated weight
                # is greater than tail_handling_firing_threshold; otherwise it
                # is discarded.
# print("______________________")
# print(i)
# print("cur_accumulated_state:", cur_accumulated_state[:, :10])
# print("cur_accumulated_weight: ", cur_accumulated_weight)
# print(i == padding_start_id)
cur_fired_state = torch.where(
i
== padding_start_id.unsqueeze(dim=-1).repeat(
[1, encoder_embed_dim]
), # B x C
torch.where(
cur_accumulated_weight.repeat([1, encoder_embed_dim])
<= self.tail_handling_firing_threshold, # B x C
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda(),
                        # <= tail_handling_firing_threshold: discard the state.
cur_accumulated_state / (cur_accumulated_weight + 1e-10)
                        # > tail_handling_firing_threshold: normalize and keep.
                        # eps = 1e-10 prevents division by zero.
),
cur_fired_state,
) # B x C
# For normal condition, including both training and evaluation
# Mask padded locations with all-zero embeddings
cur_fired_state = torch.where(
torch.full(
[batch_size, encoder_embed_dim], i, dtype=encoder_out.dtype
).cuda()
> padding_start_id.unsqueeze(dim=-1).repeat(
[1, encoder_embed_dim]
), # B x C
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda(),
cur_fired_state,
)
# Update accumulated arguments
accumulated_weights = torch.cat(
(accumulated_weights, cur_accumulated_weight), 1
) # B x T
accumulated_states = torch.cat(
(accumulated_states, torch.unsqueeze(cur_accumulated_state, 1)), 1
) # shape = [B, L, D]
fired_states = torch.cat(
(fired_states, torch.unsqueeze(cur_fired_state, 1)), 1
) # shape = [B, L, D]
if self.use_ctc_constraint and cur_ctc_accum_weight is not None:
ctc_accum_weights = torch.cat(
[ctc_accum_weights, cur_ctc_accum_weight], -1
) # B x T
# Extracts cif_outputs for each utterance
fired_marks = (torch.abs(fired_states).sum(-1) != 0.0).int() # B x T
fired_utt_length = fired_marks.sum(-1) # B
fired_max_length = (
fired_utt_length.max().int()
        )  # The maximum number of fired positions in the current batch
cif_outputs = torch.zeros(
[0, fired_max_length, encoder_embed_dim], dtype=encoder_out.dtype
).cuda() # Initialize cif outputs
cif_durations = torch.zeros(
[0, fired_max_length], dtype=torch.int32
).cuda() # Initialize cif durations
def dynamic_partition(
data: torch.Tensor, partitions: torch.Tensor, num_partitions=None
):
assert (
len(partitions.shape) == 1
), "Only one dimensional partitions supported"
assert (
data.shape[0] == partitions.shape[0]
), "Partitions requires the same size as data"
if num_partitions is None:
num_partitions = max(torch.unique(partitions))
return [data[partitions == part_id] for part_id in range(num_partitions)]
for j in range(batch_size):
# Get information of j-th sample
cur_utt_fired_mark = fired_marks[j, :]
cur_utt_fired_state = fired_states[j, :, :]
cur_utt_outputs = dynamic_partition(
cur_utt_fired_state, cur_utt_fired_mark, 2
)
cur_utt_output = cur_utt_outputs[1] # Get integrated representations
            cur_utt_length = cur_utt_output.size(0)  # The number of fired positions
pad_length = fired_max_length - cur_utt_length # Calculate padding length
cur_utt_output = torch.cat(
(
cur_utt_output,
torch.full(
[pad_length, encoder_embed_dim], 0.0, dtype=encoder_out.dtype
).cuda(),
),
dim=0,
) # Pad current utterance cif outputs to fired_max_length
cur_utt_output = torch.unsqueeze(cur_utt_output, 0)
# Reshape to [1, fired_max_length, encoder_embed_dim]
# Concatenate cur_utt_output and cif_outputs along batch axis
cif_outputs = torch.cat([cif_outputs, cur_utt_output], 0)
# Collect cif durations
cur_fired_indices = torch.nonzero(cur_utt_fired_mark)[:, -1]
shifted_cur_fired_indices = torch.cat(
[-1 * torch.ones([1], dtype=torch.int32).cuda(), cur_fired_indices],
dim=-1,
)[: cur_fired_indices.size(0)]
cur_cif_durations = cur_fired_indices - shifted_cur_fired_indices
cur_cif_durations = torch.cat(
(
cur_cif_durations,
torch.full([pad_length], 0, dtype=torch.int32).cuda(),
),
dim=0,
).unsqueeze(dim=0)
cif_durations = torch.cat(
[cif_durations, cur_cif_durations], dim=0
            )  # concatenate along the batch axis
cif_out_padding_mask = (torch.abs(cif_outputs).sum(-1) != 0.0).int()
        # cif_out_padding_mask shape = [batch_size, fired_max_length]; a value of 0 marks padding.
if self.training:
# In training phase, use the sum of original weights
# as quantity out for quantity loss.
quantity_out = org_weight.sum(-1)
else:
quantity_out = weight.sum(-1)
if self.cif_output_dim != encoder_embed_dim:
cif_outputs = self.cif_output_proj(cif_outputs)
ctxt_cif_outputs = None
if self.add_cif_ctxt_layers and self.cif_output_dim == self.cif_ctxt_embed_dim:
x = cif_outputs.transpose(0, 1)
padding_mask = ~cif_out_padding_mask.bool()
for layer in self.cif_ctxt_stacks:
x, _ = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
ctxt_cif_outputs = x.transpose(0, 1)
ctc_align_outputs = None
if self.use_ctc_constraint and ctc_accum_weights is not None:
org_ctc_align_outputs = ctc_accum_weights * ctc_border_marks # B x T_a
ctc_align_max_len = ctc_border_marks.size(1)
ctc_align_outputs = (
torch.zeros([0, ctc_align_max_len])
.type_as(org_ctc_align_outputs)
.cuda()
)
for k in range(batch_size):
cur_border_marks = ctc_border_marks[k, :] # T
cur_borders_num = cur_border_marks.sum() # 1
cur_ctc_accum_weight = ctc_accum_weights[k, :] # T_a
compressed_ctc_weight = cur_ctc_accum_weight[
cur_border_marks.float() != 0.0
]
pad_length = ctc_align_max_len - cur_borders_num # get padding length
padded_compressed_ctc_weight = torch.cat(
[
compressed_ctc_weight,
torch.full([pad_length], 0.0)
.type_as(compressed_ctc_weight)
.cuda(),
],
dim=0,
).unsqueeze(
0
) # 1 x T
ctc_align_outputs = torch.cat(
[ctc_align_outputs, padded_compressed_ctc_weight], dim=0
) # B x T
return {
"cif_out": cif_outputs, # shape = [batch_size, fired_max_length, cif_output_dim]
"cif_out_padding_mask": cif_out_padding_mask, # shape = [batch_size, fired_max_length]
"ctxt_cif_out": ctxt_cif_outputs, # shape = [batch_size, fired_max_length, cif_ctxt_embed_dim]
"quantity_out": quantity_out, # shape = [batch_size]
"cif_durations": cif_durations, # shape = [batch_size, fired_max_length]
"ctc_align_outputs": ctc_align_outputs, # B x T
}
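# Illustrative sketch (not part of the training flow): how a consumer reads
# the dict returned above. The helper name is hypothetical; the keys and
# shapes follow the return statement of the forward method above.
def _example_read_cif_output(cif_out_dict):
    cif_out = cif_out_dict["cif_out"]  # B x fired_max_length x C
    cif_mask = cif_out_dict["cif_out_padding_mask"].bool()  # B x fired_max_length
    cif_durations = cif_out_dict["cif_durations"]  # B x fired_max_length
    token_lengths = cif_mask.int().sum(-1)  # fired tokens per utterance
    return cif_out, cif_durations, token_lengths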
# Confidence Estimation Module
class UncertaintyEstimationModule(nn.Module):
def __init__(self, args, dict):
super().__init__()
self._uem_input_state = args.uem_input_state.strip().split(",")
self.vsz = len(dict)
self.expand_size = 1 + args.K_corr_samp
# Determine the input dim for UEM
uem_input_dim = 0
        for state in self._uem_input_state:
if state == "cif_outputs":
uem_input_dim += args.cif_embedding_dim
elif state == "decoder_states":
uem_input_dim += args.decoder_embed_dim
elif state == "logits":
uem_input_dim += self.vsz
elif state == "pred_embeds":
uem_input_dim += args.decoder_embed_dim
else:
raise NotImplementedError("Unknown input type: %s" % state)
# Build modules for UEM
self.dropout_module = FairseqDropout(
args.corr_dropout,
module_name=module_name_fordropout(self.__class__.__name__),
)
self.use_uem_bn_layer = args.use_uem_bn_layer
if self.use_uem_bn_layer:
self.uem_bottleneck_proj = nn.Linear(uem_input_dim, args.uem_bn_proj_dim)
self.uem_pred_proj = nn.Linear(args.uem_bn_proj_dim, 1)
else:
self.uem_pred_proj = nn.Linear(uem_input_dim, 1)
def forward(
self,
cif_outputs,
decoder_states,
logits=None,
pred_embeds=None,
prev_output_tokens=None,
):
        # Regularize lengths
x = cif_outputs # B x T x C
min_reg_len = min(x.size(1), prev_output_tokens.size(1))
x = x[:, :min_reg_len, :]
decoder_states = decoder_states[:, :min_reg_len, :]
logits = logits[:, :min_reg_len, :] if logits is not None else None
pred_embeds = (
pred_embeds[:, :, :min_reg_len, :] if pred_embeds is not None else None
)
pred_embeds = (
pred_embeds.view(-1, pred_embeds.size(-2), pred_embeds.size(-1))
if pred_embeds is not None
else None
) # B x T x C
# Expand size
x = expand_tensor_dim(x, expand_size=self.expand_size, reduce=True)
decoder_states = (
expand_tensor_dim(
decoder_states,
expand_size=self.expand_size,
reduce=True,
)
if decoder_states is not None
else None
)
logits = (
expand_tensor_dim(logits, expand_size=self.expand_size, reduce=True)
if logits is not None
else None
)
input_list = [x, decoder_states]
if logits is not None and "logits" in self._uem_input_state:
input_list.append(logits)
if pred_embeds is not None and "pred_embeds" in self._uem_input_state:
input_list.append(pred_embeds)
x = torch.cat(input_list, dim=-1) # B x T x C
x = self.dropout_module(x)
# Forward bottleneck layer
bottleneck_embeds = None
if self.use_uem_bn_layer:
x = self.uem_bottleneck_proj(x)
x = torch.relu(x)
x = self.dropout_module(x)
bottleneck_embeds = x # B x T x C
x = self.uem_pred_proj(x) # B x T x 1
x = x.squeeze(-1) # B x T
return x, bottleneck_embeds
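# Minimal sketch (hypothetical helper, assuming the shapes produced above):
# the UEM emits one logit per token; a sigmoid turns it into the probability
# that the token is erroneous, masked by the padding mask from PrepareModule.
def _example_uem_error_probs(uem_logits, uem_padding_mask):
    import torch
    # uem_logits: (B * (1 + K)) x T; reshape the B x (1 + K) x T mask to match.
    error_probs = torch.sigmoid(uem_logits)
    mask = uem_padding_mask.view(error_probs.size()).float()
    return error_probs * mask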
# Correction Decoder
class CorrectionDecoder(nn.Module):
def __init__(self, args, dict):
super().__init__()
self.expand_size = 1 + args.K_corr_samp
# Preparation for transformer masking
self._future_mask = torch.empty(0)
# Get original dict
ori_dict_size = len(dict)
self.padding_idx = dict.pad()
self.dict_size = (
ori_dict_size + 1
        ) # one extra placeholder for the no-correction option <no-cor>
# Get hyper-parameters
self._cordec_input_state = args.cordec_input_state.strip().split(",")
self._cordec_output_state = args.cordec_output_state.strip().split(",")
# build input projection
input_dim = 0
for state in self._cordec_input_state:
if state == "cif_outputs":
input_dim += args.cif_embedding_dim
elif state == "decoder_states":
input_dim += args.decoder_embed_dim
elif state == "pred_embeds":
input_dim += args.decoder_embed_dim
else:
                raise NotImplementedError("Unknown input state: %s" % state)
self.input_proj = nn.Linear(input_dim, args.decoder_embed_dim)
# build position embedding
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
args.decoder_embed_dim, # decoder embed dim
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
# build input dropout
self.dropout_module = FairseqDropout(
args.corr_dropout,
module_name=module_name_fordropout(self.__class__.__name__),
)
# build transformer layers
args_dict = vars(args)
args_dict["dropout"] = args_dict["corr_dropout"]
args_dict["attention_dropout"] = args_dict["corr_attention_dropout"]
args_dict["activation_dropout"] = args_dict["corr_activation_dropout"]
args = argparse.Namespace(**args_dict)
self.cordec_tfm_layers = nn.ModuleList(
[self.build_transformer_layer(args) for _ in range(args.num_cordec_layers)]
)
# build final layer norm
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(args.decoder_embed_dim)
else:
self.layer_norm = None
# build final projection
output_dim = 0
for state in self._cordec_output_state:
if state == "cordec_state":
output_dim += args.cif_embedding_dim
elif state == "bn_embeds":
output_dim += args.uem_bn_proj_dim
else:
                raise NotImplementedError("Unknown output state: %s" % state)
self.output_proj = nn.Linear(output_dim, args.decoder_embed_dim)
self.stop_bn_grad = args.stop_bn_grad
# build prediction projection
self.pred_proj = nn.Linear(args.decoder_embed_dim, self.dict_size)
def build_transformer_layer(self, cfg, no_encoder_attn=True):
layer = transformer_layer.TransformerDecoderLayerBaseDirectArgs(
cfg, no_encoder_attn=no_encoder_attn
)
return layer
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
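    # Example (illustrative): for dim = 3 the buffered mask above is
    #     [[0., -inf, -inf],
    #      [0.,   0., -inf],
    #      [0.,   0.,   0.]]
    # so position t can attend only to positions <= t during teacher forcing.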
def forward(
self,
cif_outputs,
decoder_states,
pred_embeds=None,
uem_bn_embeds=None,
prev_output_tokens=None,
incremental_state=None,
):
if uem_bn_embeds is not None and self.stop_bn_grad:
uem_bn_embeds = uem_bn_embeds.detach()
x = cif_outputs
# regularize input lengths
if prev_output_tokens is not None:
min_reg_len = min(x.size(1), prev_output_tokens.size(1))
else:
min_reg_len = x.size(1)
x = x[:, :min_reg_len, :]
decoder_states = decoder_states[:, :min_reg_len, :]
prev_output_tokens = (
prev_output_tokens[:, :min_reg_len]
if prev_output_tokens is not None
else None
)
pred_embeds = (
pred_embeds[:, :, :min_reg_len, :] if pred_embeds is not None else None
)
pred_embeds = (
pred_embeds.view(-1, pred_embeds.size(-2), pred_embeds.size(-1))
if pred_embeds is not None
else None
) # (B x (1 + K)) x T x C
uem_bn_embeds = (
uem_bn_embeds[:, :min_reg_len, :] if uem_bn_embeds is not None else None
) # (B x (1 + K)) x T x C
# Expand size
x = expand_tensor_dim(x, expand_size=self.expand_size, reduce=True)
decoder_states = (
expand_tensor_dim(
decoder_states,
expand_size=self.expand_size,
reduce=True,
)
if decoder_states is not None
else None
)
prev_output_tokens = (
expand_tensor_dim(
prev_output_tokens, expand_size=self.expand_size, reduce=True
)
if prev_output_tokens is not None
else None
)
# Get inputs for correction decoder
input_list = [x, decoder_states]
if "pred_embeds" in self._cordec_input_state and pred_embeds is not None:
input_list.append(pred_embeds)
x = torch.cat(input_list, dim=-1) # B x T x C
x = self.input_proj(x) if self.input_proj is not None else x # B x T x C
# Embed positions & dropout
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
x += positions
x = self.dropout_module(x)
x = x.transpose(0, 1) # T x B x C
# Prepare future mask
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.cordec_tfm_layers):
self_attn_mask = self.buffered_future_mask(x) # T x T
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # B x T
x, _, _ = layer(
x,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
)
inner_states.append(x)
x = x.transpose(0, 1) # B x T x C
if self.layer_norm is not None:
x = self.layer_norm(x)
if self.output_proj is not None:
if uem_bn_embeds is not None and "bn_embeds" in self._cordec_output_state:
x = torch.cat([x, uem_bn_embeds], dim=-1)
x = self.output_proj(x)
x = torch.relu(x)
if self.pred_proj is not None:
x = self.pred_proj(x) # logits, B x T x V
return x, inner_states
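# Illustrative sketch (hypothetical helper, not this repo's generation code):
# interpreting the correction logits. The extra vocabulary index
# (`no_correction_id == len(dict)`, see `dict_size` above) means "keep the
# first-pass token"; any other prediction substitutes the token.
def _example_apply_corrections(cordec_logits, first_pass_tokens, no_correction_id):
    import torch
    preds = cordec_logits.argmax(dim=-1)  # B x T over the extended vocabulary
    keep = preds == no_correction_id
    return torch.where(keep, first_pass_tokens, preds)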
# Preparation Module
class PrepareModule(nn.Module):
def __init__(self, args, dict):
super().__init__()
# dictionary settings
self.dict = dict
self.padding_id = dict.pad()
self.no_correction_id = len(dict) # max_id + 1
# Settings about generating labels for correction modules
self.corr_tgt_type = args.corr_tgt_type
self.K_corr_samp = args.K_corr_samp
def forward(self, encoder_out, decoder_out, prev_output_tokens=None, targets=None):
decoder_logits = decoder_out[0] # B x T x V
decoder_states = decoder_out[-1] # B x T x C
cif_outputs = encoder_out["encoder_out"][0] # B x T x C
cif_padding_mask = encoder_out["encoder_padding_mask"][0] # B x T
# Regularize length
cif_len = cif_padding_mask.size(-1)
tgt_len = prev_output_tokens.size(-1)
reg_min_len = min(cif_len, tgt_len)
decoder_logits = decoder_logits[:, :reg_min_len, :]
decoder_states = decoder_states[:, :reg_min_len, :]
cif_outputs = cif_outputs[:, :reg_min_len, :]
cif_padding_mask = cif_padding_mask[:, :reg_min_len]
# Prepare labels for UEM and Correction Decoder
uem_labels, cordec_labels = None, None
uem_padding_mask, sample_full_preds = None, None
if targets is not None:
targets = targets[:, :reg_min_len] # B x T
(
uem_labels_list,
uem_padding_mask_list,
cordec_ce_labels_list,
sample_pred_list,
) = ([], [], [], [])
if "tf-argmax" in self.corr_tgt_type:
argmax_preds = torch.argmax(decoder_logits, dim=-1) # B x T
uem_argmax_labels = (targets != argmax_preds).int() # B x T
cordec_ce_labels = targets * uem_argmax_labels # B x T
uem_padding_mask = (targets != self.padding_id).int()
uem_argmax_labels = expand_tensor_dim(
uem_argmax_labels, expand_size=1
) # B x 1 x T
cordec_ce_labels = expand_tensor_dim(
cordec_ce_labels, expand_size=1
) # B x 1 x T
uem_padding_mask = expand_tensor_dim(
uem_padding_mask, expand_size=1
) # B x 1 x T
argmax_preds = expand_tensor_dim(
argmax_preds, expand_size=1
) # B x 1 x T
argmax_preds = torch.where(
expand_tensor_dim(targets, expand_size=1) != self.padding_id,
argmax_preds,
expand_tensor_dim(targets, expand_size=1),
)
uem_labels_list.append(uem_argmax_labels)
uem_padding_mask_list.append(uem_padding_mask)
cordec_ce_labels_list.append(cordec_ce_labels)
sample_pred_list.append(argmax_preds)
if "tf-sample" in self.corr_tgt_type:
bsz, tsz = targets.size()
_, _, vsz = decoder_logits.size()
expd_targets = expand_tensor_dim(
targets, expand_size=self.K_corr_samp, reduce=False
) # B x K x T
dec_probs = self.get_probs_from_logits(
decoder_logits, log_probs=False
) # B x T x V
expd_dec_probs = expand_tensor_dim(
dec_probs, expand_size=self.K_corr_samp, reduce=False
) # B x K x T x V
sampled_preds = torch.multinomial(
expd_dec_probs.view(-1, vsz), # (B x K x T) x V
num_samples=1,
replacement=True,
) # (B x K x T) x 1
sampled_preds = sampled_preds.view(
bsz, self.K_corr_samp, tsz
) # B x K x T
sampled_preds = torch.where(
expd_targets != self.padding_id,
sampled_preds,
expd_targets,
)
uem_sample_labels = (sampled_preds != expd_targets).int() # B x K x T
cordec_ce_labels = expd_targets * uem_sample_labels # B x K x T
uem_labels_list.append(uem_sample_labels)
uem_padding_mask_list.append((expd_targets != self.padding_id).int())
cordec_ce_labels_list.append(cordec_ce_labels)
sample_pred_list.append(sampled_preds)
uem_labels = torch.cat(uem_labels_list, dim=1) # B x (K + 1) x T
cordec_labels = torch.cat(cordec_ce_labels_list, dim=1) # B x (K + 1) x T
uem_padding_mask = torch.cat(
uem_padding_mask_list, dim=1
) # B x (K + 1) x T
sample_full_preds = torch.cat(sample_pred_list, dim=1) # B x (K + 1) x T
uem_labels = (
uem_labels * uem_padding_mask
            ) # Zero out confidence labels at padded positions
            # Replace zero values in cordec_labels with the no-correction mark (done below)
cordec_labels = torch.where(
uem_padding_mask.bool(),
cordec_labels,
self.padding_id * torch.ones_like(cordec_labels),
) # Replace padded locations with pad token
cordec_labels = torch.where(
cordec_labels.float() != 0.0,
cordec_labels,
self.no_correction_id * torch.ones_like(cordec_labels),
            ) # Replace zero elements with the no_correction token
return (
cif_outputs,
cif_padding_mask,
decoder_states,
decoder_logits,
uem_labels,
cordec_labels,
uem_padding_mask,
sample_full_preds,
)
@staticmethod
def get_probs_from_logits(logits, log_probs=False):
"""
Get normalized probabilities (or log probs) from logits.
"""
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
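# Toy example (illustrative): how PrepareModule derives its "tf-argmax"
# labels. With target [5, 7, 9] and argmax prediction [5, 8, 9], the UEM label
# is [0, 1, 0] (only token 1 is wrong) and the correction CE label keeps the
# ground truth at the erroneous position; zeros are later rewritten to the
# no-correction id so only errors supervise the correction decoder.
def _example_tf_argmax_labels():
    import torch
    targets = torch.tensor([[5, 7, 9]])
    argmax_preds = torch.tensor([[5, 8, 9]])
    uem_labels = (targets != argmax_preds).int()  # tensor([[0, 1, 0]])
    cordec_ce_labels = targets * uem_labels  # tensor([[0, 7, 0]])
    return uem_labels, cordec_ce_labels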
# Model Main Body
@register_model("s2t_cif_transformer")
class S2TCifTransformerModel(FairseqEncoderDecoderModel):
"""Adapted Transformer model (https://arxiv.org/abs/1706.03762) for
speech-to-text tasks. The Transformer encoder/decoder remains the same.
A trainable input subsampler is prepended to the Transformer encoder to
project inputs into the encoder dimension as well as down-sample input
sequence for computational efficiency."""
def __init__(self, encoder, decoder, uem=None, cordec=None, ppm=None, args=None):
# Register encoder and decoder
super().__init__(encoder, decoder)
# Register correction module
self.uem = uem
self.cordec = cordec
self.ppm = ppm
# Register args
if args is not None:
self.args = args
if args.apply_bert_distill:
logger.info("Use BERT distillation. ")
self.tokenwise_cif_dis_proj = nn.Linear(
args.cif_embedding_dim, args.bert_distill_feat_dim
)
self.semantic_cif_dis_proj = nn.Linear(
args.cif_embedding_dim, args.bert_distill_feat_dim
)
self.tokenwise_dec_state_proj = nn.Linear(
args.decoder_embed_dim, args.bert_distill_feat_dim
)
# Load initial model from target path
self.init_model_path = args.load_init_asr_model_from
if self.training and self.init_model_path:
logger.info("Load initial model from %s" % self.init_model_path)
state = torch.load(self.init_model_path, map_location=torch.device("cpu"))
params_dict = dict()
            for k, v in state["model"].items():
                logger.info(k)
                params_dict[k] = v
            self.load_state_dict(params_dict, strict=False)
            logger.info(
                "Remember to convert batch normalization from train mode to eval mode!"
            )
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# input
parser.add_argument(
"--frontend-type",
type=str,
default="conv1d",
help="the type of frontend acoustic low-level extraction",
)
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
parser.add_argument(
"--conv2d-output-channels",
type=str,
help="# of channels in Conv2d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers",
type=int,
metavar="N",
help="num encoder layers",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-dropout",
type=float,
help="decoder dropout probability",
)
parser.add_argument(
"--decoder-attention-dropout",
type=float,
help="decoder dropout probability for attention weights",
)
parser.add_argument(
"--decoder-activation-dropout",
type=float,
help="decoder dropout probability after activation in FFN.",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block,"
" if true apply ln before each module in each block,"
" else apply after each residual outputs",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
parser.add_argument(
"--cross-self-attention",
type=bool,
default=False,
)
parser.add_argument(
"--no-decoder-final-norm",
type=bool,
)
parser.add_argument(
"--decoder-layerdrop",
type=float,
)
parser.add_argument(
"--do-encoder-attn",
action="store_true",
)
parser.add_argument(
"--decoder-enc-attn-kv-type",
type=str,
)
parser.add_argument(
"--do-decoder-nar",
action="store_true", # default false
help="whether to conduct non-auto-regressive (NAR) decoding for ASR decoder",
)
parser.add_argument(
"--decoder-nar-pad-type",
type=str,
help="specify the type of NAR decoder input padding mask, options: triangle, full",
)
parser.add_argument(
"--add-pos-to-cif",
action="store_true",
help="whether to add position encoding or embedding to cif inputs of the NAR decoder",
)
# Encoder layer down-sampling settings
parser.add_argument(
"--layer-downsampling",
action="store_true",
help="whether conduct down-sampling between layers",
)
parser.add_argument(
"--pooling-layer-ids",
type=str,
)
# Cif settings
parser.add_argument(
"--cif-embedding-dim",
type=int,
help="the dimension of the inputs of cif module",
)
parser.add_argument(
"--produce-weight-type",
type=str,
help="choose how to produce the weight for accumulation",
)
parser.add_argument(
"--cif-threshold", type=float, help="the threshold of firing"
)
parser.add_argument(
"--conv-cif-layer-num",
type=int,
help="the number of convolutional layers for cif weight generation",
)
parser.add_argument(
"--conv-cif-width",
type=int,
help="the width of kernel of convolutional layers",
)
parser.add_argument(
"--conv-cif-output-channels-num",
type=int,
help="the number of output channels of cif convolutional layers",
)
parser.add_argument(
"--conv-cif-dropout",
type=float,
)
parser.add_argument(
"--dense-cif-units-num",
type=int,
)
parser.add_argument("--apply-scaling", type=bool, default=True)
parser.add_argument(
"--apply-tail-handling",
type=bool,
default=True,
)
parser.add_argument(
"--tail-handling-firing-threshold",
type=float,
)
parser.add_argument(
"--add-cif-ctxt-layers",
action="store_true",
)
parser.add_argument(
"--cif-ctxt-layers",
type=int,
)
parser.add_argument(
"--cif-ctxt-embed-dim",
type=int,
)
parser.add_argument(
"--cif-ctxt-ffn-embed-dim",
type=int,
)
parser.add_argument(
"--cif-ctxt-attention-heads",
type=int,
)
parser.add_argument(
"--cif-ctxt-dropout",
type=float,
)
parser.add_argument(
"--cif-ctxt-activation-dropout",
type=float,
)
parser.add_argument(
"--cif-ctxt-attention-dropout",
type=float,
)
parser.add_argument(
"--cif-ctxt-normalize-before",
type=bool,
)
# Other settings
parser.add_argument(
"--calulate-ctc-logits",
type=bool,
default=True,
)
parser.add_argument("--use-ctc-constraint", action="store_true")
parser.add_argument(
"--ctc-prob-threshold",
type=float,
default=0.5,
)
# Uncertainty Estimation Module (UEM) settings
parser.add_argument("--use-uem", action="store_true") # args.use_uem
parser.add_argument(
"--uem-input-state",
type=str,
default="cif_outputs,decoder_states,logits",
)
parser.add_argument(
"--use-uem-bn-layer",
action="store_true",
)
parser.add_argument(
"--uem-bn-proj-dim",
type=int,
default=512,
)
# Correction Decoder (Cordec) settings
parser.add_argument("--use-cordec", action="store_true")
parser.add_argument(
"--num-cordec-layers",
type=int,
default=4, # could be 4 or 2
)
parser.add_argument(
"--uncertainty-embed-fusion-mode",
type=str,
default="top-concat",
)
parser.add_argument(
"--cordec-input-state",
type=str,
default="cif_outputs,decoder_states",
)
parser.add_argument(
"--cordec-output-state",
type=str,
default="cordec_state,bn_embeds",
)
parser.add_argument(
"--corr-tgt-type",
type=str,
default="tf-argmax,tf-sample",
)
parser.add_argument(
"--K-corr-samp",
type=int,
default=5,
)
parser.add_argument(
"--freeze-asr-main-body",
action="store_true",
)
parser.add_argument(
"--load-init-asr-model-from",
type=str,
default="",
)
parser.add_argument(
"--corr-dropout",
type=float,
metavar="D",
help="correction module dropout probability",
)
parser.add_argument(
"--corr-attention-dropout",
type=float,
metavar="D",
help="correction module dropout probability for attention weights",
)
parser.add_argument(
"--corr-activation-dropout",
"--corr-relu-dropout",
type=float,
metavar="D",
help="correction module dropout probability " "after activation in FFN.",
)
parser.add_argument("--stop-bn-grad", action="store_true")
parser.add_argument(
"--fetch-decoder-states-from", type=str, default="tfm_outputs"
)
parser.add_argument(
"--encoder-attn-type",
type=str,
default="normal",
)
# Conformer encoder settings
parser.add_argument("--apply-conformer-encoder", action="store_true")
parser.add_argument(
"--conformer-attn-type",
type=str,
default="espnet",
)
parser.add_argument(
"--conformer-pos-enc-type",
type=str,
default="rel_pos",
)
parser.add_argument(
"--conformer-depthwise-conv-kernel-size",
type=int,
default=15,
)
# BERT Distillation Settings
parser.add_argument(
"--apply-bert-distill",
action="store_true",
)
parser.add_argument(
"--use-contextualized-cif-feats-for-distill",
action="store_true",
)
parser.add_argument(
"--bert-distill-feat-dim",
type=int,
default=768,
)
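        # Example (illustrative) flag combination that enables the full
        # correction stack on top of a frozen ASR main body:
        #   --use-uem --use-uem-bn-layer --use-cordec \
        #   --corr-tgt-type tf-argmax,tf-sample --K-corr-samp 5 \
        #   --freeze-asr-main-body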
@classmethod
def build_encoder(cls, args, task):
if args.layer_downsampling:
if args.apply_conformer_encoder:
encoder = S2TCifConformerLayerPoolingEncoder(args, task)
else:
encoder = S2TCifTransformerLayerPoolingEncoder(args, task)
else:
encoder = S2TCifTransformerEncoder(args, task)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
return CifArTransformerDecoder(args, task.target_dictionary, embed_tokens)
@classmethod
def build_uncertainty_estimation_module(cls, args, task):
return UncertaintyEstimationModule(args, task.target_dictionary)
@classmethod
def build_correction_decoder(cls, args, task):
return CorrectionDecoder(args, task.target_dictionary)
@classmethod
def build_prepare_module(cls, args, task):
return PrepareModule(args, task.target_dictionary)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
# Main body
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
# Extra modules
ppm, uem, cordec = None, None, None
if args.use_uem or args.use_cordec:
ppm = cls.build_prepare_module(args, task=task)
if args.use_uem:
uem = cls.build_uncertainty_estimation_module(args, task=task)
if args.use_cordec:
cordec = cls.build_correction_decoder(args, task=task)
return cls(encoder, decoder, uem=uem, cordec=cordec, ppm=ppm, args=args)
@staticmethod
def get_probs_from_logits(logits, log_probs=False):
"""
Get normalized probabilities (or log probs) from logits.
"""
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(
self, src_tokens, src_lengths, prev_output_tokens, target_lengths, **kwargs
):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
method overwrites the forward method definition without **kwargs.
"""
if self.args.freeze_asr_main_body:
with torch.no_grad():
encoder_out = self.encoder(
src_tokens=src_tokens,
src_lengths=src_lengths,
target_lengths=target_lengths,
)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
else:
encoder_out = self.encoder(
src_tokens=src_tokens,
src_lengths=src_lengths,
target_lengths=target_lengths,
)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
cordec_labels, cordec_logits = None, None
uem_labels, uem_logits, uem_padding_mask, cordec_full_labels = (
None,
None,
None,
None,
)
if self.ppm is not None:
            target = kwargs.get("target", None)  # B x T
(
cif_outputs,
cif_padding_mask,
decoder_states,
decoder_logits,
uem_labels,
cordec_labels,
uem_padding_mask,
sampled_full_labels,
) = self.ppm(
encoder_out=encoder_out,
decoder_out=decoder_out,
prev_output_tokens=prev_output_tokens,
targets=target,
)
uem_bn_embeds = None
pred_embeds = self.decoder.embed_tokens(
sampled_full_labels
).detach() # B x (1 + K) x T x C
if self.uem is not None:
uem_logits, uem_bn_embeds = self.uem(
cif_outputs=cif_outputs,
decoder_states=decoder_states,
logits=decoder_logits,
pred_embeds=pred_embeds,
prev_output_tokens=prev_output_tokens,
)
if self.cordec is not None:
cordec_logits, _ = self.cordec(
cif_outputs=cif_outputs,
decoder_states=decoder_states,
pred_embeds=pred_embeds,
uem_bn_embeds=uem_bn_embeds,
prev_output_tokens=prev_output_tokens,
)
token_distill_cif_feat = None
semantic_distill_cif_feat = None
token_distill_decoder_states = None
if self.args.apply_bert_distill:
# obtain raw cif features / contextualized cif features
if self.args.use_contextualized_cif_feats_for_distill:
cif_outputs_for_distill = encoder_out["ctxt_cif_out"][0]
else:
cif_outputs_for_distill = encoder_out["encoder_out"][0]
cif_padding_for_distill = encoder_out["encoder_padding_mask"][0]
cif_outputs_for_distill = (
cif_outputs_for_distill * cif_padding_for_distill.unsqueeze(-1)
) # B x T x C
# process tokenwise acoustic cif feats
token_distill_cif_feat = cif_outputs_for_distill # B x T x C
token_distill_cif_feat = self.tokenwise_cif_dis_proj(
token_distill_cif_feat
) # B x T x C_bert
# process semantic acoustic cif feats
cif_lengths = (
cif_padding_for_distill.int().sum(-1).type_as(cif_outputs_for_distill)
)
cif_length_scale = torch.reciprocal(cif_lengths).type_as(
cif_outputs_for_distill
) # B
semantic_distill_cif_feat = cif_outputs_for_distill.sum(1) # B x C
semantic_distill_cif_feat = (
semantic_distill_cif_feat * cif_length_scale.unsqueeze(-1)
) # B x C
semantic_distill_cif_feat = self.semantic_cif_dis_proj(
semantic_distill_cif_feat
) # B x C_bert
# process decoder states for bert distillation
token_distill_decoder_states = decoder_out[-1] # B x T x C
token_distill_decoder_states = self.tokenwise_dec_state_proj(
token_distill_decoder_states
)
# return decoder_out
final_outputs = {
# Encoder part outputs
"encoder_padding_mask": encoder_out["raw_encoder_padding_mask"][0], # B x T
"ctc_logits": encoder_out["ctc_logits"][0].transpose(0, 1), # B x T x V
# Cif module outputs
"quantity_out": encoder_out["quantity_out"][
0
], # Quantity out for quantity loss calculation
"ctc_align_outputs": encoder_out["ctc_align_outputs"][0]
if encoder_out["ctc_align_outputs"]
else None, # B x T
"cif_out": encoder_out["encoder_out"][
0
], # CIF out for decoder prediction, B x T x C
"cif_out_padding_mask": encoder_out["encoder_padding_mask"][0], # B x T
# Decoder part outputs
"decoder_out": decoder_out, # Decoder outputs (which is final logits for ce calculation), B x T x V
# UEM & Cordec outputs
"uem_logits": uem_logits,
"uem_labels": uem_labels,
"cordec_logits": cordec_logits,
"cordec_labels": cordec_labels,
"cordec_full_labels": cordec_full_labels,
"uem_padding_mask": uem_padding_mask, # B x T
# BERT distillation outputs
"token_distill_cif_feat": token_distill_cif_feat,
"semantic_distill_cif_feat": semantic_distill_cif_feat,
"token_distill_decoder_states": token_distill_decoder_states,
}
return final_outputs
def get_cif_output(self, src_tokens, src_lengths, target_lengths=None):
with torch.no_grad():
encoder_out = self.encoder(
src_tokens=src_tokens,
src_lengths=src_lengths,
target_lengths=target_lengths,
)
return {
# Cif outputs
"cif_out": encoder_out["encoder_out"][0], # B x T x C
"cif_out_padding_mask": encoder_out["encoder_padding_mask"][0], # B x T
"cif_durations": encoder_out["cif_durations"][0],
# Raw encoder acoustic outputs
"encoder_out": encoder_out["raw_encoder_out"][0], # T x B x C
"encoder_padding_mask": encoder_out["raw_encoder_padding_mask"][0], # B x T
}
def step_forward_decoder(
self, prev_decoded_tokens, cif_outputs, incremental_state=None
):
        # Wrap every tensor entry in a list to match the encoder-out dict format
        for k, v in cif_outputs.items():
            if v is not None:
                cif_outputs[k] = [v]
cif_outputs["encoder_out"] = cif_outputs["cif_out"]
cif_outputs["encoder_padding_mask"] = cif_outputs["cif_out_padding_mask"]
cif_outputs["raw_encoder_out"] = cif_outputs["raw_encoder_out"]
cif_outputs["raw_encoder_padding_mask"] = cif_outputs[
"raw_encoder_padding_mask"
]
with torch.no_grad():
decoder_out = self.decoder(
prev_output_tokens=prev_decoded_tokens,
encoder_out=cif_outputs,
incremental_state=incremental_state,
)
return decoder_out
def forward_uem(
self, cif_outputs, decoder_states, decoder_logits, prev_output_tokens
):
return self.uem(
cif_outputs=cif_outputs,
decoder_states=decoder_states,
logits=decoder_logits,
prev_output_tokens=prev_output_tokens,
)
def forward_cordec(
self, cif_outputs, decoder_states, uem_bn_embeds, prev_output_tokens
):
return self.cordec(
cif_outputs=cif_outputs,
decoder_states=decoder_states,
uem_bn_embeds=uem_bn_embeds,
prev_output_tokens=prev_output_tokens,
)
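# Hedged sketch (assumption: one plausible inference-time wiring of the helper
# methods above; the repo's actual sequence generator may differ, and the
# expand-size handling inside UEM/Cordec is glossed over here).
def _example_two_pass_decode(model, src_tokens, src_lengths, prev_tokens):
    cif = model.get_cif_output(src_tokens, src_lengths)
    # Keep a handle on the CIF tensor: step_forward_decoder wraps the dict
    # values in lists in place.
    cif_out = cif["cif_out"]
    logits, _, states = model.step_forward_decoder(prev_tokens, cif)
    uem_logits, uem_bn_embeds = model.forward_uem(
        cif_outputs=cif_out,
        decoder_states=states,
        decoder_logits=logits,
        prev_output_tokens=prev_tokens,
    )
    cordec_logits, _ = model.forward_cordec(
        cif_outputs=cif_out,
        decoder_states=states,
        uem_bn_embeds=uem_bn_embeds,
        prev_output_tokens=prev_tokens,
    )
    return cordec_logits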
class S2TCifTransformerEncoder(FairseqEncoder):
"""
Speech-to-text Transformer encoder that consists of
input convolutional subsampler & Transformer-based encoder.
"""
def __init__(self, args, task):
super().__init__(None)
self.encoder_freezing_updates = args.encoder_freezing_updates
self.num_updates = 0
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = task.target_dictionary.pad()
if args.frontend_type == "conv2d":
# Conv2d downsampling is borrowed from espnet
conv_output_channels = [
int(x) for x in args.conv2d_output_channels.split(",")
]
kernel_sizes = [int(k) for k in args.conv_kernel_sizes.split(",")]
self.subsample = Conv2dSubsampler(
idim=args.input_feat_per_channel,
odim=args.encoder_embed_dim,
conv_output_channels=conv_output_channels,
kernel_sizes=kernel_sizes,
)
else:
# Conv1d downsampling is proposed in (https://arxiv.org/abs/1911.08460)
self.subsample = Conv1dSubsampler(
args.input_feat_per_channel * args.input_channels,
args.conv_channels,
args.encoder_embed_dim,
[int(k) for k in args.conv_kernel_sizes.split(",")],
)
# self.downsample_rate = 1.0 / (2 ** len(args.conv_kernel_sizes.split(",")))
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.transformer_layers = nn.ModuleList(
[TransformerEncoderLayer(args) for _ in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
# build cif module
self.use_ctc_constraint = args.use_ctc_constraint
self.ctc_prob_threshold = args.ctc_prob_threshold
self.cif = CtcConstrainedCifMiddleware(args)
# build ctc projection
self.ctc_proj = None
if args.calulate_ctc_logits:
self.ctc_proj = Linear(
args.encoder_embed_dim, len(task.target_dictionary)
).cuda()
def _forward(
self, src_tokens, src_lengths, target_lengths=None, return_all_hiddens=False
):
x, input_lengths = self.subsample(src_tokens, src_lengths)
x = self.embed_scale * x
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = self.dropout_module(x)
encoder_states = []
for layer in self.transformer_layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
ctc_logits = None
if self.ctc_proj is not None:
ctc_logits = self.ctc_proj(x) # T x B x C
encoder_outputs = {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask]
if encoder_padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"conv_lengths": [input_lengths],
"ctc_logits": [ctc_logits] if ctc_logits is not None else [], # T x B x C
}
if self.use_ctc_constraint:
cif_out = self.cif(
encoder_outputs=encoder_outputs,
target_lengths=target_lengths if self.training else None,
input_lengths=input_lengths,
ctc_logits=ctc_logits,
)
else:
cif_out = self.cif(
encoder_outputs=encoder_outputs,
target_lengths=target_lengths if self.training else None,
input_lengths=input_lengths,
)
encoder_outputs["raw_encoder_out"] = [x]
encoder_outputs["raw_encoder_padding_mask"] = [encoder_padding_mask]
encoder_outputs["encoder_out"] = [cif_out["cif_out"]] # B x T x C
encoder_outputs["encoder_padding_mask"] = [
cif_out["cif_out_padding_mask"].bool()
] # B x T
# encoder_outputs["encoder_padding_mask"] = [~cif_out["cif_out_padding_mask"].bool()]
encoder_outputs["quantity_out"] = [cif_out["quantity_out"]]
encoder_outputs["cif_durations"] = [cif_out["cif_durations"]]
encoder_outputs["ctc_align_outputs"] = (
[cif_out["ctc_align_outputs"]] if self.use_ctc_constraint else None
)
return encoder_outputs
def forward(
self, src_tokens, src_lengths, target_lengths=None, return_all_hiddens=False
):
if self.num_updates < self.encoder_freezing_updates:
with torch.no_grad():
x = self._forward(
src_tokens,
src_lengths,
target_lengths,
return_all_hiddens=return_all_hiddens,
)
else:
x = self._forward(
src_tokens,
src_lengths,
target_lengths,
return_all_hiddens=return_all_hiddens,
)
return x
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(0, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
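# Illustrative sketch (hypothetical consumer of the encoder above): the
# encoder returns frame-level acoustic states alongside token-level CIF
# states and the quantity output consumed by the quantity loss.
def _example_encoder_contract(encoder, src_tokens, src_lengths):
    out = encoder(src_tokens, src_lengths)
    raw = out["raw_encoder_out"][0]  # T x B x C, frame-level states
    cif = out["encoder_out"][0]  # B x T' x C, token-level CIF states
    quantity = out["quantity_out"][0]  # B, summed CIF weights
    return raw, cif, quantity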
class S2TCifTransformerLayerPoolingEncoder(FairseqEncoder):
"""Speech-to-text Transformer encoder that consists of input subsampler and
Transformer encoder."""
def __init__(self, args, task):
super().__init__(None)
self.encoder_freezing_updates = args.encoder_freezing_updates
self.num_updates = 0
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = 1
if args.frontend_type == "conv2d":
# Conv2d downsampling is borrowed from espnet
conv_output_channels = [
int(x) for x in args.conv2d_output_channels.split(",")
]
kernel_sizes = [int(k) for k in args.conv_kernel_sizes.split(",")]
self.subsample = Conv2dSubsampler(
idim=args.input_feat_per_channel,
odim=args.encoder_embed_dim,
conv_output_channels=conv_output_channels,
kernel_sizes=kernel_sizes,
)
else:
# Conv1d downsampling is proposed in (https://arxiv.org/abs/1911.08460)
self.subsample = Conv1dSubsampler(
args.input_feat_per_channel * args.input_channels,
args.conv_channels,
args.encoder_embed_dim,
[int(k) for k in args.conv_kernel_sizes.split(",")],
)
# self.downsample_rate = 1.0 / (2 ** len(args.conv_kernel_sizes.split(",")))
self.embed_positions = PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
self.padding_idx,
)
self.transformer_layers = nn.ModuleList(
[TransformerEncoderLayer(args) for _ in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
# build cif module
self.use_ctc_constraint = args.use_ctc_constraint
self.ctc_prob_threshold = args.ctc_prob_threshold
if not self.use_ctc_constraint:
self.cif = CifMiddleware(args)
else:
self.cif = CtcConstrainedCifMiddleware(args)
# build ctc projection
self.ctc_proj = None
if args.calulate_ctc_logits:
self.ctc_proj = Linear(
args.encoder_embed_dim, len(task.target_dictionary)
).cuda()
# Layer Pooling settings
self.layer_downsampling = args.layer_downsampling
self.pooling_layer_ids = (
[int(num) for num in args.pooling_layer_ids.split(",")]
if self.layer_downsampling
else None
)
self.pooling_layer = (
nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))
if self.layer_downsampling
else None
)
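        # Example (illustrative): with kernel_size=(2, 1) and stride=(2, 1),
        # an input of shape B x 1 x T x D is halved along time (T=100 -> 50)
        # while D is untouched; the padding mask is max-pooled the same way,
        # so a pooled frame stays valid if either of its two source frames
        # was valid.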
def _forward(
self, src_tokens, src_lengths, target_lengths=None, return_all_hiddens=False
):
# Convolutional Subsampler
x, input_lengths = self.subsample(src_tokens, src_lengths)
# Transformer input preparation
x = self.embed_scale * x
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = self.dropout_module(x)
encoder_states = []
for layer_id, layer in enumerate(self.transformer_layers):
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_downsampling:
if (layer_id + 1) in self.pooling_layer_ids:
x = x.transpose(0, 1).unsqueeze(
dim=1
) # N (B) x C (1) x H (T) x W (D)
x = self.pooling_layer(x) # N (B) x C (1) x H(T)/2 x W (D)
x = x.squeeze(dim=1).transpose(0, 1)
encoder_padding_mask = (
(~encoder_padding_mask)
.float()
.unsqueeze(dim=-1)
.unsqueeze(dim=1)
)
encoder_padding_mask = (
self.pooling_layer(encoder_padding_mask)
.squeeze(dim=1)
.squeeze(dim=-1)
)
encoder_padding_mask = ~encoder_padding_mask.bool()
if self.layer_norm is not None:
x = self.layer_norm(x)
ctc_logits = None
if self.ctc_proj is not None:
ctc_logits = self.ctc_proj(x)
encoder_outputs = {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"conv_lengths": [input_lengths],
"ctc_logits": [ctc_logits] if ctc_logits is not None else [], # T x B x C
}
if self.use_ctc_constraint:
cif_out = self.cif(
encoder_outputs=encoder_outputs,
target_lengths=target_lengths if self.training else None,
input_lengths=input_lengths,
ctc_logits=ctc_logits,
)
else:
cif_out = self.cif(
encoder_outputs=encoder_outputs,
target_lengths=target_lengths if self.training else None,
input_lengths=input_lengths,
)
encoder_outputs["raw_encoder_out"] = [x]
encoder_outputs["raw_encoder_padding_mask"] = [encoder_padding_mask]
encoder_outputs["encoder_out"] = [cif_out["cif_out"]] # B x T x C
encoder_outputs["encoder_padding_mask"] = [
cif_out["cif_out_padding_mask"].bool()
] # B x T
encoder_outputs["quantity_out"] = [cif_out["quantity_out"]]
encoder_outputs["cif_durations"] = [cif_out["cif_durations"]]
encoder_outputs["ctc_align_outputs"] = (
[cif_out["ctc_align_outputs"]] if self.use_ctc_constraint else None
)
return encoder_outputs
def forward(
self, src_tokens, src_lengths, target_lengths=None, return_all_hiddens=False
):
if self.num_updates < self.encoder_freezing_updates:
with torch.no_grad():
x = self._forward(
src_tokens,
src_lengths,
target_lengths,
return_all_hiddens=return_all_hiddens,
)
else:
x = self._forward(
src_tokens,
src_lengths,
target_lengths,
return_all_hiddens=return_all_hiddens,
)
return x
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(0, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
class S2TCifConformerLayerPoolingEncoder(FairseqEncoder):
"""Speech-to-text Transformer encoder that consists of input subsampler and
Transformer encoder."""
def __init__(self, args, task):
super().__init__(None)
self.encoder_freezing_updates = args.encoder_freezing_updates
self.num_updates = 0
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = 1
if args.frontend_type == "conv2d":
# Conv2d downsampling is borrowed from espnet
conv_output_channels = [
int(x) for x in args.conv2d_output_channels.split(",")
]
kernel_sizes = [int(k) for k in args.conv_kernel_sizes.split(",")]
self.subsample = Conv2dSubsampler(
idim=args.input_feat_per_channel,
odim=args.encoder_embed_dim,
conv_output_channels=conv_output_channels,
kernel_sizes=kernel_sizes,
)
else:
# Conv1d downsampling is proposed in (https://arxiv.org/abs/1911.08460)
self.subsample = Conv1dSubsampler(
args.input_feat_per_channel * args.input_channels,
args.conv_channels,
args.encoder_embed_dim,
[int(k) for k in args.conv_kernel_sizes.split(",")],
)
# self.downsample_rate = 1.0 / (2 ** len(args.conv_kernel_sizes.split(",")))
self.pos_enc_type = args.conformer_pos_enc_type
if self.pos_enc_type == "rel_pos":
self.embed_positions = RelPositionalEncoding(
args.max_source_positions, args.encoder_embed_dim
)
else:
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.linear = torch.nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.dropout = torch.nn.Dropout(args.dropout)
self.conformer_layers = torch.nn.ModuleList(
[
ConformerEncoderLayer(
embed_dim=args.encoder_embed_dim,
ffn_embed_dim=args.encoder_ffn_embed_dim,
attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
depthwise_conv_kernel_size=args.conformer_depthwise_conv_kernel_size,
attn_type=args.conformer_attn_type,
pos_enc_type=args.conformer_pos_enc_type,
use_fp16=args.fp16,
)
for _ in range(args.encoder_layers)
]
)
# build cif module
self.use_ctc_constraint = args.use_ctc_constraint
self.ctc_prob_threshold = args.ctc_prob_threshold
if not self.use_ctc_constraint:
self.cif = CifMiddleware(args)
else:
self.cif = CtcConstrainedCifMiddleware(args)
# build ctc projection
self.ctc_proj = None
if args.calulate_ctc_logits:
self.ctc_proj = Linear(
args.encoder_embed_dim, len(task.target_dictionary)
).cuda()
# Layer Pooling settings
self.layer_downsampling = args.layer_downsampling
self.pooling_layer_ids = (
[int(num) for num in args.pooling_layer_ids.split(",")]
if self.layer_downsampling
else None
)
self.pooling_layer = (
nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))
if self.layer_downsampling
else None
)
def _forward(
self, src_tokens, src_lengths, target_lengths=None, return_all_hiddens=False
):
if torch.isnan(src_tokens).sum() != 0:
print("src failure!!!")
            print(src_tokens)
# Convolutional Subsampler
x, input_lengths = self.subsample(src_tokens, src_lengths)
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
if torch.isnan(x).sum() != 0:
print("conv failure!!!")
print(x)
# Prepare inputs for Conformer layers
orig_x = x
if torch.isnan(x).sum() != 0:
print("TFM inputs orig_x linear failure!!!")
x = self.embed_scale * x
if torch.isnan(x).sum() != 0:
print("TFM inputs after scaling linear failure!!!")
if self.pos_enc_type == "rel_pos":
positions = self.embed_positions(x)
else:
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
positions = None
x = self.linear(x)
if torch.isnan(x).sum() != 0:
print("TFM inputs after linear failure!!!")
x = self.dropout_module(x) # T x B x C
if torch.isnan(x).sum() != 0:
print("TFM inputs failure!!!")
print(orig_x.size())
# Forward Conformer layers
encoder_states = []
for layer_id, layer in enumerate(self.conformer_layers):
# forward each conformer layer
x, _ = layer(x, encoder_padding_mask, positions)
if torch.isnan(x).sum() != 0:
print("output failure!!! @ %d" % layer_id)
if return_all_hiddens:
encoder_states.append(x)
# if apply layer downsampling
if self.layer_downsampling:
if (layer_id + 1) in self.pooling_layer_ids:
# Update flowing data x
x = x.transpose(0, 1).unsqueeze(
dim=1
) # N (B) x C (1) x H (T) x W (D)
x = self.pooling_layer(x) # N (B) x C (1) x H(T)/2 x W (D)
x = x.squeeze(dim=1).transpose(0, 1) # T/2 x B x C
# Update padding mask
encoder_padding_mask = (
(~encoder_padding_mask)
.float()
.unsqueeze(dim=-1)
.unsqueeze(dim=1)
)
encoder_padding_mask = (
self.pooling_layer(encoder_padding_mask)
.squeeze(dim=1)
.squeeze(dim=-1)
)
encoder_padding_mask = ~encoder_padding_mask.bool()
# Update positions
if self.pos_enc_type == "rel_pos":
positions = self.embed_positions(x)
# Forward CTC logits calculation
ctc_logits = None
if self.ctc_proj is not None:
ctc_logits = self.ctc_proj(x)
encoder_outputs = {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"conv_lengths": [input_lengths],
"ctc_logits": [ctc_logits] if ctc_logits is not None else [], # T x B x C
}
if self.use_ctc_constraint:
cif_out = self.cif(
encoder_outputs=encoder_outputs,
target_lengths=target_lengths if self.training else None,
input_lengths=input_lengths,
ctc_logits=ctc_logits,
)
else:
cif_out = self.cif(
encoder_outputs=encoder_outputs,
target_lengths=target_lengths if self.training else None,
input_lengths=input_lengths,
)
if torch.isnan(cif_out["cif_out"]).sum() != 0:
print("cif failure!!!")
encoder_outputs["raw_encoder_out"] = [x]
encoder_outputs["raw_encoder_padding_mask"] = [encoder_padding_mask]
encoder_outputs["encoder_out"] = [cif_out["cif_out"]] # B x T x C
encoder_outputs["encoder_padding_mask"] = [
cif_out["cif_out_padding_mask"].bool()
] # B x T
encoder_outputs["quantity_out"] = [cif_out["quantity_out"]]
encoder_outputs["cif_durations"] = [cif_out["cif_durations"]]
encoder_outputs["ctxt_cif_out"] = [cif_out["ctxt_cif_out"]] # B x T x C
encoder_outputs["ctc_align_outputs"] = (
[cif_out["ctc_align_outputs"]] if self.use_ctc_constraint else None
)
return encoder_outputs
def forward(
self, src_tokens, src_lengths, target_lengths=None, return_all_hiddens=False
):
if self.num_updates < self.encoder_freezing_updates:
with torch.no_grad():
x = self._forward(
src_tokens,
src_lengths,
target_lengths,
return_all_hiddens=return_all_hiddens,
)
else:
x = self._forward(
src_tokens,
src_lengths,
target_lengths,
return_all_hiddens=return_all_hiddens,
)
return x
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(0, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
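# Hedged sketch (assumption: mirrors the AR input fusion suggested by
# `shifted_cif_outs` and the (input_embed_dim + cif_output_dim) input
# projection in the decoder below; the exact fusion is an assumption). The
# CIF sequence is shifted right by one step so the embedding of the previous
# token pairs with the CIF vector of the current step.
def _example_decoder_input_fusion(token_embeds, cif_outs):
    import torch
    bs, t_len, cif_dim = cif_outs.size()  # assumes lengths already regularized
    zeros = torch.zeros(bs, 1, cif_dim, dtype=cif_outs.dtype, device=cif_outs.device)
    shifted_cif = torch.cat([zeros, cif_outs], dim=1)[:, :t_len, :]
    return torch.cat([token_embeds, shifted_cif], dim=-1)  # B x T x (E + C)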
class CifArTransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *cfg.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
        cfg (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg,
dictionary,
embed_tokens,
output_projection=None,
):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
# NAR decoder settings
self.do_decoder_nar = cfg.do_decoder_nar
self.decoder_nar_pad_type = cfg.decoder_nar_pad_type
self.add_pos_to_cif = cfg.add_pos_to_cif
# Dropout settings
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.decoder_layerdrop = cfg.decoder_layerdrop
# Embedding settings
self.share_input_output_embed = cfg.share_decoder_input_output_embed
# Dimension settings
self.cif_output_dim = cfg.cif_embedding_dim
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = cfg.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
if self.do_decoder_nar:
self.embed_tokens = None
else:
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
# Add quantized noise and adaptive inputs
self.quant_noise = None
if not cfg.adaptive_input and cfg.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise_pq,
cfg.quant_noise_pq_block_size,
)
# build input module
if self.do_decoder_nar:
self.project_in_dim = (
Linear(self.cif_output_dim, embed_dim, bias=False)
if embed_dim != self.cif_output_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim, # decoder embed dim
self.padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings and self.add_pos_to_cif
else None
)
else:
self.project_in_dim = (
Linear((input_embed_dim + self.cif_output_dim), embed_dim, bias=False)
if embed_dim != (input_embed_dim + self.cif_output_dim)
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# Attention Settings
self.cross_self_attention = cfg.cross_self_attention
self.do_encoder_attn = cfg.do_encoder_attn
self.no_encoder_attn = not self.do_encoder_attn
self.decoder_enc_attn_kv_type = cfg.decoder_enc_attn_kv_type
# dropout settings
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
temp_decoder_cfg = copy.deepcopy(cfg)
cfg_dict = vars(temp_decoder_cfg)
cfg_dict["dropout"] = cfg_dict["decoder_dropout"]
cfg_dict["attention_dropout"] = cfg_dict["decoder_attention_dropout"]
cfg_dict["activation_dropout"] = cfg_dict["decoder_activation_dropout"]
temp_decoder_cfg = argparse.Namespace(**cfg_dict)
# build transformer layers
self.layers.extend(
[
self.build_decoder_layer(temp_decoder_cfg, self.no_encoder_attn)
for _ in range(cfg.decoder_layers)
]
)
self.num_layers = len(self.layers)
# build layernorm
if cfg.decoder_normalize_before and not cfg.no_decoder_final_norm:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
# build output module
if self.do_decoder_nar:
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if self.output_embed_dim != embed_dim
else None
)
else:
self.project_out_dim = (
Linear(
(embed_dim + self.cif_output_dim), self.output_embed_dim, bias=False
)
if self.output_embed_dim != (embed_dim + self.cif_output_dim)
else None
)
self.adaptive_softmax = None
self.output_projection = output_projection
if self.output_projection is None:
self.build_output_projection(dictionary)
# Settings about decoder states
self.fetch_decoder_states_from = cfg.fetch_decoder_states_from
def build_output_projection(self, dictionary):
if not self.share_input_output_embed:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
) # D x V
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim**-0.5
)
else:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
) # D x V
self.output_projection.weight = self.embed_tokens.weight
def build_decoder_layer(self, cfg, no_encoder_attn=True):
layer = transformer_layer.TransformerDecoderLayerBaseDirectArgs(
cfg, no_encoder_attn=no_encoder_attn
)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output logits with shape B x T x V (vocab_size)
- a dictionary with any model-specific outputs
- the decoder's output states with shape B x T x C
"""
x, extra, decoder_states = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra, decoder_states
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
bs, slen = prev_output_tokens.size()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# Prepare inputs for encoder-decoder attention
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
if self.do_encoder_attn:
if self.decoder_enc_attn_kv_type == "cif":
enc = encoder_out["encoder_out"][0].transpose(
0, 1
) # Transpose to T x B x C
padding_mask = ~encoder_out["encoder_padding_mask"][0] # B x T
else:
enc = encoder_out["raw_encoder_out"][0] # T x B x C
padding_mask = encoder_out["raw_encoder_padding_mask"][0] # B x T
# cif outputs
cif_outs = encoder_out["encoder_out"][0]
_, cif_max_len, cif_embed_dim = cif_outs.size()
min_reg_len = min(cif_max_len, slen)
        shifted_cif_outs = torch.cat(
            [
                torch.zeros(
                    bs, 1, cif_embed_dim,
                    dtype=cif_outs.dtype, device=cif_outs.device,
                ),
                cif_outs,
            ],
            dim=1,
        )[:, :cif_max_len, :]
        # regularize lengths; use the tensors' own device rather than
        # hard-coded .cuda() so that CPU inference also works
        cif_outs = cif_outs[:, :min_reg_len, :]
        shifted_cif_outs = shifted_cif_outs[:, :min_reg_len, :]
        prev_output_tokens = prev_output_tokens[:, :min_reg_len].to(cif_outs.device)
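        # Illustrative note (not in the original code): the shift prepends a
        # zero vector and drops the last step, so at step t the decoder sees
        # the CIF vector of step t - 1, e.g.
        #   cif_outs         = [c1, c2, c3]
        #   shifted_cif_outs = [ 0, c1, c2]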
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:] # B x T x C
if incremental_state is not None:
shifted_cif_outs = shifted_cif_outs[:, -1:, :]
cif_outs = cif_outs[:, -1:, :]
# embed tokens and positions
if self.do_decoder_nar:
x = cif_outs # B x T x C
else:
x = self.embed_scale * self.embed_tokens(prev_output_tokens) # B x T x C
if self.quant_noise is not None:
x = self.quant_noise(x)
if not self.do_decoder_nar:
x = torch.cat([x, shifted_cif_outs], dim=-1) # B x T x C
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
# prepare attention mask for transformer layers
if self.do_decoder_nar:
if self.decoder_nar_pad_type == "full":
self_attn_mask = None
elif self.decoder_nar_pad_type == "triangle":
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
else:
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
enc,
padding_mask,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
                need_attn=bool(idx == alignment_layer),
                need_head_weights=bool(idx == alignment_layer),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
decoder_states = None
if self.fetch_decoder_states_from == "tfm_outputs":
decoder_states = x # B x T x C
if not self.do_decoder_nar:
x = torch.cat([x, cif_outs], dim=-1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
if self.fetch_decoder_states_from == "pre_final_output_proj":
decoder_states = x # B x T x C
return x, {"attn": [attn], "inner_states": inner_states}, decoder_states
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
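    # Illustrative note (not in the original code): for dim = 3 the returned
    # mask is
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # so position i can only attend to positions j <= i.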
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerDecoderScriptable(TransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
        x, _, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(
model_name="s2t_cif_transformer", arch_name="s2t_cif_transformer"
)
def base_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.frontend_type = getattr(args, "frontend_type", "conv1d")
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.relu_dropout = getattr(args, "relu_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.do_encoder_attn = getattr(args, "do_encoder_attn", False)
args.decoder_enc_attn_kv_type = getattr(args, "decoder_enc_attn_kv_type", "raw")
args.do_decoder_nar = getattr(args, "do_decoder_nar", False)
args.decoder_nar_pad_type = getattr(args, "decoder_nar_pad_type", "full")
args.add_pos_to_cif = getattr(args, "add_pos_to_cif", False)
# Encoder layer downsampling settings
args.layer_downsampling = getattr(args, "layer_downsampling", False)
args.pooling_layer_ids = getattr(args, "pooling_layer_ids", "4,8")
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 256
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 256)
    args.apply_scaling = getattr(args, "apply_scaling", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.5
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
args.use_ctc_constraint = getattr(args, "use_ctc_constraint", False)
args.ctc_prob_threshold = getattr(args, "ctc_prob_threshold", 0.5)
# Correction Module Settings
args.use_uem = getattr(args, "use_uem", False)
args.uem_input_state = getattr(
args, "uem_input_state", "cif_outputs,decoder_states,logits"
)
args.use_uem_bn_layer = getattr(args, "use_uem_bn_layer", False)
args.uem_bn_proj_dim = getattr(args, "uem_bn_proj_dim", 512)
args.use_cordec = getattr(args, "use_cordec", False)
args.num_cordec_layers = getattr(args, "num_cordec_layers", 4)
args.encoder_attn_type = getattr(args, "encoder_attn_type", "normal")
args.uncertainty_embed_fusion_mode = getattr(
args, "uncertainty_embed_fusion_mode", "top-concat"
)
args.cordec_input_state = getattr(
args, "cordec_input_state", "cif_outputs,decoder_states"
)
args.cordec_output_state = getattr(
args, "cordec_output_state", "cordec_state,bn_embeds"
)
args.corr_tgt_type = getattr(args, "corr_tgt_type", "tf-argmax,tf-sample")
args.K_corr_samp = getattr(args, "K_corr_samp", 5)
args.freeze_asr_main_body = getattr(args, "freeze_asr_main_body", False)
args.load_init_asr_model_from = getattr(args, "load_init_asr_model_from", "")
args.corr_dropout = getattr(args, "corr_dropout", 0.2)
args.corr_attention_dropout = getattr(
args, "corr_attention_dropout", args.corr_dropout
)
args.corr_activation_dropout = getattr(
args, "corr_activation_dropout", args.corr_dropout
)
args.stop_bn_grad = getattr(args, "stop_bn_grad", False)
args.fetch_decoder_states_from = getattr(
args, "fetch_decoder_states_from", "tfm_outputs"
)
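# Illustrative sketch (not part of the repo): architecture functions only fill
# in defaults for attributes that are missing from the parsed args, e.g.
#
#   import argparse
#   args = argparse.Namespace(encoder_layers=6)
#   base_architecture(args)       # or any registered architecture function
#   args.encoder_layers           # -> 6, the user-supplied value is kept
#   args.encoder_attention_heads  # -> 8, the default is filled in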
@register_model_architecture(
model_name="s2t_cif_transformer", arch_name="s2t_cif_transformer_wide"
)
def base_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1280)
args.conv2d_output_channels = getattr(args, "conv2d_output_channels", "128")
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 640)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2560)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.relu_dropout = getattr(args, "relu_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.do_encoder_attn = getattr(args, "do_encoder_attn", False)
args.decoder_enc_attn_kv_type = getattr(args, "decoder_enc_attn_kv_type", "raw")
args.do_decoder_nar = getattr(args, "do_decoder_nar", False)
args.decoder_nar_pad_type = getattr(args, "decoder_nar_pad_type", "full")
args.add_pos_to_cif = getattr(args, "add_pos_to_cif", False)
# Encoder layer downsampling settings
args.layer_downsampling = getattr(args, "layer_downsampling", False)
args.pooling_layer_ids = getattr(args, "pooling_layer_ids", "4,8")
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 320
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 320)
    args.apply_scaling = getattr(args, "apply_scaling", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.5
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
args.use_ctc_constraint = getattr(args, "use_ctc_constraint", False)
args.ctc_prob_threshold = getattr(args, "ctc_prob_threshold", 0.5)
# Correction Module Settings
args.use_uem = getattr(args, "use_uem", False)
args.uem_input_state = getattr(
args, "uem_input_state", "cif_outputs,decoder_states,logits"
)
args.use_uem_bn_layer = getattr(args, "use_uem_bn_layer", False)
args.uem_bn_proj_dim = getattr(args, "uem_bn_proj_dim", 512)
args.use_cordec = getattr(args, "use_cordec", False)
args.num_cordec_layers = getattr(args, "num_cordec_layers", 4)
args.encoder_attn_type = getattr(args, "encoder_attn_type", "normal")
args.uncertainty_embed_fusion_mode = getattr(
args, "uncertainty_embed_fusion_mode", "top-concat"
)
args.cordec_input_state = getattr(
args, "cordec_input_state", "cif_outputs,decoder_states"
)
args.cordec_output_state = getattr(
args, "cordec_output_state", "cordec_state,bn_embeds"
)
args.corr_tgt_type = getattr(args, "corr_tgt_type", "tf-argmax,tf-sample")
args.K_corr_samp = getattr(args, "K_corr_samp", 5)
args.freeze_asr_main_body = getattr(args, "freeze_asr_main_body", False)
args.load_init_asr_model_from = getattr(args, "load_init_asr_model_from", "")
args.corr_dropout = getattr(args, "corr_dropout", 0.2)
args.corr_attention_dropout = getattr(
args, "corr_attention_dropout", args.corr_dropout
)
args.corr_activation_dropout = getattr(
args, "corr_activation_dropout", args.corr_dropout
)
args.stop_bn_grad = getattr(args, "stop_bn_grad", False)
args.fetch_decoder_states_from = getattr(
args, "fetch_decoder_states_from", "tfm_outputs"
)
# Conformer settings
args.apply_conformer_encoder = getattr(args, "apply_conformer_encoder", False)
args.conformer_depthwise_conv_kernel_size = getattr(
args, "conformer_depthwise_conv_kernel_size", 15
)
args.conformer_attn_type = getattr(args, "conformer_attn_type", "espnet")
args.conformer_pos_enc_type = getattr(args, "conformer_pos_enc_type", "rel_pos")
@register_model_architecture(
model_name="s2t_cif_transformer", arch_name="s2t_cif_transformer_s"
)
def base_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 512)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.relu_dropout = getattr(args, "relu_dropout", args.dropout)
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0.0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.no_decoder_final_norm = getattr(args, "no_decoder_final_norm", False)
args.do_encoder_attn = getattr(args, "do_encoder_attn", False)
args.decoder_enc_attn_kv_type = getattr(
args, "decoder_enc_attn_kv_type", "cif"
) # "cif" or "raw"
args.do_decoder_nar = getattr(args, "do_decoder_nar", False)
args.decoder_nar_pad_type = getattr(args, "decoder_nar_pad_type", "full")
args.add_pos_to_cif = getattr(args, "add_pos_to_cif", False)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 256
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 256)
    args.apply_scaling = getattr(args, "apply_scaling", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Correction Module Settings
args.use_uem = getattr(args, "use_uem", False)
args.uem_input_state = getattr(
args, "uem_input_state", "cif_outputs,decoder_states,logits"
)
args.use_uem_bn_layer = getattr(args, "use_uem_bn_layer", False)
args.uem_bn_proj_dim = getattr(args, "uem_bn_proj_dim", 512)
args.use_cordec = getattr(args, "use_cordec", False)
args.num_cordec_layers = getattr(args, "num_cordec_layers", 4)
args.encoder_attn_type = getattr(args, "encoder_attn_type", "normal")
args.uncertainty_embed_fusion_mode = getattr(
args, "uncertainty_embed_fusion_mode", "top-concat"
)
args.cordec_input_state = getattr(
args, "cordec_input_state", "cif_outputs,decoder_states"
)
args.cordec_output_state = getattr(
args, "cordec_output_state", "cordec_state,bn_embeds"
)
args.corr_tgt_type = getattr(args, "corr_tgt_type", "tf-argmax,tf-sample")
args.K_corr_samp = getattr(args, "K_corr_samp", 5)
args.freeze_asr_main_body = getattr(args, "freeze_asr_main_body", False)
args.load_init_asr_model_from = getattr(args, "load_init_asr_model_from", "")
args.corr_dropout = getattr(args, "corr_dropout", 0.2)
args.corr_attention_dropout = getattr(
args, "corr_attention_dropout", args.corr_dropout
)
args.corr_activation_dropout = getattr(
args, "corr_activation_dropout", args.corr_dropout
)
args.stop_bn_grad = getattr(args, "stop_bn_grad", False)
args.fetch_decoder_states_from = getattr(
args, "fetch_decoder_states_from", "tfm_outputs"
)
# Conformer settings
args.apply_conformer_encoder = getattr(args, "apply_conformer_encoder", False)
args.conformer_depthwise_conv_kernel_size = getattr(
args, "conformer_depthwise_conv_kernel_size", 15
)
args.conformer_attn_type = getattr(args, "conformer_attn_type", "espnet")
args.conformer_pos_enc_type = getattr(args, "conformer_pos_enc_type", "rel_pos")
# BERT Distillation Settings
args.bert_distill_feat_dim = getattr(args, "bert_distill_feat_dim", 768)
args.apply_bert_distill = getattr(args, "apply_bert_distill", False)
args.use_contextualized_cif_feats_for_distill = getattr(
args, "use_contextualized_cif_feats_for_distill", False
)
| 139,663
| 38.264549
| 112
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/berard.py
|
#!/usr/bin/env python3
from ast import literal_eval
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
@register_model("s2t_berard")
class BerardModel(FairseqEncoderDecoderModel):
"""Implementation of a model similar to https://arxiv.org/abs/1802.04200
Paper title: End-to-End Automatic Speech Translation of Audiobooks
    A TensorFlow implementation is available at
https://github.com/eske/seq2seq
Relevant files in this implementation are the config
(https://github.com/eske/seq2seq/blob/master/config/LibriSpeech/AST.yaml)
and the model code
(https://github.com/eske/seq2seq/blob/master/translate/models.py).
The encoder and decoder try to be close to the original implementation.
The attention is an MLP as in Bahdanau et al.
(https://arxiv.org/abs/1409.0473).
    Decoder hidden states are initialized from the average of the encoder
    outputs; cell states start at zero.
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
parser.add_argument(
"--input-layers",
type=str,
metavar="EXPR",
help="List of linear layer dimensions. These "
"layers are applied to the input features and "
"are followed by tanh and possibly dropout.",
)
parser.add_argument(
"--dropout",
type=float,
metavar="D",
help="Dropout probability to use in the encoder/decoder. "
"Note that this parameters control dropout in various places, "
"there is no fine-grained control for dropout for embeddings "
"vs LSTM layers for example.",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="Number of encoder input channels. " "Typically value is 1.",
)
parser.add_argument(
"--conv-layers",
type=str,
metavar="EXPR",
help="List of conv layers " "(format: (channels, kernel, stride)).",
)
parser.add_argument(
"--num-blstm-layers",
type=int,
metavar="N",
help="Number of encoder bi-LSTM layers.",
)
parser.add_argument(
"--lstm-size", type=int, metavar="N", help="LSTM hidden size."
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="Embedding dimension of the decoder target tokens.",
)
parser.add_argument(
"--decoder-hidden-dim",
type=int,
metavar="N",
help="Decoder LSTM hidden dimension.",
)
parser.add_argument(
"--decoder-num-layers",
type=int,
metavar="N",
help="Number of decoder LSTM layers.",
)
parser.add_argument(
"--attention-dim",
type=int,
metavar="N",
help="Hidden layer dimension in MLP attention.",
)
parser.add_argument(
"--output-layer-dim",
type=int,
metavar="N",
help="Hidden layer dim for linear layer prior to output projection.",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--load-pretrained-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
@classmethod
def build_encoder(cls, args, task):
encoder = BerardEncoder(
input_layers=literal_eval(args.input_layers),
conv_layers=literal_eval(args.conv_layers),
in_channels=args.input_channels,
input_feat_per_channel=args.input_feat_per_channel,
num_blstm_layers=args.num_blstm_layers,
lstm_size=args.lstm_size,
dropout=args.dropout,
)
if getattr(args, "load_pretrained_encoder_from", None):
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@classmethod
def build_decoder(cls, args, task):
decoder = LSTMDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
num_layers=args.decoder_num_layers,
hidden_size=args.decoder_hidden_dim,
dropout=args.dropout,
encoder_output_dim=2 * args.lstm_size, # bidirectional
attention_dim=args.attention_dim,
output_layer_dim=args.output_layer_dim,
)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (B, T, D) tensor
lprobs.batch_first = True
return lprobs
class BerardEncoder(FairseqEncoder):
def __init__(
self,
input_layers: List[int],
conv_layers: List[Tuple[int]],
in_channels: int,
input_feat_per_channel: int,
num_blstm_layers: int,
lstm_size: int,
dropout: float,
):
"""
Args:
input_layers: list of linear layer dimensions. These layers are
applied to the input features and are followed by tanh and
possibly dropout.
conv_layers: list of conv2d layer configurations. A configuration is
a tuple (out_channels, conv_kernel_size, stride).
in_channels: number of input channels.
input_feat_per_channel: number of input features per channel. These
are speech features, typically 40 or 80.
num_blstm_layers: number of bidirectional LSTM layers.
lstm_size: size of the LSTM hidden (and cell) size.
dropout: dropout probability. Dropout can be applied after the
linear layers and LSTM layers but not to the convolutional
layers.
"""
super().__init__(None)
self.input_layers = nn.ModuleList()
in_features = input_feat_per_channel
for out_features in input_layers:
if dropout > 0:
self.input_layers.append(
nn.Sequential(
nn.Linear(in_features, out_features), nn.Dropout(p=dropout)
)
)
else:
self.input_layers.append(nn.Linear(in_features, out_features))
in_features = out_features
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
self.conv_kernel_sizes_and_strides = []
self.conv_layers = nn.ModuleList()
lstm_input_dim = input_layers[-1]
for conv_layer in conv_layers:
out_channels, conv_kernel_size, conv_stride = conv_layer
self.conv_layers.append(
nn.Conv2d(
in_channels,
out_channels,
conv_kernel_size,
stride=conv_stride,
padding=conv_kernel_size // 2,
)
)
self.conv_kernel_sizes_and_strides.append((conv_kernel_size, conv_stride))
in_channels = out_channels
lstm_input_dim //= conv_stride
lstm_input_dim *= conv_layers[-1][0]
self.lstm_size = lstm_size
self.num_blstm_layers = num_blstm_layers
self.lstm = nn.LSTM(
input_size=lstm_input_dim,
hidden_size=lstm_size,
num_layers=num_blstm_layers,
dropout=dropout,
bidirectional=True,
)
self.output_dim = 2 * lstm_size # bidirectional
if dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = None
def forward(self, src_tokens, src_lengths=None, **kwargs):
"""
Args
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
# (B, C, T, feat)
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
for input_layer in self.input_layers:
x = input_layer(x)
x = torch.tanh(x)
for conv_layer in self.conv_layers:
x = conv_layer(x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) ->
# (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
input_lengths = src_lengths.clone()
for k, s in self.conv_kernel_sizes_and_strides:
p = k // 2
input_lengths = (input_lengths.float() + 2 * p - k) / s + 1
input_lengths = input_lengths.floor().long()
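        # Example (illustrative): with kernel k=3, stride s=2, padding p=1, an
        # input length of 100 maps to floor((100 + 2*1 - 3) / 2 + 1) = 50.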
packed_x = nn.utils.rnn.pack_padded_sequence(x, input_lengths)
h0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_()
c0 = x.new(2 * self.num_blstm_layers, bsz, self.lstm_size).zero_()
packed_outs, _ = self.lstm(packed_x, (h0, c0))
# unpack outputs and apply dropout
x, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_outs)
if self.dropout is not None:
x = self.dropout(x)
encoder_padding_mask = (
lengths_to_padding_mask(output_lengths).to(src_tokens.device).t()
)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class MLPAttention(nn.Module):
"""The original attention from Badhanau et al. (2014)
https://arxiv.org/abs/1409.0473, based on a Multi-Layer Perceptron.
The attention score between position i in the encoder and position j in the
decoder is: alpha_ij = V_a * tanh(W_ae * enc_i + W_ad * dec_j + b_a)
"""
def __init__(self, decoder_hidden_state_dim, context_dim, attention_dim):
super().__init__()
self.context_dim = context_dim
self.attention_dim = attention_dim
# W_ae and b_a
self.encoder_proj = nn.Linear(context_dim, self.attention_dim, bias=True)
# W_ad
self.decoder_proj = nn.Linear(
decoder_hidden_state_dim, self.attention_dim, bias=False
)
# V_a
self.to_scores = nn.Linear(self.attention_dim, 1, bias=False)
def forward(self, decoder_state, source_hids, encoder_padding_mask):
"""The expected input dimensions are:
decoder_state: bsz x decoder_hidden_state_dim
source_hids: src_len x bsz x context_dim
encoder_padding_mask: src_len x bsz
"""
src_len, bsz, _ = source_hids.size()
# (src_len*bsz) x context_dim (to feed through linear)
flat_source_hids = source_hids.view(-1, self.context_dim)
# (src_len*bsz) x attention_dim
encoder_component = self.encoder_proj(flat_source_hids)
# src_len x bsz x attention_dim
encoder_component = encoder_component.view(src_len, bsz, self.attention_dim)
# 1 x bsz x attention_dim
decoder_component = self.decoder_proj(decoder_state).unsqueeze(0)
# Sum with broadcasting and apply the non linearity
# src_len x bsz x attention_dim
hidden_att = torch.tanh(
(decoder_component + encoder_component).view(-1, self.attention_dim)
)
        # Project onto the reals to get attention scores (src_len x bsz)
attn_scores = self.to_scores(hidden_att).view(src_len, bsz)
# Mask + softmax (src_len x bsz)
if encoder_padding_mask is not None:
attn_scores = (
attn_scores.float()
.masked_fill_(encoder_padding_mask, float("-inf"))
.type_as(attn_scores)
) # FP16 support: cast to float and back
# srclen x bsz
normalized_masked_attn_scores = F.softmax(attn_scores, dim=0)
# Sum weighted sources (bsz x context_dim)
attn_weighted_context = (
source_hids * normalized_masked_attn_scores.unsqueeze(2)
).sum(dim=0)
return attn_weighted_context, normalized_masked_attn_scores
class LSTMDecoder(FairseqIncrementalDecoder):
def __init__(
self,
dictionary,
embed_dim,
num_layers,
hidden_size,
dropout,
encoder_output_dim,
attention_dim,
output_layer_dim,
):
"""
Args:
dictionary: target text dictionary.
embed_dim: embedding dimension for target tokens.
num_layers: number of LSTM layers.
hidden_size: hidden size for LSTM layers.
dropout: dropout probability. Dropout can be applied to the
embeddings, the LSTM layers, and the context vector.
encoder_output_dim: encoder output dimension (hidden size of
encoder LSTM).
attention_dim: attention dimension for MLP attention.
output_layer_dim: size of the linear layer prior to output
projection.
"""
super().__init__(dictionary)
self.num_layers = num_layers
self.hidden_size = hidden_size
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
self.embed_tokens = nn.Embedding(num_embeddings, embed_dim, padding_idx)
if dropout > 0:
self.dropout = nn.Dropout(p=dropout)
else:
self.dropout = None
self.layers = nn.ModuleList()
for layer_id in range(num_layers):
input_size = embed_dim if layer_id == 0 else encoder_output_dim
self.layers.append(
nn.LSTMCell(input_size=input_size, hidden_size=hidden_size)
)
self.context_dim = encoder_output_dim
self.attention = MLPAttention(
decoder_hidden_state_dim=hidden_size,
context_dim=encoder_output_dim,
attention_dim=attention_dim,
)
self.deep_output_layer = nn.Linear(
hidden_size + encoder_output_dim + embed_dim, output_layer_dim
)
self.output_projection = nn.Linear(output_layer_dim, num_embeddings)
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs
):
encoder_padding_mask = encoder_out["encoder_padding_mask"]
encoder_outs = encoder_out["encoder_out"]
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
srclen = encoder_outs.size(0)
# embed tokens
embeddings = self.embed_tokens(prev_output_tokens)
x = embeddings
if self.dropout is not None:
x = self.dropout(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental
# generation)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is not None:
prev_hiddens, prev_cells = cached_state
else:
prev_hiddens = [encoder_out["encoder_out"].mean(dim=0)] * self.num_layers
prev_cells = [x.new_zeros(bsz, self.hidden_size)] * self.num_layers
attn_scores = x.new_zeros(bsz, srclen)
attention_outs = []
outs = []
for j in range(seqlen):
input = x[j, :, :]
attention_out = None
for i, layer in enumerate(self.layers):
# the previous state is one layer below except for the bottom
# layer where the previous state is the state emitted by the
# top layer
hidden, cell = layer(
input,
(
prev_hiddens[(i - 1) % self.num_layers],
prev_cells[(i - 1) % self.num_layers],
),
)
if self.dropout is not None:
hidden = self.dropout(hidden)
prev_hiddens[i] = hidden
prev_cells[i] = cell
if attention_out is None:
attention_out, attn_scores = self.attention(
hidden, encoder_outs, encoder_padding_mask
)
if self.dropout is not None:
attention_out = self.dropout(attention_out)
attention_outs.append(attention_out)
input = attention_out
# collect the output of the top layer
outs.append(hidden)
# cache previous states (no-op except during incremental generation)
utils.set_incremental_state(
self, incremental_state, "cached_state", (prev_hiddens, prev_cells)
)
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
attention_outs_concat = torch.cat(attention_outs, dim=0).view(
seqlen, bsz, self.context_dim
)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
attention_outs_concat = attention_outs_concat.transpose(0, 1)
# concat LSTM output, attention output and embedding
# before output projection
x = torch.cat((x, attention_outs_concat, embeddings), dim=2)
x = self.deep_output_layer(x)
x = torch.tanh(x)
if self.dropout is not None:
x = self.dropout(x)
# project back to size of vocabulary
x = self.output_projection(x)
# to return the full attn_scores tensor, we need to fix the decoder
# to account for subsampling input frames
# return x, attn_scores
return x, None
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
cached_state = utils.get_incremental_state(
self, incremental_state, "cached_state"
)
if cached_state is None:
return
def reorder_state(state):
if isinstance(state, list):
return [reorder_state(state_i) for state_i in state]
return state.index_select(0, new_order)
new_state = tuple(map(reorder_state, cached_state))
utils.set_incremental_state(self, incremental_state, "cached_state", new_state)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard")
def berard(args):
"""The original version: "End-to-End Automatic Speech Translation of
Audiobooks" (https://arxiv.org/abs/1802.04200)
"""
args.input_layers = getattr(args, "input_layers", "[256, 128]")
args.conv_layers = getattr(args, "conv_layers", "[(16, 3, 2), (16, 3, 2)]")
args.num_blstm_layers = getattr(args, "num_blstm_layers", 3)
args.lstm_size = getattr(args, "lstm_size", 256)
args.dropout = getattr(args, "dropout", 0.2)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_num_layers = getattr(args, "decoder_num_layers", 2)
args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512)
args.attention_dim = getattr(args, "attention_dim", 512)
args.output_layer_dim = getattr(args, "output_layer_dim", 128)
args.load_pretrained_encoder_from = getattr(
args, "load_pretrained_encoder_from", None
)
args.load_pretrained_decoder_from = getattr(
args, "load_pretrained_decoder_from", None
)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_256_3_3")
def berard_256_3_3(args):
"""Used in
* "Harnessing Indirect Training Data for End-to-End Automatic Speech
Translation: Tricks of the Trade" (https://arxiv.org/abs/1909.06515)
* "CoVoST: A Diverse Multilingual Speech-To-Text Translation Corpus"
(https://arxiv.org/pdf/2002.01320.pdf)
* "Self-Supervised Representations Improve End-to-End Speech Translation"
(https://arxiv.org/abs/2006.12124)
"""
args.decoder_num_layers = getattr(args, "decoder_num_layers", 3)
berard(args)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_3_2")
def berard_512_3_2(args):
args.num_blstm_layers = getattr(args, "num_blstm_layers", 3)
args.lstm_size = getattr(args, "lstm_size", 512)
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_num_layers = getattr(args, "decoder_num_layers", 2)
args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024)
args.attention_dim = getattr(args, "attention_dim", 512)
args.output_layer_dim = getattr(args, "output_layer_dim", 256)
berard(args)
@register_model_architecture(model_name="s2t_berard", arch_name="s2t_berard_512_5_3")
def berard_512_5_3(args):
args.num_blstm_layers = getattr(args, "num_blstm_layers", 5)
args.lstm_size = getattr(args, "lstm_size", 512)
args.dropout = getattr(args, "dropout", 0.3)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_num_layers = getattr(args, "decoder_num_layers", 3)
args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 1024)
args.attention_dim = getattr(args, "attention_dim", 512)
args.output_layer_dim = getattr(args, "output_layer_dim", 256)
berard(args)
| 23,124
| 37.097199
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/xm_transformer.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import copy
from typing import Dict, List, Optional, Tuple
from fairseq import utils, checkpoint_utils
from fairseq.models import (
FairseqEncoderDecoderModel,
FairseqEncoder,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.models.wav2vec import Wav2VecEncoder
from fairseq.modules.layer_norm import LayerNorm
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.utils import safe_hasattr
from torch import Tensor
import torch.nn as nn
logger = logging.getLogger(__name__)
class Conv1dAdaptor(nn.Module):
def __init__(
self, in_dim, out_dim, n_layers=3, kernel_size=3, stride=2, add_layernorm=False
):
super().__init__()
self.layers = nn.ModuleList(
nn.Conv1d(
in_dim if i == 0 else out_dim,
out_dim * 2,
kernel_size,
stride=stride,
padding=kernel_size // 2,
)
for i in range(n_layers)
)
self.layernorms = None
if add_layernorm:
self.layernorms = nn.ModuleList(LayerNorm(out_dim) for _ in range(n_layers))
self.stride = stride
@classmethod
def add_args(cls, parser):
parser.add_argument("--adaptor-n-layers", type=int)
parser.add_argument("--adaptor-kernel-size", type=int)
parser.add_argument("--adaptor-stride", type=int)
parser.add_argument("--adaptor-layernorm", action="store_true")
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
for _ in self.layers:
out = ((out.float() - 1) / self.stride + 1).floor().long()
return out
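    # Example (illustrative): with the default stride of 2 and 3 conv layers,
    # an input length of 100 maps to 50 -> 25 -> 13, since each layer applies
    # floor((L - 1) / 2 + 1).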
def forward(self, x, padding_mask):
# T x B x C -> B x C x T
x = x.transpose(0, 1).transpose(1, 2)
for i, layer in enumerate(self.layers):
x = nn.functional.glu(layer(x), dim=1)
if self.layernorms is not None:
x = self.layernorms[i](x.transpose(1, 2)).transpose(1, 2)
# B x C x T -> T x B x C
x = x.transpose(1, 2).transpose(0, 1)
if padding_mask is None:
out_padding_mask = None
else:
out_lengths = self.get_out_seq_lens_tensor((~padding_mask).sum(1))
out_padding_mask = lengths_to_padding_mask(out_lengths)
return x, out_padding_mask
def add_wav2vec_asr_args(parser):
parser.add_argument("--w2v-path", help="path to wav2vec 2.0 model")
parser.add_argument(
"--no-pretrained-weights",
action="store_true",
help="if true, does not load pretrained weights",
)
parser.add_argument(
"--dropout-input",
type=float,
metavar="D",
help="dropout to apply to the input (after feat extr)",
)
parser.add_argument(
"--final-dropout",
type=float,
metavar="D",
help="dropout after transformer and before final projection",
)
parser.add_argument(
"--apply-mask", action="store_true", help="apply masking during fine-tuning"
)
parser.add_argument(
"--dropout",
type=float,
metavar="D",
help="dropout probability inside wav2vec 2.0 model",
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights inside wav2vec 2.0 model",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN inside wav2vec 2.0 model",
)
parser.add_argument(
"--mask-length", type=int, help="repeat the mask indices multiple times"
)
parser.add_argument(
"--mask-prob", type=float, help="probability of replacing a token with mask"
)
parser.add_argument(
"--mask-selection",
type=str,
choices=["static", "uniform", "normal", "poisson"],
help="how to choose masks",
)
parser.add_argument(
"--mask-other",
type=float,
help="stdev of the mask length in case of 'normal' selection strategy",
)
parser.add_argument(
"--no-mask-overlap",
action="store_true",
help="whether to allow masks to overlap",
)
parser.add_argument(
"--mask-channel-length", type=int, help="repeat the mask indices multiple times"
)
parser.add_argument(
"--mask-channel-prob",
type=float,
help="probability of replacing a token with mask",
)
parser.add_argument(
"--mask-channel-selection",
type=str,
choices=["static", "uniform", "normal", "poisson"],
help="how to choose masks",
)
parser.add_argument(
"--mask-channel-other",
type=float,
help="stdev of the mask length in case of 'normal' selection strategy",
)
parser.add_argument(
"--no-mask-channel-overlap",
action="store_true",
help="whether to allow masks to overlap",
)
parser.add_argument(
"--freeze-finetune-updates",
default=0,
type=int,
help="dont finetune wav2vec for this many updates",
)
parser.add_argument(
"--feature-grad-mult",
default=None,
type=float,
help="reset feature grad mult in wav2vec 2.0 to this",
)
parser.add_argument(
"--layerdrop",
default=0.0,
type=float,
help="probability of dropping a layer in wav2vec 2.0",
)
parser.add_argument("--w2v-args", default=None)
class Wav2VecEncoderWithAdaptor(FairseqEncoder):
def __init__(self, args):
super().__init__(None)
self.w2v_encoder = Wav2VecEncoder(args)
encoder_out_dim = self.w2v_encoder.w2v_model.encoder.embedding_dim
# Projection + 8x shrinking
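        # (8x = stride ** n_layers = 2 ** 3 with the default adaptor settings)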
self.adaptor = Conv1dAdaptor(
encoder_out_dim,
args.decoder_embed_dim,
n_layers=args.adaptor_n_layers,
kernel_size=args.adaptor_kernel_size,
stride=args.adaptor_stride,
add_layernorm=args.adaptor_layernorm,
)
for k, p in self.w2v_encoder.w2v_model.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_w2v_params"
) and XMTransformerModel.finetune_params(args.finetune_w2v_params, k):
p.requires_grad = True
else:
p.requires_grad = False
@classmethod
def add_args(cls, parser):
add_wav2vec_asr_args(parser)
parser.add_argument(
"--normalize",
action="store_true",
help="if set, normalizes input to have 0 mean and unit variance",
)
parser.add_argument(
"--finetune-w2v-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
Conv1dAdaptor.add_args(parser)
def forward(self, src_tokens, src_lengths=None, **kwargs):
padding_mask = lengths_to_padding_mask(src_lengths)
out = self.w2v_encoder.forward(src_tokens, padding_mask, tbc=True)
x = out["encoder_out"]
enc_padding_mask = None
if out["encoder_padding_mask"] is not None:
enc_padding_mask = out["encoder_padding_mask"].transpose(
0, 1
) # T X B --> B X T
x, enc_padding_mask = self.adaptor(x, enc_padding_mask)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [enc_padding_mask]
if enc_padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
def add_decoder_args(parser):
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--decoder-dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--decoder-attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--decoder-activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension"
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--layernorm-embedding", action="store_true", help="add layernorm to embedding"
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
parser.add_argument(
"--finetune-decoder-params",
type=str,
metavar="STR",
help="comma-separated param strings to finetune.",
)
parser.add_argument("--checkpoint-activations", action="store_true")
@register_model("xm_transformer")
class XMTransformerModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
Wav2VecEncoderWithAdaptor.add_args(parser)
add_decoder_args(parser)
@classmethod
def build_encoder(cls, args):
_args = copy.deepcopy(args)
state = checkpoint_utils.load_checkpoint_to_cpu(args.w2v_path)
if state.get("cfg") is not None:
encoder_embed_dim = state["cfg"]._content["model"]["encoder_embed_dim"]
elif state.get("args") is not None:
encoder_embed_dim = state["args"].encoder_embed_dim
else:
raise ValueError(f"Invalid config in {args.w2v_path}")
_args.decoder_embed_dim = encoder_embed_dim
encoder = Wav2VecEncoderWithAdaptor(_args)
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
_args = copy.deepcopy(args)
_args.dropout = args.decoder_dropout
_args.attention_dropout = args.decoder_attention_dropout
_args.activation_dropout = args.decoder_activation_dropout
_args.max_target_positions = 1024
decoder = TransformerDecoder(_args, task.target_dictionary, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
for k, p in decoder.named_parameters():
# Freeze pretrained models by default
if safe_hasattr(
args, "finetune_decoder_params"
) and XMTransformerModel.finetune_params(args.finetune_decoder_params, k):
p.requires_grad = True
else:
p.requires_grad = False
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
return cls(encoder, decoder)
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
        method overrides the forward method definition without **kwargs.
"""
encoder_out = self.encoder(
src_tokens=src_tokens, src_lengths=src_lengths, **kwargs
)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
return decoder_out
    def upgrade_state_dict(self, state_dict):
        # Iterate over a snapshot of the keys since entries are renamed in place
        for k in list(state_dict.keys()):
            if "adaptor.layers" in k:
                new = k.replace("adaptor.layers", "adaptor_layers")
                state_dict[new] = state_dict[k]
                del state_dict[k]
@staticmethod
def finetune_params(finetune_params, param_name):
if finetune_params == "all":
return True
finetune_params_list = finetune_params.split(",")
for finetune_param in finetune_params_list:
if finetune_param in param_name:
return True
return False
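    # Illustrative example (hypothetical parameter names): with
    # finetune_params == "embed_tokens,layer_norm",
    #   finetune_params("embed_tokens,layer_norm", "decoder.embed_tokens.weight") -> True
    #   finetune_params("embed_tokens,layer_norm", "decoder.layers.0.fc1.weight") -> False
    # and finetune_params("all", name) -> True for any name.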
def set_default_w2v_encoder_args(args):
args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False)
args.dropout_input = getattr(args, "dropout_input", 0)
args.final_dropout = getattr(args, "final_dropout", 0)
args.apply_mask = getattr(args, "apply_mask", False)
args.dropout = getattr(args, "dropout", 0)
args.attention_dropout = getattr(args, "attention_dropout", 0)
args.activation_dropout = getattr(args, "activation_dropout", 0)
args.mask_length = getattr(args, "mask_length", 10)
args.mask_prob = getattr(args, "mask_prob", 0.5)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_other = getattr(args, "mask_other", 0)
args.no_mask_overlap = getattr(args, "no_mask_overlap", False)
args.mask_channel_length = getattr(args, "mask_channel_length", 10)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5)
args.mask_channel_before = getattr(args, "mask_channel_before", False)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.mask_channel_other = getattr(args, "mask_channel_other", 0)
args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False)
args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0)
args.feature_grad_mult = 0.1
args.layerdrop = getattr(args, "layerdrop", 0.0)
args.normalize = getattr(args, "normalize", False)
def set_default_adaptor_args(args):
args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3)
args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3)
args.adaptor_stride = getattr(args, "adaptor_stride", 2)
args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False)
def set_default_mbart_decoder_args(args):
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * 1024)
args.decoder_layers = getattr(args, "decoder_layers", 12)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", True)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_attention_dropout = getattr(args, "decoder_attention_dropout", 0.0)
args.decoder_activation_dropout = getattr(args, "decoder_activation_dropout", 0.0)
args.decoder_dropout = getattr(args, "decoder_dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", True
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.layernorm_embedding = getattr(args, "layernorm_embedding", True)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh")
args.pooler_dropout = getattr(args, "pooler_dropout", 0.0)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
@register_model_architecture(model_name="xm_transformer", arch_name="xm_transformer")
def base_architecture(args):
set_default_w2v_encoder_args(args)
set_default_adaptor_args(args)
set_default_mbart_decoder_args(args)
| 19,926
| 34.583929
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/utils.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
from collections.abc import Iterable
from itertools import repeat
from typing import List, Optional, Tuple
import torch
from torch import Tensor
# ------------------------------------------------------------------------------
# assert_equal()
# ------------------------------------------------------------------------------
def assert_equal(value1, value2, name1=None, name2=None):
"""Asserts two values are equal otherwise raise an error."""
str_name1 = "" if name1 is None else "{} ".format(name1)
str_name2 = "" if name2 is None else "{} ".format(name2)
if value1 != value2:
str_value1 = "{}" if name1 is None else "({})"
str_value1 = str_value1.format(value1)
str_value2 = "{}" if name2 is None else "({})"
str_value2 = str_value2.format(value2)
raise ValueError(
"Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2)
)
def fill_config(config, key, value):
if value is not None:
if key not in config or config[key] is None:
config[key] = value
assert_equal(value, config[key], "value", f'config["{key}"]')
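# Illustrative example (hypothetical key): fill_config(config, "sample_rate", 16000)
# sets config["sample_rate"] = 16000 when the key is missing or None, and
# otherwise asserts that the existing value equals 16000.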
# ------------------------------------------------------------------------------
# check_and_return_expected()
# ------------------------------------------------------------------------------
def check_and_return_expected(value, undefined_value, expected_value, name=None):
"""
Return the expected value while checking if the given value is undefined or
equal to the expected value.
"""
if (undefined_value is None and value is None) or (undefined_value == value):
return expected_value
if value != expected_value:
str_name = "" if name is None else "{} ".format(name)
str_value = "{}" if name is None else "({})"
str_value = str_value.format(value)
raise ValueError(
"Expected {}{} == {}".format(str_name, str_value, expected_value)
)
return expected_value
# ------------------------------------------------------------------------------
# get_time_axis()
# ------------------------------------------------------------------------------
def get_time_axis(layout):
"""
    Extract the time axis from the layout, e.g. for breaking a sequence into
    segments.
"""
if layout in ["TB", "TBD"]:
return 0
if layout in ["BT", "BTD"]:
return 1
if layout in ["BCTD"]:
return 2
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# get_batch_axis()
# ------------------------------------------------------------------------------
def get_batch_axis(layout):
"""
Extract the batch axis from the layout
"""
if layout in ["TB", "TBD"]:
return 1
if layout in ["BT", "BTD", "BCTD"]:
return 0
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# monotonically_increasing_and_bounded()
# ------------------------------------------------------------------------------
def monotonically_increasing_and_bounded(iterable, min=None, max=None):
"""
Check if the elements in the given iterable are monotonically increasing and
bounded by upper/lower bounds.
"""
if not isinstance(iterable, Iterable):
raise TypeError(
"Expected iterable to be of type Iterable, got ({})".format(
iterable.__class__.__name__
)
)
for i in range(len(iterable)):
if min is not None and iterable[i] < min:
return False
if max is not None and iterable[i] > max:
return False
if i > 0 and iterable[i] <= iterable[i - 1]:
return False
return True
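# Illustrative examples:
#   monotonically_increasing_and_bounded([1, 2, 3], min=0, max=5) -> True
#   monotonically_increasing_and_bounded([1, 1, 3]) -> False  (not strictly increasing)
#   monotonically_increasing_and_bounded([1, 2, 9], max=5) -> False  (exceeds max)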
# ------------------------------------------------------------------------------
# to_pair()
# ------------------------------------------------------------------------------
def to_pair(value, name):
"""Make a pair (of type tuple) of given value."""
if isinstance(value, Iterable):
if len(value) != 2:
raise ValueError(
"Expected `{}` to have exactly 2 elements, got: ({})".format(
name, value
)
)
return value
return tuple(repeat(value, 2))
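# Illustrative examples:
#   to_pair(3, "kernel_size") -> (3, 3)
#   to_pair((3, 5), "kernel_size") -> (3, 5)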
# ------------------------------------------------------------------------------
# infer_conv_output_attrs()
# ------------------------------------------------------------------------------
# TODO(cfyeh): figure out if we can get `output_dim` without calling the module.
def infer_conv_output_attrs(
module, input_channels, input_dim, batch_size=1, max_length=8
):
"""Get output attributes of a module with input."""
input = torch.randn(batch_size, input_channels, max_length, input_dim)
output = module(input)
output_channels = output.shape[1]
output_dim = output.shape[-1]
return output_channels, output_dim
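# Illustrative sketch (hypothetical module): probing a Conv2d for its output shape.
#   conv = torch.nn.Conv2d(1, 32, kernel_size=3, stride=2)
#   out_channels, out_dim = infer_conv_output_attrs(conv, input_channels=1, input_dim=80)
# gives out_channels == 32 and out_dim == (80 - 3) // 2 + 1 == 39.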
# ------------------------------------------------------------------------------
# NoOp
# ------------------------------------------------------------------------------
class NoOp(torch.nn.Module):
"""
NoOp simply passes the input as the output.
"""
def __init__(self):
super().__init__()
def forward(self, input: Tensor) -> Tensor:
return input
# ------------------------------------------------------------------------------
# Permute: a torch.nn.Module applies permutation on the input tensor.
# ------------------------------------------------------------------------------
class Permute(torch.nn.Module):
def __init__(self, dims):
super().__init__()
self.dims = dims
def forward(self, input: Tensor) -> Tensor:
return input.permute(self.dims).contiguous()
# ------------------------------------------------------------------------------
# lengths_to_padding_mask()
# ------------------------------------------------------------------------------
def lengths_to_padding_mask(lengths: Tensor) -> Tensor:
"""Convert lengths of shape (B, ) to padding mask."""
batch_size = lengths.shape[0]
max_length = int(torch.max(lengths).item())
padding_mask = torch.arange( # [0, ..., T-1]
max_length, device=lengths.device, dtype=lengths.dtype
).expand(batch_size, max_length) >= lengths.unsqueeze(1)
return padding_mask
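# Illustrative example: lengths = torch.tensor([3, 5]) yields
#   tensor([[False, False, False,  True,  True],
#           [False, False, False, False, False]])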
# ------------------------------------------------------------------------------
# lengths_to_attention_mask()
# ------------------------------------------------------------------------------
def lengths_to_attention_mask(
lengths: Tensor,
left_context: Optional[int] = None,
right_context: Optional[int] = None,
) -> Optional[Tensor]:
"""
Generate attention mask based on (lengths, left_context, right_context).
left_context is None means unlimited left context.
right_context is None means unlimited right context.
"""
if left_context is None and right_context is None:
return None
max_length = int(torch.max(lengths).item())
    # For example, with `max_length` == 5,
    # indices = tensor([
    #     [ 0,  1,  2,  3,  4],
    #     [-1,  0,  1,  2,  3],
    #     [-2, -1,  0,  1,  2],
    #     [-3, -2, -1,  0,  1],
    #     [-4, -3, -2, -1,  0],
    # ])
# In some cases the second torch.arange is created on cpu which causes a
# failure. Adding the device option to guard against it.
indices = torch.arange(
max_length, device=lengths.device, dtype=lengths.dtype
).expand(max_length, max_length) - torch.arange(
max_length, device=lengths.device
).view(
max_length, -1
)
# For example, with `max_length` == 5,
# bool_mask = tensor([
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
bool_mask = (
torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length)
)
# For example, with `max_length` == 5, left_context == 2
# left_mask = tensor([
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [False, True, True, True, True],
# [False, False, True, True, True],
# ])
if left_context is not None:
left_mask = indices >= -left_context
bool_mask = bool_mask & left_mask
# For example, with `max_length` == 5, right_context == 1
# right_mask = tensor([
# [True, True, False, False, False],
# [True, True, True, False, False],
# [True, True, True, True, False],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
if right_context is not None:
right_mask = indices <= right_context
bool_mask = bool_mask & right_mask
bool_mask = (~bool_mask).to(device=lengths.device)
return bool_mask
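# Illustrative example: with max_length == 5, left_context == 2 and
# right_context == 1, the returned mask (True == masked out) is
#   tensor([[False, False,  True,  True,  True],
#           [False, False, False,  True,  True],
#           [False, False, False, False,  True],
#           [ True, False, False, False, False],
#           [ True,  True, False, False, False]])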
# ------------------------------------------------------------------------------
# infer_output_norm()
# ------------------------------------------------------------------------------
def infer_output_norm(module, output_norm=None):
"""
    Infer the output norm (string and module) needed on the module given the desired
output normalization.
"""
if output_norm == module.output_norm():
# output_norm already matches module.output_norm().
return (None, NoOp())
if output_norm is None and module.output_norm() is not None:
logger = logging.getLogger("infer_output_norm()")
logger.warning(
"trying to set output_norm ({}) ".format(output_norm)
+ "but got module.output_norm() ({}), ".format(module.output_norm())
+ "the combined output_norm() will be ({})".format(module.output_norm())
)
return (None, NoOp())
if output_norm == "log_softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("log_softmax", torch.nn.LogSoftmax(dim=-1))
if output_norm == "softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("softmax", torch.nn.Softmax(dim=-1))
raise ValueError(
"output_norm ({}) not in ".format(output_norm)
+ "supported list = [None, softmax, log_softmax]"
)
# ------------------------------------------------------------------------------
# infer_channels_from_layout()
# ------------------------------------------------------------------------------
def infer_channels_from_layout(layout, channels):
"""Extract the number of channels from the layout."""
if layout in ("TBD", "BTD"):
if channels is not None and channels != 1:
raise ValueError(
"Expected channels ({}) to be 1 for layout = {}".format(
channels, layout
)
)
if channels is None:
return 1
return channels
# ------------------------------------------------------------------------------
# pad_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def pad_sequence(
sequence: Tensor,
time_axis: int,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> Tensor:
"""Pad extra left/right contexts to the sequence."""
if extra_left_context == 0 and extra_right_context == 0:
return sequence
tensors_to_concat = []
if extra_left_context:
size = (extra_left_context,)
fill_value = 0
indices = torch.full(
size=size,
fill_value=fill_value,
dtype=torch.long,
device=sequence.device,
)
left_padding = torch.index_select(sequence, time_axis, indices)
tensors_to_concat.append(left_padding)
tensors_to_concat.append(sequence)
    # NOTE(cfyeh): for efficiency reasons we pad 0 instead of the last frame for
# extra right contexts.
if extra_right_context:
size = list(sequence.shape)
size[time_axis] = extra_right_context
right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device)
tensors_to_concat.append(right_padding)
padded_sequence = torch.cat(tensors_to_concat, dim=time_axis)
return padded_sequence
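# Illustrative example: for a (T=4, B, D) sequence with time_axis == 0,
# extra_left_context == 2 prepends two copies of frame 0 and
# extra_right_context == 1 appends one all-zero frame, giving T == 7.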
# ------------------------------------------------------------------------------
# sequence_to_segments()
# ------------------------------------------------------------------------------
@torch.jit.export
def sequence_to_segments(
sequence: Tensor,
time_axis: int,
lengths: Tensor,
segment_size: Optional[int] = None,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> List[Tuple[Tensor, Tensor]]:
"""Breaks sequence into segments."""
sequence = pad_sequence(
sequence=sequence,
time_axis=time_axis,
extra_left_context=extra_left_context,
extra_right_context=extra_right_context,
)
lengths = lengths + extra_left_context + extra_right_context
segments: List[Tuple[Tensor, Tensor]] = []
if segment_size is None:
segments.append((sequence, lengths))
return segments
offset = 0
end = sequence.shape[time_axis]
step = segment_size
size = extra_left_context + segment_size + extra_right_context
while offset + extra_left_context + extra_right_context < end:
clamped_size = min(size, end - offset)
segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size)
indices = torch.arange(
start=offset,
end=(offset + clamped_size),
step=1,
dtype=torch.long,
device=sequence.device,
)
segment_tensor = torch.index_select(sequence, time_axis, indices)
segments.append((segment_tensor, segment_lengths))
offset = offset + step
return segments
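# Illustrative example: a T=10 sequence with segment_size == 4 and no extra
# context yields three segments covering frames [0:4], [4:8] and [8:10],
# each paired with its clamped lengths.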
# ------------------------------------------------------------------------------
# segments_to_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def segments_to_sequence(
segments: List[Tuple[Tensor, Tensor]], time_axis: int
) -> Tuple[Tensor, Tensor]:
"""Concatenate segments into a full sequence."""
if len(segments) == 1:
return segments[0]
tensors_to_concat: List[Tensor] = []
lengths_to_stack: List[Tensor] = []
for tensor, lengths in segments:
tensors_to_concat.append(tensor)
lengths_to_stack.append(lengths)
sequence = torch.cat(tensors_to_concat, dim=time_axis)
lengths = torch.stack(lengths_to_stack, dim=0)
lengths = torch.sum(lengths, dim=0)
return sequence, lengths
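# Note (illustrative): with zero extra context,
# segments_to_sequence(sequence_to_segments(x, axis, lengths, size), axis)
# reassembles the padded sequence, and the stacked segment lengths sum back
# to the original lengths.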
def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False):
"""
    convert lengths (a 1-D Long/Int tensor) to a 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
batch_first: whether to return a (B, T) tensor
Return:
max_length: maximum length of B sequences
encoder_padding_mask: a (max_length, B) binary mask, where
[t, b] = False for t < lengths[b] and True otherwise
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
    encoder_padding_mask = (
        torch.arange(max_lengths)  # a (T,) tensor with [0, ..., T-1]
        .to(lengths.device)  # move to the right device
        .view(1, max_lengths)  # reshape to a (1, T)-shaped tensor
        .expand(bsz, -1)  # expand to a (B, T)-shaped tensor
        >= lengths.view(bsz, 1).expand(-1, max_lengths)
    )
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
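# Illustrative example: lengths = torch.tensor([2, 4]) with batch_first=True
# returns max_lengths == 4 and the mask
#   tensor([[False, False,  True,  True],
#           [False, False, False, False]])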
# ------------------------------------------------------------------------------
# attention suppression
# ------------------------------------------------------------------------------
def attention_suppression(attention_weights: Tensor, scale: float):
# B, H, qlen, klen -> B, H, qlen, 1
attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1)
attention_nozeros = attention_prob.to(torch.bool)
nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True)
    # For very sparse distributions, guard against all-zero rows
    key_sum = torch.sum(attention_prob, dim=-1, keepdim=True)
    # nozeros_sum should be > 1
key_mean = key_sum / (nozeros_sum + 1e-8)
# std calculation
dis = (attention_prob - key_mean) * (attention_prob - key_mean)
    # zero out dis wherever attention_prob is exactly zero
dis_masked = torch.where(
attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size())
)
key_var = torch.sum(dis_masked, dim=-1, keepdim=True)
key_var = key_var / (nozeros_sum - 1.0 + 1e-8)
key_std = torch.sqrt(key_var)
key_thread = key_mean - scale * key_std
    # keep attention_weights[i] if attention_prob[i] >= key_thread,
    # otherwise set it to "-inf"
inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach()
inf_tensor[:] = float("-inf")
attention_weights_float = torch.where(
attention_prob < key_thread,
inf_tensor,
attention_weights.float(),
)
return attention_weights_float.type_as(attention_weights)
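# Usage sketch (illustrative): given raw attention scores of shape
# (B, H, qlen, klen), entries whose softmax probability falls more than
# `scale` standard deviations below the per-row mean are replaced by -inf:
#   suppressed_scores = attention_suppression(attn_scores, scale=2.0)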
def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value):
return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
| 18,585
| 31.953901
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/cif_transformer.py
|
# @Time : 2022/1/20
# @Author : Minglun Han
# @File : cif_transformer.py
import sys
import logging
import math
from typing import Dict, List, Optional, Tuple
from pathlib import Path
import torch
import torch.nn as nn
import numpy as np
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.wav2vec.wav2vec2 import (
MASKING_DISTRIBUTION_CHOICES,
TransformerSentenceEncoderLayer,
)
from fairseq.models.transformer import Embedding, Linear, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
from fairseq.models.speech_to_text.s2t_transformer import (
Conv1dSubsampler,
S2TTransformerEncoder,
)
from torch import Tensor
logger = logging.getLogger(__name__)
np.set_printoptions(threshold=10000000)
torch.set_printoptions(profile="full")
@register_model("cif_transformer")
class CifTransformerModel(FairseqEncoderDecoderModel):
"""Adapted Transformer model (https://arxiv.org/abs/1706.03762) for
speech-to-text tasks. The Transformer encoder/decoder remains the same.
A trainable input subsampler is prepended to the Transformer encoder to
project inputs into the encoder dimension as well as downsample input
sequence for computational efficiency."""
def __init__(self, encoder, decoder, cif, ctc_proj):
# Register encoder and decoder
super().__init__(encoder, decoder)
# Register cif module and ctc projection
self.cif = cif
self.ctc_proj = ctc_proj
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
# Cif settings
parser.add_argument(
"--cif-embedding-dim",
type=int,
help="the dimension of the inputs of cif module",
)
parser.add_argument(
"--produce-weight-type",
type=str,
help="choose how to produce the weight for accumulation",
)
parser.add_argument(
"--cif-threshold", type=float, help="the threshold of firing"
)
parser.add_argument(
"--conv-cif-layer-num",
type=int,
help="the number of convolutional layers for cif weight generation",
)
parser.add_argument(
"--conv-cif-width",
type=int,
help="the width of kernel of convolutional layers",
)
parser.add_argument(
"--conv-cif-output-channels-num",
type=int,
help="the number of output channels of cif convolutional layers",
)
parser.add_argument(
"--conv-cif-dropout",
type=float,
)
parser.add_argument(
"--dense-cif-units-num",
type=int,
)
parser.add_argument(
"--apply-scaling",
type=bool,
)
parser.add_argument(
"--apply-tail-handling",
type=bool,
)
parser.add_argument(
"--tail-handling-firing-threshold",
type=float,
)
parser.add_argument(
"--add-cif-ctxt-layers",
type=bool,
)
parser.add_argument(
"--cif-ctxt-layers",
type=int,
)
parser.add_argument(
"--cif-ctxt-embed-dim",
type=int,
)
parser.add_argument(
"--cif-ctxt-ffn-embed-dim",
type=int,
)
parser.add_argument(
"--cif-ctxt-attention-heads",
type=int,
)
parser.add_argument(
"--cif-ctxt-dropout",
type=float,
)
parser.add_argument(
"--cif-ctxt-activation-dropout",
type=float,
)
parser.add_argument(
"--cif-ctxt-attention-dropout",
type=float,
)
parser.add_argument(
"--cif-ctxt-normalize-before",
type=bool,
)
# Extra decoder settings
parser.add_argument(
"--pre-final-proj-dim",
type=int,
)
parser.add_argument(
"--decoder-type",
type=str,
)
parser.add_argument(
"--nar-decoder-type",
type=str,
)
parser.add_argument(
"--decoder-dropout",
type=float,
)
parser.add_argument(
"--decoder-attention-dropout",
type=float,
)
parser.add_argument(
"--decoder-activation-dropout",
type=float,
)
parser.add_argument(
"--no-decoder-input-dropout",
type=bool,
# default=True,
)
parser.add_argument(
"--no-decoder-final-normalize",
type=bool,
# default=True,
)
# Other settings
parser.add_argument(
"--calulate-ctc-logits",
type=bool,
)
@classmethod
def build_encoder(cls, args):
# Apply original S2T Transformer Encoder as acoustic encoder
encoder = S2TTransformerEncoder(args)
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
        if args.decoder_type == "nar":  # build a non-autoregressive decoder
if args.nar_decoder_type == "projection":
return CifProjDecoder(args, task.target_dictionary)
elif args.nar_decoder_type == "transformer":
return CifNarTransformerDecoder(args, task.target_dictionary)
else:
raise NotImplementedError("Not implemented options.")
else:
raise NotImplementedError("Not implemented options")
@classmethod
def build_cif_middleware(cls, args):
return CifMiddleware(args)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
# build model main body
encoder = cls.build_encoder(args)
cif = cls.build_cif_middleware(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
# build ctc projection
ctc_proj = None
if args.calulate_ctc_logits:
ctc_proj = Linear(
args.encoder_embed_dim, len(task.target_dictionary)
).cuda()
return cls(encoder, decoder, cif, ctc_proj)
@staticmethod
def get_probs_from_logits(logits, log_probs=False):
"""
Get normalized probabilities (or log probs) from logits.
"""
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def forward(self, src_tokens, src_lengths, prev_output_tokens, target_lengths):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
method overwrites the forward method definition without **kwargs.
args:
src_tokens: speech features
            src_lengths: speech feature sequence lengths
"""
# Forward acoustic encoder
encoder_out = self.encoder(
src_tokens=src_tokens, src_lengths=src_lengths
        )  # 2x/4x downsampling by the acoustic encoder; keep as is
# Forward ctc projection to obtain ctc logits for ctc loss calculation
ctc_logits = None
if self.ctc_proj is not None:
ctc_logits = self.ctc_proj(encoder_out["encoder_out"][0]) # T x B x V
# Forward cif module
cif_out = self.cif(
encoder_outputs=encoder_out,
target_lengths=target_lengths if self.training else None,
input_lengths=encoder_out["conv_lengths"][0],
)
# Forward decoder part
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, cif_out=cif_out
)
final_outputs = {
# Encoder part outputs
"encoder_padding_mask": lengths_to_padding_mask(
encoder_out["conv_lengths"][0]
), # B x T
"ctc_logits": ctc_logits.transpose(0, 1), # B x T x V
# Cif module outputs
"quantity_out": cif_out[
"quantity_out"
], # Quantity out for quantity loss calculation
"cif_out": cif_out["cif_out"], # CIF out for decoder prediction, B x T x C
"cif_out_padding_mask": cif_out["cif_out_padding_mask"], # B x T
# Decoder part outputs
"decoder_out": decoder_out, # Decoder outputs (which is final logits for ce calculation)
}
return final_outputs
def get_ctc_output(self, src_tokens, src_lengths, **kwargs):
with torch.no_grad():
# Forward acoustic encoder
encoder_out = self.encoder(
src_tokens=src_tokens, src_lengths=src_lengths
            )  # 2x/4x downsampling by the acoustic encoder; keep as is
# Forward ctc projection to obtain ctc logits for ctc loss calculation
ctc_logits = None
if self.ctc_proj is not None:
ctc_logits = self.ctc_proj(encoder_out["encoder_out"][0]) # T x B x V
encoder_outputs_padding_mask = lengths_to_padding_mask(
encoder_out["conv_lengths"][0]
)
return ctc_logits, encoder_outputs_padding_mask
def get_cif_output(self, src_tokens, src_lengths, target_lengths, **kwargs):
with torch.no_grad():
# Forward acoustic encoder
encoder_out = self.encoder(
src_tokens=src_tokens, src_lengths=src_lengths
            )  # 2x/4x downsampling by the acoustic encoder; keep as is
# Forward cif module
cif_out = self.cif(
encoder_outputs=encoder_out,
target_lengths=target_lengths if self.training else None,
input_lengths=encoder_out["conv_lengths"][0],
)
return cif_out
def step_forward_decoder(
self, prev_decoded_tokens, cif_outputs, incremental_state=None, **kwargs
):
step_decoder_out, extra_outputs = self.decoder(
prev_output_tokens=prev_decoded_tokens,
cif_out=cif_outputs,
incremental_state=incremental_state,
**kwargs
)
return step_decoder_out, extra_outputs
class CifMiddleware(nn.Module):
def __init__(self, args):
super().__init__()
# Get configurations related to continuous integrate-and-fire
self.cif_threshold = args.cif_threshold
self.cif_output_dim = args.cif_embedding_dim
self.encoder_embed_dim = args.encoder_embed_dim
self.produce_weight_type = args.produce_weight_type
self.apply_scaling = args.apply_scaling
self.apply_tail_handling = args.apply_tail_handling
self.tail_handling_firing_threshold = args.tail_handling_firing_threshold
self.add_cif_ctxt_layers = args.add_cif_ctxt_layers
# Build weight projection layer to compute weight from encoder outputs
if self.produce_weight_type == "dense":
self.dense_proj = Linear(
self.encoder_embed_dim, args.dense_cif_units_num
).cuda()
self.weight_proj = Linear(args.dense_cif_units_num, 1).cuda()
elif self.produce_weight_type == "conv":
self.cif_conv_layer_num = args.conv_cif_layer_num
self.conv = torch.nn.Conv1d(
self.encoder_embed_dim,
args.conv_cif_output_channels_num,
args.conv_cif_width,
stride=1,
padding=int(args.conv_cif_width / 2),
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
).cuda()
self.conv_dropout = torch.nn.Dropout(p=args.conv_cif_dropout).cuda()
self.weight_proj = Linear(args.conv_cif_output_channels_num, 1).cuda()
else:
self.weight_proj = Linear(self.encoder_embed_dim, 1).cuda()
# Build the final projection layer for cif outputs
if self.cif_output_dim != self.encoder_embed_dim:
self.cif_output_proj = Linear(
self.encoder_embed_dim, self.cif_output_dim, bias=False
).cuda()
# Build cif contextual layers
if self.add_cif_ctxt_layers:
self.cif_ctxt_embed_dim = args.cif_ctxt_embed_dim
self.cif_ctxt_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=args.cif_ctxt_embed_dim,
ffn_embedding_dim=args.cif_ctxt_ffn_embed_dim,
num_attention_heads=args.cif_ctxt_attention_heads,
dropout=args.cif_ctxt_dropout,
activation_dropout=args.cif_ctxt_activation_dropout,
attention_dropout=args.cif_ctxt_attention_dropout,
layer_norm_first=args.cif_ctxt_normalize_before,
)
for _ in range(args.cif_ctxt_layers)
]
)
def forward(self, encoder_outputs, target_lengths=None, input_lengths=None):
"""
encoder_out should have shape B x T x C
encoder_padding_mask should have shape B x T
        target_lengths should have shape B
"""
# Prepare inputs
encoder_out = encoder_outputs["encoder_out"][0].transpose(0, 1) # B x T x C
if len(encoder_outputs["encoder_padding_mask"]) != 0:
encoder_padding_mask = encoder_outputs["encoder_padding_mask"][0] # B x T
else:
assert (
input_lengths is not None
), "Please ensure that input_lengths is provided."
encoder_padding_mask = lengths_to_padding_mask(input_lengths) # B x T
# Forward weight generation
if self.produce_weight_type == "dense":
proj_out = self.dense_proj(encoder_out)
act_proj_out = torch.relu(proj_out)
sig_input = self.weight_proj(act_proj_out)
weight = torch.sigmoid(sig_input)
# weight has shape [batch_size, length, 1]
elif self.produce_weight_type == "conv":
conv_input = encoder_out.permute(0, 2, 1)
# Adjust the shape of convolution layer input [B, C_in, T]
conv_out = self.conv(conv_input)
# conv_out has shape [B, C_out, T]
proj_input = conv_out.permute(0, 2, 1)
proj_input = self.conv_dropout(proj_input)
# Adjust conv output to shape [B, T, C_cif]
sig_input = self.weight_proj(proj_input)
weight = torch.sigmoid(sig_input)
else:
sig_input = self.weight_proj(encoder_out)
weight = torch.sigmoid(sig_input)
not_padding_mask = ~encoder_padding_mask
weight = (
torch.squeeze(weight, dim=-1) * not_padding_mask.int()
) # weight has shape B x T
org_weight = weight
# Sum weights
if self.training and self.apply_scaling and target_lengths is not None:
# if self.apply_scaling and target_lengths is not None: # For validation debugging
            # Scale the weights during training so that their sum matches target_lengths
            # (note: target_lengths here does not include <eos>)
weight_sum = weight.sum(-1) # weight_sum has shape [batch_size]
normalize_scalar = torch.unsqueeze(
target_lengths / (weight_sum + 1e-8), -1
) # normalize_scalar has shape [batch_size, 1]
weight = weight * normalize_scalar
# Integrate and fire
batch_size = encoder_out.size(0)
max_length = encoder_out.size(1)
encoder_embed_dim = encoder_out.size(2)
padding_start_id = not_padding_mask.sum(-1) # shape B
# Initialize
accumulated_weights = torch.zeros(batch_size, 0, dtype=encoder_out.dtype).cuda()
accumulated_states = torch.zeros(
batch_size, 0, encoder_embed_dim, dtype=encoder_out.dtype
).cuda()
fired_states = torch.zeros(
batch_size, 0, encoder_embed_dim, dtype=encoder_out.dtype
).cuda()
# Begin integrate and fire
for i in range(max_length):
# Get previous states from the recorded tensor
prev_accumulated_weight = (
torch.zeros([batch_size], dtype=encoder_out.dtype).cuda()
if i == 0
else accumulated_weights[:, i - 1]
)
prev_accumulated_state = (
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda()
if i == 0
else accumulated_states[:, i - 1, :]
)
            # Decide whether to fire (place a boundary) at the current step
cur_is_fired = (
(prev_accumulated_weight + weight[:, i]) >= self.cif_threshold
).unsqueeze(dim=-1)
# cur_is_fired with shape [batch_size, 1]
            # Update the accumulated weights depending on whether a boundary is fired
cur_weight = torch.unsqueeze(weight[:, i], -1)
# cur_weight has shape [batch_size, 1]
prev_accumulated_weight = torch.unsqueeze(prev_accumulated_weight, -1)
            # prev_accumulated_weight also has shape [batch_size, 1]
remained_weight = (
torch.ones_like(prev_accumulated_weight, dtype=encoder_out.dtype).cuda()
- prev_accumulated_weight
)
            # remained_weight has shape [batch_size, 1]
# Obtain the accumulated weight of current step
cur_accumulated_weight = torch.where(
cur_is_fired,
cur_weight - remained_weight,
cur_weight + prev_accumulated_weight,
) # [batch_size, 1]
# Obtain accumulated state of current step
cur_accumulated_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
(cur_weight - remained_weight) * encoder_out[:, i, :],
prev_accumulated_state + cur_weight * encoder_out[:, i, :],
) # B x C
# Obtain fired state of current step
            # firing locations have meaningful representations, while non-firing locations are all-zero embeddings
cur_fired_state = torch.where(
cur_is_fired.repeat(1, encoder_embed_dim),
prev_accumulated_state + remained_weight * encoder_out[:, i, :],
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda(),
) # B x C
            # Handle the speech tail by rounding up or down
if (not self.training) and self.apply_tail_handling:
# When encoder output position exceeds the max valid position,
# if accumulated weights is greater than tail_handling_firing_threshold,
# current state should be reserved, otherwise it is discarded.
cur_fired_state = torch.where(
i
== padding_start_id.unsqueeze(dim=-1).repeat(
[1, encoder_embed_dim]
), # B x C
torch.where(
cur_accumulated_weight.repeat([1, encoder_embed_dim])
<= self.tail_handling_firing_threshold, # B x C
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda(),
                        # less than or equal to tail_handling_firing_threshold: discarded.
cur_accumulated_state / (cur_accumulated_weight + 1e-10)
                        # greater than tail_handling_firing_threshold: normalized and kept.
                        # eps = 1e-10 to prevent division by zero.
),
cur_fired_state,
) # B x C
            # For the normal condition (both training and evaluation),
            # mask padded locations with all-zero embeddings
cur_fired_state = torch.where(
torch.full(
[batch_size, encoder_embed_dim], i, dtype=encoder_out.dtype
).cuda()
> padding_start_id.unsqueeze(dim=-1).repeat(
[1, encoder_embed_dim]
), # B x C
torch.zeros(
[batch_size, encoder_embed_dim], dtype=encoder_out.dtype
).cuda(),
cur_fired_state,
)
# Update accumulated arguments
accumulated_weights = torch.cat(
(accumulated_weights, cur_accumulated_weight), 1
) # B x T
accumulated_states = torch.cat(
(accumulated_states, torch.unsqueeze(cur_accumulated_state, 1)), 1
) # shape = [B, L, D]
fired_states = torch.cat(
(fired_states, torch.unsqueeze(cur_fired_state, 1)), 1
) # shape = [B, L, D]
        # Extract cif_outputs for each utterance
fired_marks = (torch.abs(fired_states).sum(-1) != 0.0).int() # B x T
fired_utt_length = fired_marks.sum(-1) # B
fired_max_length = (
fired_utt_length.max().int()
        )  # The maximum number of firings in the current batch
cif_outputs = torch.zeros(
[0, fired_max_length, encoder_embed_dim], dtype=encoder_out.dtype
).cuda() # Initialize cif outputs
cif_durations = torch.zeros(
[0, fired_max_length], dtype=torch.int32
).cuda() # Initialize cif durations
def dynamic_partition(
data: torch.Tensor, partitions: torch.Tensor, num_partitions=None
):
assert (
len(partitions.shape) == 1
), "Only one dimensional partitions supported"
assert (
data.shape[0] == partitions.shape[0]
), "Partitions requires the same size as data"
if num_partitions is None:
                num_partitions = int(torch.max(partitions).item()) + 1
return [data[partitions == i] for i in range(num_partitions)]
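        # Illustrative example: for data with rows [a, b, c] and
        # partitions = tensor([0, 1, 0]),
        # dynamic_partition(data, partitions, 2) returns [rows (a, c), row (b,)].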
for j in range(batch_size):
# Get information of j-th sample
cur_utt_fired_mark = fired_marks[j, :]
cur_utt_fired_state = fired_states[j, :, :]
cur_utt_outputs = dynamic_partition(
cur_utt_fired_state, cur_utt_fired_mark, 2
)
cur_utt_output = cur_utt_outputs[1] # Get integrated representations
            cur_utt_length = cur_utt_output.size(0)  # The total number of firings
pad_length = fired_max_length - cur_utt_length # Calculate padding length
cur_utt_output = torch.cat(
(
cur_utt_output,
torch.full(
[pad_length, encoder_embed_dim], 0.0, dtype=encoder_out.dtype
).cuda(),
),
dim=0,
) # Pad current utterance cif outputs to fired_max_length
cur_utt_output = torch.unsqueeze(cur_utt_output, 0)
# Reshape to [1, fired_max_length, encoder_embed_dim]
# Concatenate cur_utt_output and cif_outputs along batch axis
cif_outputs = torch.cat([cif_outputs, cur_utt_output], 0)
# Collect cif durations
cur_fired_indices = torch.nonzero(cur_utt_fired_mark)[:, -1]
shifted_cur_fired_indices = torch.cat(
[torch.zeros([1], dtype=torch.int32).cuda(), cur_fired_indices], dim=-1
)[: cur_fired_indices.size(0)]
cur_cif_durations = cur_fired_indices - shifted_cur_fired_indices
cur_cif_durations = torch.cat(
(
cur_cif_durations,
torch.full([pad_length], 0, dtype=torch.int32).cuda(),
),
dim=0,
).unsqueeze(dim=0)
cif_durations = torch.cat(
[cif_durations, cur_cif_durations], dim=0
            )  # concatenate along the batch axis
cif_out_padding_mask = (torch.abs(cif_outputs).sum(-1) != 0.0).int()
cif_outputs_lens = cif_out_padding_mask.sum(-1)
cif_outputs = torch.where(
cif_outputs_lens.unsqueeze(-1).unsqueeze(-1) > 0, # B x T x C
cif_outputs,
torch.zeros_like(cif_outputs).type_as(cif_outputs).cuda(),
)
        # Handle the edge case where no boundary fired in the whole batch
if cif_out_padding_mask.size(-1) == 0:
cif_outputs = (
torch.zeros([batch_size, 1, cif_outputs.size(-1)])
.type_as(cif_outputs)
.cuda()
)
cif_out_padding_mask = torch.ones([batch_size, 1]).bool().cuda()
base_cif_out_padding_mask = (
torch.cat(
[torch.ones(1), torch.zeros(cif_out_padding_mask.size(-1) - 1)], dim=0
)
.unsqueeze(0)
.cuda()
) # 1 x 1
temp_cif_out_padding_mask = base_cif_out_padding_mask.repeat(
cif_out_padding_mask.size(0), 1
) # B x T
cif_out_padding_mask = torch.where(
cif_outputs_lens.unsqueeze(-1) > 0, # B x T
cif_out_padding_mask,
temp_cif_out_padding_mask.type_as(cif_out_padding_mask),
)
if self.training:
# In training phase, use the sum of original weights
# as quantity out for quantity loss.
quantity_out = org_weight.sum(-1)
else:
quantity_out = weight.sum(-1)
if self.cif_output_dim != encoder_embed_dim:
cif_outputs = self.cif_output_proj(cif_outputs)
ctxt_cif_outputs = None
if self.add_cif_ctxt_layers and self.cif_output_dim == self.cif_ctxt_embed_dim:
x = cif_outputs.transpose(0, 1)
padding_mask = ~cif_out_padding_mask.bool()
for layer in self.cif_ctxt_stacks:
x, _ = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
ctxt_cif_outputs = x.transpose(0, 1)
return {
"cif_out": cif_outputs, # shape = [batch_size, fired_max_length, cif_output_dim]
"cif_out_padding_mask": cif_out_padding_mask, # shape = [batch_size, fired_max_length]
"ctxt_cif_out": ctxt_cif_outputs, # shape = [batch_size, fired_max_length, cif_ctxt_embed_dim]
"quantity_out": quantity_out, # shape = [batch_size]
"cif_durations": cif_durations, # shape = [batch_size, fired_max_length]
}
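# Usage sketch (illustrative): inside CifTransformerModel.forward,
#   cif_out = self.cif(encoder_outputs=encoder_out,
#                      target_lengths=target_lengths, input_lengths=input_lengths)
# cif_out["cif_out"] (B x fired_max_length x C) feeds the decoder, while
# cif_out["quantity_out"] (shape B) is compared against the target lengths
# for the quantity loss during training.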
class CifProjDecoder(FairseqDecoder):
def __init__(self, cfg, dictionary):
super().__init__(dictionary)
# Load parameters and build model
if cfg.no_decoder_input_dropout:
self.input_dropout = None
else:
self.input_dropout = FairseqDropout(
p=cfg.decoder_dropout, module_name=self.__class__.__name__
)
if cfg.decoder_normalize_before and not cfg.no_decoder_final_normalize:
self.layer_norm = LayerNorm(cfg.encoder_embed_dim)
else:
self.layer_norm = None
self.pre_final_proj_dim = cfg.pre_final_proj_dim
self.output_dim = len(self.dictionary)
self.output_proj = Linear(self.pre_final_proj_dim, self.output_dim).cuda()
def forward(self, prev_output_tokens=None, cif_out=None, **kwargs):
x = (
cif_out["ctxt_cif_out"]
if cif_out["ctxt_cif_out"] is not None
else cif_out["cif_out"]
)
# Collect shape information
batch_size, cif_len, cif_embed_dim = x.size()
prev_output_tokens_len = prev_output_tokens.size(1)
        # Handle the exception of no elements in cif_outputs
if cif_len == 0 and not self.training:
cif_len = 1
x = torch.zeros([batch_size, cif_len, cif_embed_dim], dtype=x.dtype).cuda()
        # Align the lengths of the input tokens and the cif outputs
min_len = min(prev_output_tokens_len, cif_len)
x = x[:, :min_len, :] # B x min_len x C
# Add dropout
if self.input_dropout is not None:
x = self.input_dropout(x)
# Add normalization
if self.layer_norm is not None:
x = self.layer_norm(x)
        # Forward the decoder output projection
final_logits = self.output_proj(x)
return final_logits, None
class CifNarTransformerDecoder(CifProjDecoder):
def __init__(self, cfg, dictionary):
super().__init__(cfg, dictionary)
# Load decoder parameters
self.decoder_layers = cfg.decoder_layers
self.decoder_embed_dim = cfg.decoder_embed_dim
self.decoder_ffn_embed_dim = cfg.decoder_ffn_embed_dim
self.decoder_attention_heads = cfg.decoder_attention_heads
self.decoder_normalize_before = cfg.decoder_normalize_before
self.decoder_dropout = cfg.decoder_dropout
self.decoder_attention_dropout = cfg.decoder_attention_dropout
self.decoder_activation_dropout = cfg.decoder_activation_dropout
assert (
self.decoder_embed_dim == self.pre_final_proj_dim
), "ensure that the dimension of decoder outputs is equal to pre_final_proj_dim"
# Build decoder stacks
self.decoder_stacks = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.decoder_embed_dim,
ffn_embedding_dim=self.decoder_ffn_embed_dim,
num_attention_heads=self.decoder_attention_heads,
dropout=self.decoder_dropout,
activation_dropout=self.decoder_activation_dropout,
attention_dropout=self.decoder_attention_dropout,
layer_norm_first=self.decoder_normalize_before,
)
for _ in range(cfg.decoder_layers)
]
)
def forward(self, prev_output_tokens=None, cif_out=None, **kwargs):
x = (
cif_out["ctxt_cif_out"]
if cif_out["ctxt_cif_out"] is not None
else cif_out["cif_out"]
) # B x T x C
padding_mask = ~cif_out["cif_out_padding_mask"].bool() # B x T
# Collect shape information
batch_size, cif_len, cif_embed_dim = x.size()
prev_output_tokens_len = prev_output_tokens.size(1)
        # Handle the exception of no elements in cif_outputs
if cif_len == 0 and not self.training:
cif_len = 1
x = torch.zeros([batch_size, cif_len, cif_embed_dim]).cuda() # B x 1 x C
            padding_mask = torch.zeros([batch_size, cif_len]).bool().cuda()  # B x 1
        # Align the lengths of the input tokens, the cif outputs, and the padding_mask
min_len = min(prev_output_tokens_len, cif_len)
x = x[:, :min_len, :] # B x min_len x C
padding_mask = padding_mask[:, :min_len] # B x min_len
# Forward input dropout
if self.input_dropout is not None:
x = self.input_dropout(x)
# Forward decoder
x = x.transpose(0, 1) # T x B x C
for layer in self.decoder_stacks:
x, _ = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
x = x.transpose(0, 1) # B x T x C
# Forward normalization
if self.layer_norm is not None:
x = self.layer_norm(x)
final_logits = self.output_proj(x)
return final_logits, None
@register_model_architecture(model_name="cif_transformer", arch_name="cif_transformer")
def base_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 768
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 768)
    args.apply_scaling = getattr(args, "apply_scaling", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.5
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "projection")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_alpha"
)
def cif_transformer_alpha(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
base_architecture(args)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_1"
)
def cif_transformer_exp1_1(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
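    # (Hedged background, not from this file: CIF integrates per-frame weights
    # along time and "fires" an output embedding whenever the running sum
    # crosses cif_threshold; residual weight above
    # tail_handling_firing_threshold fires one last output at utterance end.)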
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "projection")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
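# Usage sketch (illustrative; data paths and extra flags are placeholders):
# a registered architecture is selected through fairseq's --arch flag, e.g.
#   fairseq-train ${DATA_DIR} --arch cif_transformer_exp1_1 ...
# after which the function above fills in every hyperparameter left unset.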
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_2"
)
def cif_transformer_exp1_2(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "projection")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_3"
)
def cif_transformer_exp1_3(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 512)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 256
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 256)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "projection")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_4"
)
def cif_transformer_exp1_4(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "transformer")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
args.no_decoder_final_normalize = getattr(args, "no_decoder_final_normalize", True)
args.no_decoder_input_dropout = getattr(args, "no_decoder_input_dropout", True)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_5"
)
def cif_transformer_exp1_5(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "projection")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
args.no_decoder_final_normalize = getattr(args, "no_decoder_final_normalize", False)
args.no_decoder_input_dropout = getattr(args, "no_decoder_input_dropout", False)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_6"
)
def cif_transformer_exp1_6(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "transformer")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
args.no_decoder_final_normalize = getattr(args, "no_decoder_final_normalize", False)
args.no_decoder_input_dropout = getattr(args, "no_decoder_input_dropout", False)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_7"
)
def cif_transformer_exp1_7(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "transformer")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
args.no_decoder_final_normalize = getattr(args, "no_decoder_final_normalize", True)
args.no_decoder_input_dropout = getattr(args, "no_decoder_input_dropout", True)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_8"
)
def cif_transformer_exp1_8(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "projection")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
args.no_decoder_final_normalize = getattr(args, "no_decoder_final_normalize", True)
args.no_decoder_input_dropout = getattr(args, "no_decoder_input_dropout", True)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp1_9"
)
def cif_transformer_exp1_9(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 512
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 512)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "transformer")
args.decoder_dropout = getattr(args, "decoder_dropout", 0.25)
args.decoder_attention_dropout = getattr(args, "decoder_attention_dropout", 0.25)
args.decoder_activation_dropout = getattr(args, "decoder_activation_dropout", 0.25)
args.no_decoder_final_normalize = getattr(args, "no_decoder_final_normalize", False)
args.no_decoder_input_dropout = getattr(args, "no_decoder_input_dropout", False)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
@register_model_architecture(
model_name="cif_transformer", arch_name="cif_transformer_exp6"
)
def cif_transformer_exp6(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 512)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.15)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
# Cif settings
args.cif_embedding_dim = getattr(args, "cif_embedding_dim", args.encoder_embed_dim)
args.produce_weight_type = getattr(args, "produce_weight_type", "conv")
args.cif_threshold = getattr(args, "cif_threshold", 0.99)
args.conv_cif_layer_num = getattr(args, "conv_cif_layer_num", 1)
args.conv_cif_width = getattr(args, "conv_cif_width", 3)
args.conv_cif_output_channels_num = getattr(
args, "conv_cif_output_channels_num", 256
)
args.conv_cif_dropout = getattr(args, "conv_cif_dropout", args.dropout)
args.dense_cif_units_num = getattr(args, "dense_cif_units_num", 256)
args.apply_scaling = getattr(args, "conv_cif_dropout", True)
args.apply_tail_handling = getattr(args, "apply_tail_handling", True)
args.tail_handling_firing_threshold = getattr(
args, "tail_handling_firing_threshold", 0.4
)
args.add_cif_ctxt_layers = getattr(args, "add_cif_ctxt_layers", False)
args.cif_ctxt_layers = getattr(args, "cif_ctxt_layers", 2)
args.cif_ctxt_embed_dim = getattr(
args, "cif_ctxt_embed_dim", args.encoder_embed_dim
)
args.cif_ctxt_ffn_embed_dim = getattr(
args, "cif_ctxt_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.cif_ctxt_attention_heads = getattr(
args, "cif_ctxt_attention_heads", args.encoder_attention_heads
)
args.cif_ctxt_dropout = getattr(args, "cif_ctxt_dropout", args.dropout)
args.cif_ctxt_activation_dropout = getattr(
args, "cif_ctxt_activation_dropout", args.activation_dropout
)
args.cif_ctxt_attention_dropout = getattr(
args, "cif_ctxt_attention_dropout", args.attention_dropout
)
args.cif_ctxt_normalize_before = getattr(
args, "cif_ctxt_normalize_before", args.encoder_normalize_before
)
# Cif-style Decoder settings
args.pre_final_proj_dim = getattr(
args, "pre_final_proj_dim", args.decoder_embed_dim
)
args.decoder_type = getattr(args, "decoder_type", "nar")
args.nar_decoder_type = getattr(args, "nar_decoder_type", "transformer")
args.decoder_dropout = getattr(args, "decoder_dropout", args.dropout)
args.decoder_attention_dropout = getattr(
args, "decoder_attention_dropout", args.attention_dropout
)
args.decoder_activation_dropout = getattr(
args, "decoder_activation_dropout", args.activation_dropout
)
args.no_decoder_final_normalize = getattr(args, "no_decoder_final_normalize", False)
args.no_decoder_input_dropout = getattr(args, "no_decoder_input_dropout", False)
# Other settings
args.calulate_ctc_logits = getattr(args, "calulate_ctc_logits", True)
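# A minimal illustrative sketch (not fairseq API; `_getattr_default_demo` is a
# hypothetical helper) of the getattr-default pattern used by every
# architecture function above: a value already present on `args`, e.g. one set
# on the command line, always wins over the architecture default.
def _getattr_default_demo():
    from argparse import Namespace
    args = Namespace(dropout=0.3)  # pretend the user passed --dropout 0.3
    args.dropout = getattr(args, "dropout", 0.1)  # stays 0.3
    # derived settings inherit the (possibly user-set) base value:
    args.attention_dropout = getattr(args, "attention_dropout", args.dropout)  # 0.3
    return args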
| 91,976
| 43.071394
| 112
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/convtransformer.py
|
#!/usr/bin/env python3
import logging
import math
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import LayerNorm, PositionalEmbedding, TransformerEncoderLayer
from torch import Tensor
logger = logging.getLogger(__name__)
@register_model("convtransformer")
class ConvTransformerModel(FairseqEncoderDecoderModel):
"""
Transformer-based Speech translation model from ESPNet-ST
https://arxiv.org/abs/2004.10234
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--decoder-output-dim",
type=int,
metavar="N",
help="decoder output dimension (extra linear layer if different from decoder embed dim)",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--load-pretrained-decoder-from",
type=str,
metavar="STR",
help="model to take decoder weights from (for initialization)",
)
parser.add_argument(
"--conv-out-channels",
type=int,
metavar="INT",
help="the number of output channels of conv layer",
)
@classmethod
def build_encoder(cls, args):
encoder = ConvTransformerEncoder(args)
if getattr(args, "load_pretrained_encoder_from", None):
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
decoder = TransformerDecoderNoExtra(args, task.target_dictionary, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
return cls(encoder, decoder)
@staticmethod
@torch.jit.unused
def set_batch_first(lprobs):
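        # Flag the log-probs as batch-first (B, T, V) for the training
        # criterion; this helper is excluded from TorchScript via
        # @torch.jit.unused above.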
lprobs.batch_first = True
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
if self.training:
self.set_batch_first(lprobs)
return lprobs
def output_layout(self):
return "BTD"
"""
The forward method inherited from the base class has a **kwargs argument in
    its input, which is not supported in torchscript. This method overrides the forward
method definition without **kwargs.
"""
def forward(self, src_tokens, src_lengths, prev_output_tokens):
encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
return decoder_out
class ConvTransformerEncoder(FairseqEncoder):
"""Conv + Transformer encoder"""
def __init__(self, args):
"""Construct an Encoder object."""
super().__init__(None)
self.dropout = args.dropout
self.embed_scale = (
1.0 if args.no_scale_embedding else math.sqrt(args.encoder_embed_dim)
)
self.padding_idx = 1
self.in_channels = 1
self.input_dim = args.input_feat_per_channel
self.conv = torch.nn.Sequential(
torch.nn.Conv2d(1, args.conv_out_channels, 3, stride=2, padding=3 // 2),
torch.nn.ReLU(),
torch.nn.Conv2d(
args.conv_out_channels,
args.conv_out_channels,
3,
stride=2,
padding=3 // 2,
),
torch.nn.ReLU(),
)
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim, args.conv_out_channels
)
self.out = torch.nn.Linear(transformer_input_dim, args.encoder_embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions,
args.encoder_embed_dim,
self.padding_idx,
learned=False,
)
self.transformer_layers = nn.ModuleList([])
self.transformer_layers.extend(
[TransformerEncoderLayer(args) for i in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
def pooling_ratio(self):
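        # two stride-2 Conv2d layers -> 4x temporal downsampling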
return 4
def infer_conv_output_dim(self, in_channels, input_dim, out_channels):
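        # Infer the flattened post-conv feature size by pushing a dummy batch
        # through throwaway conv layers shaped like self.conv.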
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
x = torch.nn.Conv2d(1, out_channels, 3, stride=2, padding=3 // 2)(x)
x = torch.nn.Conv2d(out_channels, out_channels, 3, stride=2, padding=3 // 2)(x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def forward(self, src_tokens, src_lengths):
"""Encode input sequence.
:param torch.Tensor xs: input tensor
:param torch.Tensor masks: input mask
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
bsz, max_seq_len, _ = src_tokens.size()
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
x = self.conv(x)
bsz, _, output_seq_len, _ = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
x = self.out(x)
x = self.embed_scale * x
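        # Recover per-utterance lengths after conv subsampling: estimate the
        # overall subsampling factor from the max lengths, then clamp to the
        # actual output length.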
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
input_len_0 = (src_lengths.float() / subsampling_factor).ceil().long()
input_len_1 = x.size(0) * torch.ones([src_lengths.size(0)]).long().to(
input_len_0.device
)
input_lengths = torch.min(input_len_0, input_len_1)
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
for layer in self.transformer_layers:
x = layer(x, encoder_padding_mask)
if not encoder_padding_mask.any():
maybe_encoder_padding_mask = None
else:
maybe_encoder_padding_mask = encoder_padding_mask
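        # Fields are wrapped in (possibly empty) lists to match the
        # TorchScript-compatible fairseq encoder-output interface.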
return {
"encoder_out": [x],
"encoder_padding_mask": [maybe_encoder_padding_mask]
if maybe_encoder_padding_mask is not None
else [],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
(encoder_out["encoder_padding_mask"][0]).index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
(encoder_out["encoder_embedding"][0]).index_select(0, new_order)
]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out,
"encoder_padding_mask": new_encoder_padding_mask,
"encoder_embedding": new_encoder_embedding,
"encoder_states": encoder_states,
"src_tokens": [],
"src_lengths": [],
}
class TransformerDecoderNoExtra(TransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(model_name="convtransformer", arch_name="convtransformer")
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.max_source_positions = getattr(args, "max_source_positions", 3000)
args.max_target_positions = getattr(args, "max_target_positions", 1024)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.conv_out_channels = getattr(args, "conv_out_channels", args.encoder_embed_dim)
@register_model_architecture("convtransformer", "convtransformer_espnet")
def convtransformer_espnet(args):
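    # Only the ESPNet-style overrides are set here; the remaining fields are
    # filled in by base_architecture(args), which build_model always applies.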
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
| 16,567
| 35.899777
| 101
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .berard import * # noqa
from .convtransformer import * # noqa
from .s2t_transformer import * # noqa
from .xm_transformer import * # noqa
from .cif_transformer import * # noqa
from .s2t_cif_transformer import * # noqa
| 406
| 32.916667
| 65
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/s2t_transformer.py
|
#!/usr/bin/env python3
import logging
import math
from typing import Dict, List, Optional, Tuple
from pathlib import Path
import torch
import torch.nn as nn
from fairseq import checkpoint_utils, utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import Embedding, TransformerDecoder
from fairseq.modules import (
FairseqDropout,
LayerNorm,
PositionalEmbedding,
TransformerEncoderLayer,
)
from torch import Tensor
logger = logging.getLogger(__name__)
class Conv1dSubsampler(nn.Module):
"""Convolutional subsampler: a stack of 1D convolution (along temporal
dimension) followed by non-linear activation via gated linear units
(https://arxiv.org/abs/1911.08460)
Args:
in_channels (int): the number of input channels
mid_channels (int): the number of intermediate channels
out_channels (int): the number of output channels
kernel_sizes (List[int]): the kernel size for each convolutional layer
"""
def __init__(
self,
in_channels: int,
mid_channels: int,
out_channels: int,
kernel_sizes: List[int] = (3, 3),
):
super(Conv1dSubsampler, self).__init__()
self.n_layers = len(kernel_sizes)
self.conv_layers = nn.ModuleList(
nn.Conv1d(
in_channels if i == 0 else mid_channels // 2,
mid_channels if i < self.n_layers - 1 else out_channels * 2,
k,
stride=2,
padding=k // 2,
)
for i, k in enumerate(kernel_sizes)
)
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
for _ in range(self.n_layers):
out = ((out.float() - 1) / 2 + 1).floor().long()
return out
def forward(self, src_tokens, src_lengths):
bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D)
x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T
        for conv in self.conv_layers:
            x = conv(x)
            x = nn.functional.glu(
                x, dim=1
            )  # GLU halves the channel dimension by default
_, _, out_seq_len = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D)
return x, self.get_out_seq_lens_tensor(src_lengths)
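# A minimal usage sketch of the subsampler's shape arithmetic (all values are
# hypothetical, for illustration only): each stride-2 conv maps a length T to
# floor((T - 1) / 2 + 1), so two layers shrink 100 frames to 25, and GLU
# halves the conv channels, giving 80 -> 1024 -> 512 -> 512 -> 256 here.
def _demo_conv1d_subsampler():
    subsampler = Conv1dSubsampler(
        in_channels=80, mid_channels=1024, out_channels=256, kernel_sizes=[3, 3]
    )
    feats = torch.rand(2, 100, 80)  # B x T x (C x D)
    out, out_lengths = subsampler(feats, torch.tensor([100, 80]))
    assert out.shape == (25, 2, 256)  # T' x B x C after two stride-2 layers
    assert out_lengths.tolist() == [25, 20]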
@register_model("s2t_transformer")
class S2TTransformerModel(FairseqEncoderDecoderModel):
"""Adapted Transformer model (https://arxiv.org/abs/1706.03762) for
speech-to-text tasks. The Transformer encoder/decoder remains the same.
A trainable input subsampler is prepended to the Transformer encoder to
project inputs into the encoder dimension as well as downsample input
sequence for computational efficiency."""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# input
parser.add_argument(
"--conv-kernel-sizes",
type=str,
metavar="N",
help="kernel sizes of Conv1d subsampling layers",
)
parser.add_argument(
"--conv-channels",
type=int,
metavar="N",
help="# of channels in Conv1d subsampling layers",
)
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
default="relu",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--no-scale-embedding",
action="store_true",
help="if True, dont scale embeddings",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
"--encoder-freezing-updates",
type=int,
metavar="N",
help="freeze encoder for first N updates",
)
@classmethod
def build_encoder(cls, args):
encoder = S2TTransformerEncoder(args)
pretraining_path = getattr(args, "load_pretrained_encoder_from", None)
if pretraining_path is not None:
if not Path(pretraining_path).exists():
logger.warning(
f"skipped pretraining because {pretraining_path} does not exist"
)
else:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=pretraining_path
)
logger.info(f"loaded pretrained encoder from: {pretraining_path}")
return encoder
@classmethod
def build_decoder(cls, args, task, embed_tokens):
return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
decoder_embed_tokens = build_embedding(
task.target_dictionary, args.decoder_embed_dim
)
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args, task, decoder_embed_tokens)
return cls(encoder, decoder)
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def forward(self, src_tokens, src_lengths, prev_output_tokens):
"""
        The forward method inherited from the base class has a **kwargs
        argument in its input, which is not supported in torchscript. This
        method overrides the forward method definition without **kwargs.
"""
encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
decoder_out = self.decoder(
prev_output_tokens=prev_output_tokens, encoder_out=encoder_out
)
return decoder_out
class S2TTransformerEncoder(FairseqEncoder):
"""
    Speech-to-text Transformer encoder that consists of an input subsampler
    followed by a Transformer encoder.
"""
def __init__(self, args):
super().__init__(None)
self.encoder_freezing_updates = args.encoder_freezing_updates
self.num_updates = 0
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = 1
self.subsample = Conv1dSubsampler(
args.input_feat_per_channel * args.input_channels,
args.conv_channels,
args.encoder_embed_dim,
[int(k) for k in args.conv_kernel_sizes.split(",")],
)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.transformer_layers = nn.ModuleList(
[TransformerEncoderLayer(args) for _ in range(args.encoder_layers)]
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
def _forward(self, src_tokens, src_lengths, return_all_hiddens=False):
x, input_lengths = self.subsample(src_tokens, src_lengths)
x = self.embed_scale * x
encoder_padding_mask = lengths_to_padding_mask(input_lengths)
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = self.dropout_module(x)
encoder_states = []
for layer in self.transformer_layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask]
if encoder_padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
"conv_lengths": [input_lengths],
}
def forward(self, src_tokens, src_lengths, return_all_hiddens=False):
if self.num_updates < self.encoder_freezing_updates:
with torch.no_grad():
x = self._forward(
src_tokens, src_lengths, return_all_hiddens=return_all_hiddens
)
else:
x = self._forward(
src_tokens, src_lengths, return_all_hiddens=return_all_hiddens
)
return x
def reorder_encoder_out(self, encoder_out, new_order):
new_encoder_out = (
[]
if len(encoder_out["encoder_out"]) == 0
else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]]
)
new_encoder_padding_mask = (
[]
if len(encoder_out["encoder_padding_mask"]) == 0
else [
x.index_select(0, new_order)
for x in encoder_out["encoder_padding_mask"]
]
)
new_encoder_embedding = (
[]
if len(encoder_out["encoder_embedding"]) == 0
else [
x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]
]
)
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [], # B x T
"src_lengths": [], # B x 1
}
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self.num_updates = num_updates
class TransformerDecoderScriptable(TransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
@register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer")
def base_architecture(args):
args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0)
# Convolutional subsampler
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
# Transformer
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
@register_model_architecture("s2t_transformer", "s2t_transformer_s")
def s2t_transformer_s(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.dropout = getattr(args, "dropout", 0.1)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_xs")
def s2t_transformer_xs(args):
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_layers = getattr(args, "decoder_layers", 3)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4)
args.dropout = getattr(args, "dropout", 0.3)
s2t_transformer_s(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_sp")
def s2t_transformer_sp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_s(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_m")
def s2t_transformer_m(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.dropout = getattr(args, "dropout", 0.15)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_mp")
def s2t_transformer_mp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_m(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_l")
def s2t_transformer_l(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.2)
base_architecture(args)
@register_model_architecture("s2t_transformer", "s2t_transformer_lp")
def s2t_transformer_lp(args):
args.encoder_layers = getattr(args, "encoder_layers", 16)
s2t_transformer_l(args)
| 19,248
| 35.875479
| 97
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/speech_to_text/modules/emformer.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import re
from functools import partial
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq.models import (
FairseqEncoder,
)
from fairseq.models.speech_to_text.utils import (
NoOp,
lengths_to_padding_mask,
segments_to_sequence,
)
from fairseq.models.speech_to_text.utils import (
attention_suppression,
layer_norm_backward_hook,
)
from torch import Tensor, device as Device
from torch.ao.quantization.qconfig import (
default_dynamic_qconfig,
per_channel_dynamic_qconfig,
)
class RelativePositionEmbedding(nn.Module):
"""
Implementation according to https://arxiv.org/abs/1803.02155
"""
def __init__(self, head_dim, max_position, norm_init=True):
super().__init__()
self.head_dim = head_dim
self.max_position = max_position
self.embeddings = nn.Parameter(torch.Tensor(max_position * 2 + 1, head_dim))
if norm_init:
nn.init.xavier_normal_(self.embeddings)
else:
nn.init.xavier_uniform_(self.embeddings)
def forward(self, input: Tensor):
output = nn.functional.embedding(input.long(), self.embeddings)
return output
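# A minimal lookup sketch (hypothetical sizes, illustration only): pairwise
# key/query distances are clamped to [-max_position, max_position] and
# shifted by +max_position so they index rows 0 .. 2 * max_position of the
# embedding table, one head_dim vector per (query, key) pair.
def _demo_relative_position_lookup():
    max_position = 4
    rpe = RelativePositionEmbedding(head_dim=8, max_position=max_position)
    q_pos = torch.arange(3)  # query positions
    k_pos = torch.arange(5)  # key positions
    distance = k_pos[None, :] - q_pos[:, None]
    index = torch.clamp(distance, -max_position, max_position) + max_position
    emb = rpe(index)
    assert emb.shape == (3, 5, 8)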
class Fp32LayerNorm(nn.Module):
def __init__(
self,
input_dim,
clamp_grad=True,
max_grad_value=256,
eps=1e-5,
elementwise_affine=True,
):
super().__init__()
self.torch_module = torch.nn.LayerNorm(
input_dim, eps=eps, elementwise_affine=elementwise_affine
)
if clamp_grad:
hook = partial(layer_norm_backward_hook, clamp_value=max_grad_value)
self.torch_module.register_backward_hook(hook)
def forward(self, input):
output = torch.nn.functional.layer_norm(
input.float(),
self.torch_module.normalized_shape,
self.torch_module.weight.float()
if self.torch_module.weight is not None
else None,
self.torch_module.bias.float()
if self.torch_module.bias is not None
else None,
self.torch_module.eps,
).type_as(input)
return output
# ------------------------------------------------------------------------------
# PositionwiseFF
# ------------------------------------------------------------------------------
class PositionwiseFF(nn.Module):
"""
FFN layer in transformer.
Args:
input_dim: input embedding dimension
ffn_dim: FFN layer inner dimension
        dropout_on_fc1: dropout for the first linear layer
        dropout_on_fc2: dropout for the second linear layer
activation_fn: activation function used after first linear layer. \
Only relu or gelu is supported.
"""
def __init__(
self, input_dim, ffn_dim, dropout_on_fc1, dropout_on_fc2, activation_fn
):
super(PositionwiseFF, self).__init__()
self.input_dim = input_dim
self.ffn_dim = ffn_dim
if activation_fn == "relu":
ac = nn.ReLU()
elif activation_fn == "gelu":
ac = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(activation_fn))
# fc1 -> ac -> dropout -> fc2 -> dropout
self.module = nn.Sequential(
nn.Linear(input_dim, ffn_dim),
ac,
nn.Dropout(dropout_on_fc1),
nn.Linear(ffn_dim, input_dim),
nn.Dropout(dropout_on_fc2),
)
self.layer_norm = Fp32LayerNorm(input_dim)
def forward(self, input):
module_out = self.module(self.layer_norm(input))
output = module_out + input
return output
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.ao.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
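# A small usage sketch (hypothetical sizes; assumes a CPU build with a
# quantized engine available): dynamic quantization swaps the nn.Linear
# submodules for int8 versions in place, while the forward signature and
# the residual shape stay unchanged.
def _demo_quantize_positionwise_ff():
    ff = PositionwiseFF(
        input_dim=16,
        ffn_dim=64,
        dropout_on_fc1=0.0,
        dropout_on_fc2=0.0,
        activation_fn="relu",
    ).eval()
    ff.quantize_()
    out = ff(torch.rand(10, 2, 16))  # T x B x D
    assert out.shape == (10, 2, 16)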
# ------------------------------------------------------------------------------
# SummarizationLayer
# ------------------------------------------------------------------------------
class SummarizationLayer(nn.Module):
def __init__(self, method, segment_size, embedding_dim):
super(SummarizationLayer, self).__init__()
self.segment_size = segment_size
self.embedding_dim = embedding_dim
nonlin_match = re.match(r"nonlinear\((?P<act>[a-z]+),(?P<dim>[0-9]+)\)", method)
self.method = method
if method == "mean":
self.module = nn.AvgPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "max":
self.module = nn.MaxPool1d(
kernel_size=segment_size,
stride=segment_size,
ceil_mode=True,
)
elif method == "linear":
self.module = nn.Linear(segment_size, 1)
elif nonlin_match:
nonlin_args = nonlin_match.groupdict()
act_type = nonlin_args["act"]
hid_dim = int(nonlin_args["dim"])
if act_type == "relu":
act = nn.ReLU()
elif act_type == "gelu":
act = nn.GELU()
else:
raise ValueError("Unsupported activation_fn = ({})".format(act_type))
self.module = nn.Sequential(
nn.Linear(segment_size, hid_dim),
act,
nn.Linear(hid_dim, 1),
)
else:
raise ValueError("Unsupported summarization method = ({})".format(method))
def forward(self, input):
# T, B, D -> B, D, T
input = input.permute(1, 2, 0)
if self.method == "mean" or self.method == "max":
output = self.module(input)
output = output.permute(2, 0, 1)
return output
full_seg_length = input.size(2) // self.segment_size * self.segment_size
if full_seg_length > 0:
# at least one seg is full
B = input.size(0)
D = input.size(1)
input_todo = (
input[:, :, :full_seg_length]
.contiguous()
.view(B, -1, self.segment_size)
)
output = self.module(input_todo)
output = output.view(B, D, -1)
else:
output = input.new_zeros(input.size(0), input.size(1), 0)
left = input.size(2) - full_seg_length
if left > 0:
# when last seg is not full, use zeros as last memory placeholder
zeros = input.new_zeros(input.size(0), input.size(1), 1)
output = torch.cat([output, zeros], dim=2)
output = output.permute(2, 0, 1)
return output
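# A minimal sketch of the default "mean" summarization (hypothetical sizes):
# the layer average-pools along time with kernel = stride = segment_size and
# ceil_mode=True, yielding one summary vector per segment.
def _demo_summarization_mean():
    layer = SummarizationLayer(method="mean", segment_size=8, embedding_dim=16)
    x = torch.rand(20, 3, 16)  # T x B x D
    summaries = layer(x)  # ceil(20 / 8) = 3 segment summaries
    assert summaries.shape == (3, 3, 16)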
# ------------------------------------------------------------------------------
# NoSegAugmentedMemoryMultiheadAttentionBmm
# ------------------------------------------------------------------------------
class NoSegAugmentedMemoryMultiheadAttentionBmm(nn.Module):
"""
Whole utterance augmented memory multihead attention using BMM.
    Unlike previous augmented-memory multihead attention, where the utterance
    is chunked into segments, here we use an attention mask to achieve the
    same effect. The input embedding [right_context, utterance, summary] is a
    concatenation of the right context, the utterance and the summary.
    The right context block is the concatenation of the right context of
    every segment: [right_context_0, right_context_1, ..., right_context_n].
    For example, with utterance = [v0, v1, ..., v20], segment size 8 and
    right context size 4, the right context blocks are
    [v8, v9, v10, v11, v16, v17, v18, v19, 0, 0, 0, 0], where v8..v11 are the
    right context of the first segment, v16..v19 are the right context of the
    second segment, and the zeros are the right context of the last segment.
    utterance corresponds to the input embedding sequence.
    summary is the concatenation of the per-segment averages: [summary_0,
    summary_1, ...].
    In augmented memory multihead attention, the query is [right_context,
    utterance, summary] and the key is [memory, right_context, utterance].
    Unlike AugmentedMemoryMultiheadAttentionBmm, the memory here is passed
    from the previous attention layer; for the first attention layer, the
    memory is the average of each segment.
    Memory is a concatenation of the per-segment memories from the previous
    attention layer. For example, if the current layer is i, the memory is
    [m_0, m_1, ..., m_n], where each m_k is the output of seg_k in layer i-1.
args:
input_dim: input embedding dimension
num_heads: number of heads in multihead self-attention
dropout: attention dropout
        std_scale: if std_scale is not None, weak attention suppression is
            turned on; for std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std are suppressed.
scaled_init: whether to use scaled init for linear weight
tanh_on_mem: whether to use tanh on memory output
        use_mem: whether to use memory. When max_memory_size is 0, memory is
            disabled.
layer_index: current self-attention layer index that is used in depth
initialization
max_relative_position: max relative position used in relative position
embedding
        rpe_old_option: for compatibility with previous models, which were
            trained with attention += attention + rpe; the correct equation
            is attention = attention + rpe
"""
def __init__(
self,
input_dim,
num_heads,
dropout=0.0,
std_scale=None,
scaled_init=False,
tanh_on_mem=False,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
max_relative_position=0,
rpe_old_option=True,
):
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
super().__init__()
embed_dim = input_dim
self.e2h_kv = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
self.e2h_q = torch.nn.Linear(input_dim, input_dim, bias=True)
self.rpe_old_option = rpe_old_option
if max_relative_position > 0:
self.use_rpe = True
self.rpe_k = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
self.rpe_v = RelativePositionEmbedding(
head_dim=input_dim // num_heads,
max_position=max_relative_position,
)
else:
self.use_rpe = False
self.rpe_k = None
self.rpe_v = None
if scaled_init:
if layer_index == -1:
gain = 1.0 / math.sqrt(2)
else:
                # https://arxiv.org/abs/2005.09684: depthwise initialization
                # stabilizes training greatly; it is used here in place of
                # incremental loss.
gain = 1.0 / math.sqrt(layer_index + 1)
torch.nn.init.xavier_uniform_(self.e2h_kv.weight, gain=gain)
torch.nn.init.xavier_uniform_(self.e2h_q.weight, gain=gain)
self.out_proj = torch.nn.Linear(embed_dim, embed_dim, bias=True)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.std_scale = std_scale
self.use_mem = use_mem
self.mini_batches = mini_batches
self.negative_inf = negative_inf
if tanh_on_mem:
self.squash_mem = torch.tanh
self.nonlinear_squash_mem = True
else:
self.squash_mem = NoOp()
self.nonlinear_squash_mem = False
def prepare_qkv(
self,
input: Tensor,
mems: Tensor,
lengths: Tensor,
summary_length: int,
lc_length: int,
):
# T: right_context length + utterance_length + summary_length
T, B, D = input.shape
mem_length = mems.size(0)
utterance_length = torch.max(lengths)
right_context_blocks_length = T - utterance_length - summary_length
rc_block = input[:right_context_blocks_length, :, :]
utterance_block = input[right_context_blocks_length : T - summary_length, :, :]
if B == 1:
padding_mask = None
else:
klengths = lengths + mem_length + right_context_blocks_length + lc_length
padding_mask = lengths_to_padding_mask(lengths=klengths)
mem_rc_input = torch.cat([mems, rc_block, utterance_block], dim=0)
# In training lc_length = 0
key_length = mem_rc_input.size(0) + lc_length
rc_input_sum = input
q = self.e2h_q(rc_input_sum)
kv = self.e2h_kv(mem_rc_input)
k, v = kv.chunk(chunks=2, dim=2)
result_qkv = (q, k, v)
input_shape = (T, B, D)
result_lengths_info = (
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
)
if padding_mask is not None:
assert padding_mask.size(0) == B
assert padding_mask.size(1) == key_length
return result_qkv, input_shape, result_lengths_info, padding_mask
def prepare_attention_weights(
self,
q: Tensor,
new_k: Tensor,
new_v: Tensor,
input_shape: Tuple[int, int, int],
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor]:
T, B, D = input_shape
q = (
q.contiguous().view(-1, B * self.num_heads, self.head_dim).transpose(0, 1)
* self.scaling
)
k = (
new_k.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
v = (
new_v.contiguous()
.view(-1, B * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attention_weights = torch.bmm(q, k.transpose(1, 2))
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_k = self.rpe_k(rpe)
# [q, B*h, d] * [q, k, d] -> [B*h, q, k]
attention_weights_rpe = torch.matmul(
q.transpose(0, 1), r_k.transpose(1, 2)
).transpose(0, 1)
attention_weights = attention_weights + attention_weights_rpe
attention_weights_float = attention_weights.float()
return attention_weights, attention_weights_float, v
def prepare_attention_output(
self,
attention_weights: Tensor,
attention_weights_float: Tensor,
v: Tensor,
input_shape: Tuple[int, int, int],
key_length: int,
padding_mask: Optional[Tensor],
rpe: Optional[Tensor],
) -> Tensor:
T, B, D = input_shape
if padding_mask is not None:
attention_weights_float = attention_weights_float.view(
B, self.num_heads, T, key_length
)
attention_weights_float = attention_weights_float.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
)
attention_weights_float = attention_weights_float.view(
B * self.num_heads, T, key_length
)
if self.std_scale is not None:
attention_weights_float = attention_suppression(
attention_weights_float, self.std_scale
)
attention_weights_float = torch.nn.functional.softmax(
attention_weights_float, dim=-1
)
attention_weights = attention_weights_float.type_as(attention_weights)
attention_probs = torch.nn.functional.dropout(
attention_weights, p=self.dropout, training=self.training
)
        # [B * n_head, T, key_length] x [B * n_head, key_length, d_head]
        # -> [B * n_head, T, d_head]
attention = torch.bmm(attention_probs, v)
if self.use_rpe and rpe is not None and self.rpe_v is not None:
r_v = self.rpe_v(rpe)
attention_rpe = torch.matmul(
attention_probs.transpose(0, 1), r_v
).transpose(0, 1)
if self.rpe_old_option:
attention += attention + attention_rpe
else:
attention = attention + attention_rpe
assert list(attention.shape) == [B * self.num_heads, T, self.head_dim]
attention = attention.transpose(0, 1).contiguous().view(T, B, self.embed_dim)
rc_output_memory = self.out_proj(attention)
return rc_output_memory
@torch.jit.unused
def forward(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
attention_mask: Tensor,
pre_mems: Optional[Tensor] = None,
left_context_key: Optional[Tensor] = None,
left_context_val: Optional[Tensor] = None,
rpe: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in training.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
attention_mask: attention mask for query = [right_context, query, summary]
                key = [mem, right_context, query]. This is only used for training.
"""
if self.use_mem:
mem_length = mems.size(0)
summary_length = mem_length + 1
if pre_mems is not None:
mems = torch.cat([pre_mems, mems], dim=0)
else:
mem_length = 0
summary_length = 0
# In training, lc_length = 0
if left_context_key is not None:
lc_length = left_context_key.size(0)
else:
lc_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
if left_context_key is not None:
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
else:
new_k = k
new_v = v
next_k = None
next_v = None
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
# mask attention
attention_mask = attention_mask.unsqueeze(0)
attention_weights_float = attention_weights_float.masked_fill(
attention_mask, float(self.negative_inf)
)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
if self.use_mem:
# next_m length equals to summary length - 1
# last memory is ignored
if self.mini_batches:
next_m = rc_output_memory[-summary_length:]
else:
next_m = rc_output_memory[-summary_length:-1]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-summary_length]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
next_m = mems
rc_output = rc_output_memory
return rc_output, next_m, next_k, next_v
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
forward function for NoSegAugmentedMemoryMultiheadAttentionBmm in decoding.
args:
input: formed in the following way
                [right_context_0, right_context_1, ..., seg_0, seg_1,
..., summary_0, summary_1,..]
lengths: the length of query which is [seg_0, seg_1, ....]
mems: [mem_0, mem_1, ...].
            left_context_key: left_context for the key part. This is only used
                for online decoding; in training, this is an empty tensor.
            left_context_val: left_context for the value part. This is only used
                for online decoding; in training, this is an empty tensor.
"""
lc_length = left_context_key.size(0)
# In decoding, summary_length = 1 or 0
if self.use_mem:
summary_length = 1
else:
summary_length = 0
results = self.prepare_qkv(
input=input,
mems=mems,
lengths=lengths,
summary_length=summary_length,
lc_length=lc_length,
)
result_qkv, input_shape, result_lengths_info, padding_mask = results
q, k, v = result_qkv
(
mem_length,
utterance_length,
right_context_blocks_length,
key_length,
) = result_lengths_info
# add the cache key and value
new_k = torch.cat(
[
k[: mem_length + right_context_blocks_length, :, :],
left_context_key,
k[-utterance_length:, :, :],
],
dim=0,
)
new_v = torch.cat(
[
v[: mem_length + right_context_blocks_length, :, :],
left_context_val,
v[-utterance_length:, :, :],
],
dim=0,
)
next_k = new_k[mem_length + right_context_blocks_length :, :, :]
next_v = new_v[mem_length + right_context_blocks_length :, :, :]
attention_weights, attention_weights_float, v = self.prepare_attention_weights(
q=q,
new_k=new_k,
new_v=new_v,
input_shape=input_shape,
rpe=rpe,
)
        # In online decoding there is no attention mask, but we still need
        # to disable attention from the summary query to the memory
attention_weights_float[:, -1, :mem_length] = float(self.negative_inf)
rc_output_memory = self.prepare_attention_output(
attention_weights=attention_weights,
attention_weights_float=attention_weights_float,
v=v,
input_shape=input_shape,
key_length=key_length,
padding_mask=padding_mask,
rpe=rpe,
)
# In decoding, summary length is 1
if self.use_mem:
next_m = rc_output_memory[-1:]
next_m = self.squash_mem(next_m)
# rc and output
rc_output = rc_output_memory[:-1]
if not self.nonlinear_squash_mem:
next_m = torch.clamp(next_m, min=-10, max=10)
else:
rc_output = rc_output_memory
# empty tensor as input mems
next_m = mems
return rc_output, next_m, next_k, next_v
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.ao.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
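# A hedged sketch of the weak attention suppression described in the class
# docstring above (the imported attention_suppression util is the actual
# implementation; this only mirrors the documented behavior): scores below
# mean + std_scale * std of their row are masked to -inf before the softmax.
def _demo_weak_attention_suppression(scores: Tensor, std_scale: float) -> Tensor:
    mean = scores.mean(dim=-1, keepdim=True)
    std = scores.std(dim=-1, keepdim=True)
    return scores.masked_fill(scores < mean + std_scale * std, float("-inf"))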
class NoSegAugmentedMemoryTransformer(nn.Module):
"""
Whole utterance augmented memory transformer.
    This is not a pyspeech nn layer; it is used as a module in a master layer
    where multiple transformers are stacked.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
dropout_in_attn=0.0,
dropout_on_attn=None,
dropout_on_fc1=None,
dropout_on_fc2=None,
activation_fn="relu",
tanh_on_mem=False,
std_scale=None,
scaled_init=False,
segment_size=128,
use_mem=True,
mini_batches=False,
negative_inf="-inf",
layer_index=-1,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super(NoSegAugmentedMemoryTransformer, self).__init__()
self.attention = NoSegAugmentedMemoryMultiheadAttentionBmm(
input_dim=input_dim,
num_heads=num_heads,
dropout=dropout_in_attn,
scaled_init=scaled_init,
tanh_on_mem=tanh_on_mem,
std_scale=std_scale,
use_mem=use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
max_relative_position=max_relative_position,
)
self.dropout = nn.Dropout(dropout_on_attn)
self.pos_ff = PositionwiseFF(
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
activation_fn=activation_fn,
)
self.layer_norm_pre = Fp32LayerNorm(input_dim)
self.layer_norm = Fp32LayerNorm(input_dim)
self.segment_size = segment_size
self.use_mem = use_mem
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
def set_mini_batches(self, mini_batches):
self.attention.mini_batches = mini_batches
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def pre_attention_ops(self, input, right_context_blocks):
rc_length = right_context_blocks.size(0)
input_length = input.size(0)
rc_and_input = torch.cat([right_context_blocks, input], dim=0)
residual_input = rc_and_input
rc_and_input = self.layer_norm_pre(rc_and_input)
query_input = rc_and_input[-input_length:, :, :]
return rc_length, input_length, residual_input, query_input, rc_and_input
def after_attention_ops(self, attention_output, residual_input):
output = self.dropout(attention_output)
output = output + residual_input
output = self.pos_ff(output)
output = self.layer_norm(output)
return output
@torch.jit.export
def forward_jit(
self,
input: Tensor,
lengths: Tensor,
mems: Tensor,
left_context_key: Tensor,
left_context_val: Tensor,
right_context_blocks: Tensor,
rpe: Optional[Tensor],
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
# In online decoding, the summary query size is always 1 or 0
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
summary_query = summary_query[0:1, :, :]
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention.forward_jit(
input=rc_qu_su,
lengths=lengths,
mems=mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
@torch.jit.unused
def forward(
self,
input,
lengths,
mems,
right_context_blocks,
attention_mask,
pre_mems,
left_context_key,
left_context_val,
rpe,
):
results = self.pre_attention_ops(input, right_context_blocks)
rc_length, input_length, residual_input, query_input, rc_and_input = results
if self.use_mem:
summary_query = self.gen_summary_queries(query_input)
rc_qu_su = torch.cat([rc_and_input, summary_query], dim=0)
else:
rc_qu_su = rc_and_input
rc_output, next_m, next_k, next_v = self.attention(
input=rc_qu_su,
lengths=lengths,
mems=mems,
attention_mask=attention_mask,
pre_mems=pre_mems,
left_context_key=left_context_key,
left_context_val=left_context_val,
rpe=rpe,
)
        # [TODO] Note: memory did not go through pos_ff. What happens if we pass
# memory through the pos_ff as well?
rc_output = self.after_attention_ops(rc_output, residual_input)
results = (
rc_output[-input_length:, :, :],
next_m,
rc_output[0:rc_length, :, :],
next_k,
next_v,
)
return results
class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder):
"""
    Whole utterance augmented memory transformer encoder layer. This is a
    master layer in which multiple augmented memory transformers can be
    defined. There are two reasons to set up a master layer:
    1. The attention mask only needs to be defined once; all the layers in
    the master layer share the same mask.
    2. pyspeech nn layers have a special input and output format; a single
    master layer makes it easier to pass memory between the layers inside it.
    args:
        input_dim: input embedding dimension
        num_heads: number of heads in multihead self-attention
        ffn_dim: ffn dimension in FFN layer
        num_layers: number of augmented memory transformer layers
        dropout_in_attn: dropout used in multi-head self-attention
        dropout_on_attn: dropout used on the output of the multihead self-attention
        dropout_on_fc1: dropout used in FFN layer for the first linear layer
        dropout_on_fc2: dropout used in FFN layer for the second linear layer
        segment_size: segment size for each segment
        context_config: (left_context_size, right_context_size) defines the
            surrounding context size for each segment
        max_memory_size: maximum memory size used for each segment
        scaled_init: whether to use scaled init for weight initialization in
            the attention layer
        std_scale: if std_scale is not None, weak attention suppression is
            turned on; for std_scale = 0.5, all attention weights smaller than
            mean + 0.5 * std are suppressed.
        activation_fn: activation function used in FFN layer. [ReLU, GELU] supported
        tanh_on_mem: whether to use tanh on memory
        mini_batches: whether to use mini-batch training
        negative_inf: the negative infinity value used in attention masking.
            Default is "-inf". For some situations, e.g. LM, it is better to
            use "-1e8" to avoid nan issues.
        summarization_method: method used to generate the segment
            summarization embedding
        max_relative_position: max relative position for relative position embedding
        rpe_old_option: for compatibility with previous models, which were
            trained with attention += attention + rpe; the correct equation
            is attention = attention + rpe
[TODO]: remove the rpe_old_option by the end of 2021 Q1.
"""
def __init__(
self,
input_dim,
num_heads,
ffn_dim,
num_layers=1,
dropout_in_attn=0.0,
dropout_on_attn=0.0,
dropout_on_fc1=0.0,
dropout_on_fc2=0.0,
segment_size=128,
context_config=(0, 0),
max_memory_size=0,
scaled_init=True,
std_scale=None,
activation_fn="relu",
tanh_on_mem=False,
mini_batches=False,
negative_inf="-inf",
deep_init=True,
summarization_method="mean",
max_relative_position=0,
rpe_old_option=True,
):
super().__init__(None)
if input_dim % num_heads:
raise ValueError(
"input_dim ({}) must be divisible by num_heads ({})".format(
input_dim, num_heads
)
)
        # we used to support a growing memory size; however, it causes
        # cross-stream batching failures, so we now require an exact max memory size
if max_memory_size < 0:
raise ValueError("max_memory_size must be >= 0")
# Only assign right_context. In decoding, left context will be cached.
        # No need to let the online decoder re-assign the left context
self.left_context, self.right_context = context_config
self.segment_size = segment_size
self.memory_dim = input_dim
self.max_memory_size = max_memory_size
self.mini_batches = mini_batches
if self.max_memory_size != 0:
self.use_mem = True
else:
self.use_mem = False
self.memory_op = SummarizationLayer(
summarization_method, segment_size, input_dim
)
self.layers = torch.nn.ModuleList()
self.num_layers = num_layers
self.max_relative_position = max_relative_position
if self.max_relative_position > 0:
self.use_rpe = True
else:
self.use_rpe = False
for i in range(self.num_layers):
if deep_init:
layer_index = i
else:
layer_index = -1
self.layers.append(
NoSegAugmentedMemoryTransformer(
num_heads=num_heads,
input_dim=input_dim,
ffn_dim=ffn_dim,
dropout_in_attn=dropout_in_attn,
dropout_on_attn=dropout_on_attn,
dropout_on_fc1=dropout_on_fc1,
dropout_on_fc2=dropout_on_fc2,
segment_size=segment_size,
std_scale=std_scale,
activation_fn=activation_fn,
tanh_on_mem=tanh_on_mem,
scaled_init=scaled_init,
use_mem=self.use_mem,
mini_batches=mini_batches,
negative_inf=negative_inf,
layer_index=layer_index,
summarization_method=summarization_method,
max_relative_position=max_relative_position,
rpe_old_option=rpe_old_option,
)
)
def set_mini_batches(self, mini_batches):
# handy function only used for unit test
self.mini_batches = mini_batches
for layer in self.layers:
layer.set_mini_batches(mini_batches)
def _get_relative_position(
self,
input: Tensor,
max_relative_position: int,
left_context_length: int,
past_length: int,
is_decoding: bool,
):
# For training, we copy the right context to the start of the utterance
        # The first dimension of distance corresponds to the query:
        # [right context, utterance, summary vector]
        # The second dimension of distance corresponds to the key:
        # [memory bank, right context, utterance]
        # For the summary vector in the query part, the distance to all
        # other positions is 2*max_position. For the memory bank in the key,
        # the distance to all other positions is 0.
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
# utterance
u_st = past_length * self.segment_size
u_ed = u_st + T
utterance_ranges = torch.arange(u_st, u_ed - self.right_context)
# left context. Only in minibatch or decoding
left_context_ranges = torch.arange(u_st - left_context_length, u_st)
# Right context block
# right context + utterance
right_context_blocks = []
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size + u_st
ed = st + self.right_context
assert ed < u_ed
temp = torch.arange(st, ed)
right_context_blocks.append(temp)
right_context_blocks.append(torch.arange(u_ed - self.right_context, u_ed))
right_context_ranges = torch.cat(right_context_blocks)
if self.use_mem:
# Memory bank
# The position for memory -n, .., -1
if is_decoding:
memory_size = min(past_length, self.max_memory_size)
else:
memory_size = num_segs + past_length - 1
memory_bank_ranges = torch.arange(
-max_relative_position - 1, -max_relative_position - 1 - memory_size, -1
)
# summary vector
# The position for summary vector as the T+max_relative_position+1.
# After the clamping, the relative position is max_relative_position
summary_pos_st = u_ed + max_relative_position + 1
summary_vector_ranges = torch.arange(
summary_pos_st, summary_pos_st + num_segs
)
key_ranges = torch.cat(
[
memory_bank_ranges,
right_context_ranges,
left_context_ranges,
utterance_ranges,
]
)
query_ranges = torch.cat(
[right_context_ranges, utterance_ranges, summary_vector_ranges]
)
else:
key_ranges = torch.cat(
[right_context_ranges, left_context_ranges, utterance_ranges]
)
query_ranges = torch.cat([right_context_ranges, utterance_ranges])
distance = key_ranges[None, :] - query_ranges[:, None]
distance_clamp = (
torch.clamp(distance, -max_relative_position, max_relative_position)
+ max_relative_position
)
distance_clamp = distance_clamp.to(input.device).long().detach()
return distance_clamp
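    # A minimal shape sketch (hypothetical config): with memory disabled the
    # relative-position table relates queries = [right context, utterance]
    # to keys = [right context, left context, utterance]; its entries are
    # the clamped, shifted pairwise distances used to index the RPE table.
    @staticmethod
    def _demo_relative_position_table():
        layer = NoSegAugmentedMemoryTransformerEncoderLayer(
            input_dim=16,
            num_heads=2,
            ffn_dim=32,
            segment_size=4,
            context_config=(0, 2),
            max_memory_size=0,
            max_relative_position=8,
        )
        x = torch.rand(10, 1, 16)  # (utterance 8 + right context 2) x B x D
        rpe = layer._get_relative_position(
            input=x,
            max_relative_position=8,
            left_context_length=0,
            past_length=0,
            is_decoding=False,
        )
        assert rpe.shape == (12, 12) and rpe.max().item() <= 16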
def _get_attention_mask(self, input, past_length=0, left_context_cache=0):
# attention mask for each query contains three parts:
# 1. memory part
# 2. left_context + segment
# 3. right_context_block
        # so for each segment and its corresponding right context block,
# the attention matrix is formed by 9 parts:
# [0, m, 0, 0, right_context, 0, 0, seg, 0]
# [before memory, memory, after memory, before right context, right_context,
# after right context, before seg, seg, after seg]
#
# Query is formed in the way as [right_context_blocks, utterance, summary]
#
        # Note: putting m and right_context before the segment is convenient
        # for the padding_mask operation.
# Key lengths = m_length + right_context_block_length + lengths
utterance_length, batch_size, _ = input.shape
summary_length = math.ceil(utterance_length / self.segment_size)
num_segs = summary_length
rc_length = self.right_context * num_segs
rc = self.right_context
lc = self.left_context
        # When using mini-batches, a left-context cache is available for the
        # current sequence.
lcc = left_context_cache
        # if max_memory_size is 0, we have neither memory nor summary
        # past_length is the memory carried over from the previous sequence
if self.use_mem:
mem_length = num_segs - 1 + past_length
else:
mem_length = 0
rc_mask = []
query_mask = []
summary_mask = []
for j in range(0, num_segs):
ssize = min(self.segment_size, utterance_length - j * self.segment_size)
rc_size = rc
rc_mat = []
q_mat = []
s_mat = []
m_start = max(j + past_length - self.max_memory_size, 0)
# max_memory_size is 0, then we don't use memory
if self.use_mem:
# part 0: before memory
rc_mat.append(input.new_zeros(rc_size, m_start))
q_mat.append(input.new_zeros(ssize, m_start))
s_mat.append(input.new_zeros(1, m_start))
# part 1: memory
col_1 = j + past_length - m_start
rc_mat.append(torch.ones(rc_size, col_1, device=input.device))
q_mat.append(torch.ones(ssize, col_1, device=input.device))
                # based on D22875746, disabling summary query attention
                # on memory is better for long-form utterances
s_mat.append(input.new_zeros(1, col_1))
# part 2: after memory
col_2 = mem_length - (j + past_length)
rc_mat.append(input.new_zeros(rc_size, col_2))
q_mat.append(input.new_zeros(ssize, col_2))
s_mat.append(input.new_zeros(1, col_2))
# part 3: before right context
rc_start = j * rc
rc_mat.append(input.new_zeros(rc_size, rc_start))
q_mat.append(input.new_zeros(ssize, rc_start))
s_mat.append(input.new_zeros(1, rc_start))
# part 4: right context
rc_end = rc_start + rc
col_4 = rc
rc_mat.append(torch.ones(rc_size, col_4, device=input.device))
q_mat.append(torch.ones(ssize, col_4, device=input.device))
s_mat.append(torch.ones(1, col_4, device=input.device))
# part 5: after right context
col_5 = rc_length - rc_end
rc_mat.append(input.new_zeros(rc_size, col_5))
q_mat.append(input.new_zeros(ssize, col_5))
s_mat.append(input.new_zeros(1, col_5))
# part 6: before query segment
seg_start = max(j * self.segment_size + lcc - lc, 0)
rc_mat.append(input.new_zeros(rc_size, seg_start))
q_mat.append(input.new_zeros(ssize, seg_start))
s_mat.append(input.new_zeros(1, seg_start))
# part 7: query segment
# note: right context is put in right context block
# here we only need to consider about left context
seg_end = min((j + 1) * self.segment_size + lcc, utterance_length + lcc)
col_7 = seg_end - seg_start
rc_mat.append(torch.ones(rc_size, col_7, device=input.device))
q_mat.append(torch.ones(ssize, col_7, device=input.device))
s_mat.append(torch.ones(1, col_7, device=input.device))
# part 8: after query segment
col_8 = utterance_length + lcc - seg_end
rc_mat.append(input.new_zeros(rc_size, col_8))
q_mat.append(input.new_zeros(ssize, col_8))
s_mat.append(input.new_zeros(1, col_8))
rc_mask.append(torch.cat(rc_mat, dim=1))
query_mask.append(torch.cat(q_mat, dim=1))
summary_mask.append(torch.cat(s_mat, dim=1))
# no memory, then we don't need summary either
if self.use_mem:
attention_mask = (
1
- torch.cat(
[
torch.cat(rc_mask, dim=0),
torch.cat(query_mask, dim=0),
torch.cat(summary_mask, dim=0),
],
dim=0,
)
).to(torch.bool)
else:
attention_mask = (
1
- torch.cat(
[torch.cat(rc_mask, dim=0), torch.cat(query_mask, dim=0)], dim=0
)
).to(torch.bool)
return attention_mask
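    # A small shape sketch (hypothetical config): with memory disabled the
    # mask relates queries = [right-context blocks, utterance] to keys of
    # the same layout, giving a boolean (rc_length + T) x (rc_length + T)
    # matrix; here rc_length = 2 segments * 2 right-context frames = 4.
    @staticmethod
    def _demo_attention_mask_shape():
        layer = NoSegAugmentedMemoryTransformerEncoderLayer(
            input_dim=16,
            num_heads=2,
            ffn_dim=32,
            segment_size=4,
            context_config=(0, 2),
            max_memory_size=0,
        )
        seg_input = torch.rand(8, 1, 16)  # T x B x D, right context stripped
        mask = layer._get_attention_mask(seg_input)
        assert mask.shape == (12, 12) and mask.dtype == torch.bool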
@torch.jit.export
def init_state(
self, batch_size: int, device: Optional[Device] = None
) -> List[Tensor]:
empty_memory = torch.zeros(
self.num_layers,
self.max_memory_size,
batch_size,
self.memory_dim,
device=device,
)
left_context_key = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
left_context_val = torch.zeros(
self.num_layers,
self.left_context,
batch_size,
self.memory_dim,
device=device,
)
past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
return [empty_memory, left_context_key, left_context_val, past_length]
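    # Usage sketch (hypothetical config): the streaming state is
    # [memory, left-context keys, left-context values, past_length], with a
    # per-layer leading dimension on the first three tensors.
    @staticmethod
    def _demo_init_state_shapes():
        layer = NoSegAugmentedMemoryTransformerEncoderLayer(
            input_dim=16,
            num_heads=2,
            ffn_dim=32,
            num_layers=2,
            segment_size=4,
            context_config=(3, 2),
            max_memory_size=5,
        )
        mem, lc_key, lc_val, past_length = layer.init_state(batch_size=4)
        assert mem.shape == (2, 5, 4, 16)  # layers x max_memory x B x D
        assert lc_key.shape == (2, 3, 4, 16)  # layers x left_context x B x D
        assert lc_val.shape == (2, 3, 4, 16)
        assert past_length.shape == (1, 4)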
@torch.jit.export
def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]:
if len(states) == 0:
return []
batched_m = []
batched_lc_key = []
batched_lc_val = []
batched_past_length = []
for state in states:
if len(state) == 0:
continue
m, lc_key, lc_val, past_length = state
batched_m.append(m)
batched_lc_key.append(lc_key)
batched_lc_val.append(lc_val)
batched_past_length.append(past_length)
if (
(len(batched_m) == 0)
or (len(batched_lc_key) == 0)
or (len(batched_lc_val) == 0)
or (len(batched_past_length) == 0)
):
return [
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
torch.tensor([]),
]
batched_m = torch.cat(batched_m, dim=2)
batched_lc_key = torch.cat(batched_lc_key, dim=2)
batched_lc_val = torch.cat(batched_lc_val, dim=2)
batched_past_length = torch.cat(batched_past_length, dim=1)
return [batched_m, batched_lc_key, batched_lc_val, batched_past_length]
@torch.jit.export
def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
if len(state) == 0:
return []
m, lc_key, lc_val, past_length = state
indices = indices.to(device=m.device)
reord_m = torch.index_select(m, 2, indices)
reord_lc_key = torch.index_select(lc_key, 2, indices)
reord_lc_val = torch.index_select(lc_val, 2, indices)
reord_past_length = torch.index_select(past_length, 1, indices)
return [reord_m, reord_lc_key, reord_lc_val, reord_past_length]
@torch.jit.export
def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]:
m, lc_key, lc_val, past_length = state
m = m.index_fill(dim=2, index=indices, value=0.0)
lc_key = lc_key.index_fill(dim=2, index=indices, value=0.0)
lc_val = lc_val.index_fill(dim=2, index=indices, value=0.0)
past_length = past_length.index_fill(dim=1, index=indices, value=0)
return [m, lc_key, lc_val, past_length]
@torch.jit.export
def state_size(self) -> int:
return 4
@torch.jit.export
def batch_size_in_state(
self, state: Optional[List[Tensor]], sloppy: bool = True
) -> Optional[int]:
if state is None:
return None
return state[0].size(2)
def gen_summary_queries(self, input):
sum_input = self.memory_op(input)
return sum_input
def _gen_right_context_padded_input(self, input):
# This function deals with input that is already
# padded with right context (e.g. minibatch training)
right_context_blocks = []
T, B, D = input.shape
num_segs = math.ceil((T - self.right_context) / self.segment_size)
for i in range(0, num_segs - 1):
st = (i + 1) * self.segment_size
ed = st + self.right_context
assert ed < T
temp = input[st:ed, :, :]
right_context_blocks.append(temp)
# last segment right context is already available
right_context_blocks.append(input[T - self.right_context :, :, :])
return torch.cat(right_context_blocks, dim=0)
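    # A worked sketch matching the attention class docstring (hypothetical
    # sizes): a 20-frame utterance padded with right_context=4 and
    # segment_size=8 gives num_segs = 3 blocks, drawn from frames 8:12,
    # 16:20 and the already-padded tail 20:24.
    @staticmethod
    def _demo_right_context_blocks():
        layer = NoSegAugmentedMemoryTransformerEncoderLayer(
            input_dim=16,
            num_heads=2,
            ffn_dim=32,
            segment_size=8,
            context_config=(0, 4),
            max_memory_size=0,
        )
        x = torch.arange(24, dtype=torch.float32).view(24, 1, 1)  # T x B x D
        blocks = layer._gen_right_context_padded_input(x)
        assert blocks.shape == (12, 1, 1)  # num_segs * right_context frames
        assert blocks[:4, 0, 0].tolist() == [8.0, 9.0, 10.0, 11.0]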
def _gen_segs_right_context(self, input, lengths):
segments = []
T, B, D = input.size()
nT = T - self.right_context
# assume input is right context padded
num_segs = math.ceil(nT / self.segment_size)
        # pad zeros to the utterance to make sure each
        # segment has the same right context
for i in range(0, num_segs - 1):
st = i * self.segment_size
ed = min(T, st + self.segment_size + self.right_context)
temp = input[st:ed, :, :]
rest_lengths = torch.clamp(
lengths - self.segment_size, min=0, max=nT - (i + 1) * self.segment_size
)
segments.append((temp, lengths - rest_lengths + self.right_context))
lengths = rest_lengths
last_seg = input[st + self.segment_size :, :, :]
segments.append((last_seg, rest_lengths + self.right_context))
return segments
@torch.jit.unused
def forward(
self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
# Xutai: originally the second argument is lengths.
lengths = (~padding_masks).sum(dim=1).long()
# mini batch training.
if self.mini_batches:
return self.forward_mini_batches(input, lengths, state)
        # regular full-sequence training. Note: we assume the right context is
        # provided in the input.
T, B, D = input.size()
right_context_blocks = self._gen_right_context_padded_input(input)
# generate the relative positional embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=0,
past_length=0,
is_decoding=False,
)
else:
rpe = None
input = input[: T - self.right_context, :, :]
attention_mask = self._get_attention_mask(input)
        # the first layer uses each segment mean as memory,
        # ignoring the last segment's average
if self.use_mem:
mems = self.gen_summary_queries(input)[:-1, :, :]
else:
mems = torch.zeros(0, input.size(1), input.size(2), device=input.device)
mems = mems.type_as(input)
output = input
all_outputs = []
for layer in self.layers:
output, mems, right_context_blocks, _, _ = layer(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=None,
left_context_key=None,
left_context_val=None,
rpe=rpe,
)
all_outputs.append(output)
return output, padding_masks, [], all_outputs
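    # An end-to-end shape sketch (hypothetical config): with memory disabled,
    # the layer consumes right-context-padded input and returns the utterance
    # part with the right context stripped, plus per-layer hidden states.
    @staticmethod
    def _demo_forward_shapes():
        layer = NoSegAugmentedMemoryTransformerEncoderLayer(
            input_dim=16,
            num_heads=2,
            ffn_dim=32,
            num_layers=2,
            segment_size=4,
            context_config=(0, 2),
            max_memory_size=0,
        )
        x = torch.rand(10, 3, 16)  # (utterance 8 + right context 2) x B x D
        padding_masks = torch.zeros(3, 8, dtype=torch.bool)  # no padding
        out, masks, _, all_outputs = layer(x, padding_masks)
        assert out.shape == (8, 3, 16) and len(all_outputs) == 2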
def forward_jit_mini_batch_init(
self,
seg: Tensor,
state: Optional[List[Tensor]] = None,
is_decoding: bool = False,
):
# Prepare state. In whole sequence training, state is ignored.
# For minibatch training, we need to prepare state
if state is None:
state = self.init_state(batch_size=seg.size(1), device=seg.device)
if seg.dtype == torch.half:
state = [state[0].half(), state[1].half(), state[2].half(), state[3]]
if self.use_mem:
            # note: the input average is taken over seg only, not the right context.
            # The first layer uses each segment's mean as memory; the last
            # segment's average is kept in the state.
full_mems = self.gen_summary_queries(seg)
if is_decoding:
mems = full_mems[0:1, :, :]
state_mems = torch.cat([state[0][0], mems], dim=0)
else:
mems = full_mems[:-1, :, :]
state_mems = torch.cat([state[0][0], full_mems], dim=0)
else:
mems = state[0][0]
state_mems = mems
        # track the number of processed segments (or memories);
        # all samples in the same batch share the same past length
past_length = state[3][0][0].item()
past_left_context = min(past_length * self.segment_size, self.left_context)
past_length = min(self.max_memory_size, past_length)
return state, mems, state_mems, past_length, past_left_context
def state_update_before(
self, layer: int, state: List[Tensor], past_length: int, past_left_context: int
):
pre_mems = state[0][layer][self.max_memory_size - past_length :, :, :]
lc_key = state[1][layer][self.left_context - past_left_context :, :, :]
lc_val = state[2][layer][self.left_context - past_left_context :, :, :]
return pre_mems, lc_key, lc_val
def state_update_after(
self,
layer: int,
state: List[Tensor],
mems: Tensor,
next_key: Tensor,
next_val: Tensor,
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
):
# mems is used for next layer
if layer < self.num_layers - 1:
state_mems = torch.cat([state[0][layer + 1], mems], dim=0)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
        # when mems are passed to the next sequence, we need the last memory;
        # when mems are used for the next layer, we can ignore the last memory
mems = mems[:-1, :, :]
        # note: the original length of state[1][i] and state[2][i] equals self.left_context
new_k = torch.cat([state[1][layer], next_key], dim=0)
new_v = torch.cat([state[2][layer], next_val], dim=0)
lc_key_list.append(new_k[-self.left_context :, :, :])
lc_val_list.append(new_v[-self.left_context :, :, :])
return mems_list, lc_key_list, lc_val_list, mems
def state_update_after_loop(
self,
state: List[Tensor],
mems_list: List[Tensor],
lc_key_list: List[Tensor],
lc_val_list: List[Tensor],
update_length: int,
):
state[0] = torch.stack(mems_list, dim=0)
state[1] = torch.stack(lc_key_list, dim=0)
state[2] = torch.stack(lc_val_list, dim=0)
state[3] = state[3] + update_length
return state
@torch.jit.unused
def forward_mini_batches(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]:
T, B, D = input.size()
# input without right context
seg = input[: T - self.right_context, :, :]
# get right context blocks
right_context_blocks = self._gen_right_context_padded_input(input)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, False)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=False,
)
else:
rpe = None
# get attention mask based on seg (not include right context) and available
# left context
attention_mask = self._get_attention_mask(seg, past_length, past_left_context)
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
all_outputs = []
for layer in self.layers:
            # In order to make cross stream batching work, the mem, left context
            # key and left context value in the state should always have the same
            # shape. We use the past length to track the number of processed
            # segments. In this way, we take out the essential memory, left
            # context key and left context value from the state. After finishing
            # the forward pass for the current segment, we add the new memory,
            # left context key and left context value into the state and trim out
            # the oldest part to keep the shape consistent.
pre_mems, lc_key, lc_val = self.state_update_before(
i, state, past_length, past_left_context
)
output, mems, right_context_blocks, next_key, next_val = layer.forward(
input=output,
lengths=lengths,
attention_mask=attention_mask,
mems=mems,
right_context_blocks=right_context_blocks,
pre_mems=pre_mems,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
all_outputs.append(output)
mems_list, lc_key_list, lc_val_list, mems = self.state_update_after(
layer=i,
state=state,
mems=mems,
next_key=next_key,
next_val=next_val,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
update_length = math.ceil((T - self.right_context) / self.segment_size)
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=update_length,
)
return output, lengths, state, all_outputs
def forward_jit_test(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
        This simulates the sequence encoder's forward_jit and exists for unit
        test purposes only; it is not used in training or decoding. Note that
        extra_right_context is set in the model. In the unit test,
        input = [utterance, right_context] and lengths = [utterance_length].
args:
input: input utterance
lengths: utterance input length
state: None here. input is whole utterance
"""
        # [TODO] sequence_to_segment has a bug in lengths.
seg_src_tokens_lengths = self._gen_segs_right_context(input, lengths)
seg_enc_tokens_lengths: List[Tuple[Tensor, Tensor]] = []
state: Optional[List[Tensor]] = None
for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
seg_enc_tokens, seg_enc_lengths, state = self.forward_jit(
input=seg_src_tokens, lengths=seg_src_lengths, state=state
)
seg_enc_tokens_lengths.append((seg_enc_tokens, seg_enc_lengths))
enc_tokens, enc_lengths = segments_to_sequence(
segments=seg_enc_tokens_lengths, time_axis=0
)
state = [] # returns trivial state
return enc_tokens, enc_lengths, state
@torch.jit.export
def forward_jit(
self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None
) -> Tuple[Tensor, Tensor, List[Tensor]]:
"""
Forward helper for online decoding.
args:
            input: [seg, right_context]. We assume that in online decoding we
                always pad the right context to the preset right context size.
                The last segment may be shorter, but its right context size is
                the same as for the other segments.
            lengths: input length, i.e. the utterance segment length plus the
                right context size
            state: [memory, left_context_key, left_context_val]. To improve
                throughput, in addition to memory we also cache the key and
                value for the left context in multihead self-attention.
"""
        # In online decoding, input = [segment, right_context] and
        # lengths = segment_length + right_context_length,
        # so we need to strip the right context from the output
T, B, D = input.size()
rc_str = T - self.right_context
rc_end = T
right_context_blocks = input[rc_str:rc_end, :, :]
seg = input[:rc_str, :, :]
lengths = torch.clamp(lengths - self.right_context, min=0)
mems_list = []
lc_key_list = []
lc_val_list = []
results = self.forward_jit_mini_batch_init(seg, state, True)
state, mems, state_mems, past_length, past_left_context = results
# relative position embedding
if self.use_rpe:
rpe = self._get_relative_position(
input=input,
max_relative_position=self.max_relative_position,
left_context_length=past_left_context,
past_length=past_length,
is_decoding=True,
)
else:
rpe = None
# memory for first layer.
mems_list.append(state_mems[-self.max_memory_size :, :, :])
output = seg
i = 0
for layer in self.layers:
            # In order to make cross stream batching work, the mem, left context
            # key and left context value in the state should always have the same
            # shape. We use the past length to track the number of processed
            # segments. In this way, we take out the essential memory, left
            # context key and left context value from the state. After finishing
            # the forward pass for the current segment, we add the new memory,
            # left context key and left context value into the state and trim out
            # the oldest part to keep the shape consistent.
true_mems, lc_key, lc_val = self.state_update_before(
layer=i,
state=state,
past_length=past_length,
past_left_context=past_left_context,
)
output, mems, right_context_blocks, next_key, next_val = layer.forward_jit(
input=output,
lengths=lengths,
mems=true_mems,
right_context_blocks=right_context_blocks,
left_context_key=lc_key,
left_context_val=lc_val,
rpe=rpe,
)
# mems is used for next layer
mems_list, lc_key_list, lc_val_list, _ = self.state_update_after(
layer=i,
state=state,
mems_list=mems_list,
mems=mems,
next_key=next_key,
next_val=next_val,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
)
i += 1
# update state
state = self.state_update_after_loop(
state=state,
mems_list=mems_list,
lc_key_list=lc_key_list,
lc_val_list=lc_val_list,
update_length=1,
)
return output, lengths, state
def quantize_(self, params=None):
if params and "per_channel" in params and params["per_channel"]:
qconfig = per_channel_dynamic_qconfig
else:
qconfig = default_dynamic_qconfig
torch.ao.quantization.quantize_dynamic(
self, {torch.nn.Linear: qconfig}, dtype=torch.qint8, inplace=True
)
return self
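# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how forward_jit is
# typically driven in streaming decoding. `encoder` is an instance of the class
# above; `chunks` is an assumed iterable of (input, lengths) pairs where each
# input is already laid out as [segment, right_context].
# ------------------------------------------------------------------------------
def _example_streaming_decode(encoder, chunks):
    state = None
    outputs = []
    for seg_input, seg_lengths in chunks:
        # state carries [mems, left_context_key, left_context_val, past_length]
        out, out_lengths, state = encoder.forward_jit(
            input=seg_input, lengths=seg_lengths, state=state
        )
        outputs.append(out)
    return torch.cat(outputs, dim=0)  # concatenate along the time axis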
# ------------------------------------------------------------------------------
# Emformer encoder for seq2seq model
# This is a wrapper over the original emformer
# ------------------------------------------------------------------------------
def emformer_encoder(klass):
class SpeechEncoder(klass):
def __init__(self, args):
super().__init__(args)
stride = SpeechEncoder.conv_layer_stride(args)
trf_left_context = args.segment_left_context // stride
trf_right_context = args.segment_right_context // stride
context_config = [trf_left_context, trf_right_context]
self.transformer_layers = nn.ModuleList(
[
NoSegAugmentedMemoryTransformerEncoderLayer(
input_dim=args.encoder_embed_dim,
num_heads=args.encoder_attention_heads,
ffn_dim=args.encoder_ffn_embed_dim,
num_layers=args.encoder_layers,
dropout_in_attn=args.dropout,
dropout_on_attn=args.dropout,
dropout_on_fc1=args.dropout,
dropout_on_fc2=args.dropout,
activation_fn=args.activation_fn,
context_config=context_config,
segment_size=args.segment_length,
max_memory_size=args.max_memory_size,
scaled_init=True, # TODO: use constant for now.
tanh_on_mem=args.amtrf_tanh_on_mem,
)
]
)
def forward(self, src_tokens, src_lengths):
encoder_out = super().forward(src_tokens, src_lengths)
output = encoder_out["encoder_out"][0]
encoder_padding_masks = encoder_out["encoder_padding_mask"][0]
            # This is needed because in the original implementation the output
            # does not treat the last segment as right context.
encoder_padding_masks = encoder_padding_masks[:, : output.size(0)]
return {
"encoder_out": [output],
"encoder_padding_mask": [encoder_padding_masks],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@staticmethod
def conv_layer_stride(args):
# TODO: make it configurable from the args
return 4
SpeechEncoder.__name__ = klass.__name__
return SpeechEncoder
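# Illustrative usage sketch (hypothetical names; any base encoder class that
# accepts the expected args works):
def _example_wrap_encoder(base_encoder_cls, args):
    EmformerSpeechEncoder = emformer_encoder(base_encoder_cls)
    return EmformerSpeechEncoder(args)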
| 68,267 | 36.183007 | 97 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/speech_to_text/modules/augmented_memory_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple, List
import torch
import torch.nn.functional as F
from fairseq.models import FairseqEncoder
from fairseq.models.speech_to_text import (
ConvTransformerEncoder,
)
from fairseq.models.speech_to_text.utils import attention_suppression
from fairseq.models.speech_to_text.utils import (
lengths_to_encoder_padding_mask,
segments_to_sequence,
sequence_to_segments,
)
from fairseq.modules import MultiheadAttention, TransformerEncoderLayer
from torch import nn, Tensor
# ------------------------------------------------------------------------------
# AugmentedMemoryConvTransformerEncoder
# ------------------------------------------------------------------------------
class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder):
def __init__(self, args):
super().__init__(args)
args.encoder_stride = self.stride()
self.left_context = args.left_context // args.encoder_stride
self.right_context = args.right_context // args.encoder_stride
self.left_context_after_stride = args.left_context // args.encoder_stride
self.right_context_after_stride = args.right_context // args.encoder_stride
self.transformer_layers = nn.ModuleList([])
self.transformer_layers.extend(
[
AugmentedMemoryTransformerEncoderLayer(args)
for i in range(args.encoder_layers)
]
)
def stride(self):
        # Hard-coded here; should be inferred from the conv layers in the future
stride = 4
return stride
def forward(self, src_tokens, src_lengths, states=None):
"""Encode input sequence.
        :param torch.Tensor src_tokens: input tensor
        :param torch.Tensor src_lengths: input sequence lengths
:return: position embedded tensor and mask
:rtype Tuple[torch.Tensor, torch.Tensor]:
"""
bsz, max_seq_len, _ = src_tokens.size()
x = (
src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
.transpose(1, 2)
.contiguous()
)
x = self.conv(x)
bsz, _, output_seq_len, _ = x.size()
x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
x = self.out(x)
x = self.embed_scale * x
subsampling_factor = 1.0 * max_seq_len / output_seq_len
        # clamp the subsampled lengths to the actual output length
        input_lengths = torch.min(
            (src_lengths.float() / subsampling_factor).ceil().long(),
            x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(),
        )
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
# TODO: fix positional embedding
positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# State to store memory banks etc.
if states is None:
states = [
{"memory_banks": None, "encoder_states": None}
for i in range(len(self.transformer_layers))
]
for i, layer in enumerate(self.transformer_layers):
# x size:
# (self.left_size + self.segment_size + self.right_size)
# / self.stride, num_heads, dim
# TODO: Consider mask here
x = layer(x, states[i])
states[i]["encoder_states"] = x[
self.left_context_after_stride : -self.right_context_after_stride
]
lengths = (
(
~encoder_padding_mask[
:, self.left_context_after_stride : -self.right_context_after_stride
]
)
.sum(dim=1, keepdim=True)
.long()
)
return states[-1]["encoder_states"], lengths, states
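# Illustrative sketch (not part of the original module): carrying `states`
# across segments with the encoder above. `encoder` and `chunks` are
# assumptions; `chunks` yields one (src_tokens, src_lengths) pair per segment.
def _example_carry_states(encoder, chunks):
    states = None
    for src_tokens, src_lengths in chunks:
        enc_states, enc_lengths, states = encoder(src_tokens, src_lengths, states)
        yield enc_states, enc_lengths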
# ------------------------------------------------------------------------------
# AugmentedMemoryTransformerEncoderLayer
# ------------------------------------------------------------------------------
class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer):
def __init__(self, args):
super().__init__(args)
self.left_context = args.left_context // args.encoder_stride
self.right_context = args.right_context // args.encoder_stride
def forward(self, x, state):
length, batch_size, x_dim = x.size()
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
# init_state
if state.get("memory_banks", None) is None:
state["memory_banks"] = []
        # TODO: research a new sum_query method
seg_start = self.left_context
seg_end = length - self.right_context
if seg_start < seg_end:
summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0)
else:
summarization_query = x.new_zeros(1, batch_size, x_dim)
x = torch.cat([x, summarization_query], dim=0)
x = self.self_attn(input_and_summary=x, state=state)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
def build_self_attention(self, embed_dim, args):
return AugmentedMemoryMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
tanh_on_mem=True,
max_memory_size=args.max_memory_size,
)
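# Minimal sketch of the summarization query built in forward() above
# (illustrative, made-up sizes): the mean over the current segment, excluding
# left/right context, is appended as one extra position.
def _example_summarization_query():
    x = torch.randn(10, 2, 8)  # [length, batch, dim], left = right = 2
    query = torch.mean(x[2 : 10 - 2], keepdim=True, dim=0)  # [1, 2, 8]
    return torch.cat([x, query], dim=0)  # [11, 2, 8]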
# ------------------------------------------------------------------------------
# AugmentedMemoryMultiheadAttention
# ------------------------------------------------------------------------------
class AugmentedMemoryMultiheadAttention(MultiheadAttention):
"""
Augmented Memory Attention from
Streaming Transformer-based Acoustic Models
Using Self-attention with Augmented Memory
https://arxiv.org/abs/2005.08042
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
tanh_on_mem=False,
memory_dim=None,
std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137
max_memory_size=-1,
disable_mem_on_mem_attn=True,
):
super().__init__(
embed_dim,
num_heads,
kdim,
vdim,
dropout,
bias,
add_bias_kv,
add_zero_attn,
self_attention,
encoder_decoder_attention,
q_noise,
qn_block_size,
)
self.memory_dim = memory_dim if memory_dim is not None else embed_dim
self.std_scale = std_scale
self.disable_mem_on_mem_attn = disable_mem_on_mem_attn
# This Operator was used for factorization in PySpeech
self.v2e = lambda x: x
if tanh_on_mem:
self.squash_mem = torch.tanh
self.nonlinear_squash_mem = True
else:
self.squash_mem = lambda x: x
self.nonlinear_squash_mem = False
self.max_memory_size = max_memory_size
def forward(self, input_and_summary, state):
"""
input: Encoder states of current segment with left or right context,
plus one summarization query
"""
length, batch_size, _ = input_and_summary.shape
        length = length - 1  # exclude the sum_query (the last index)
memory = state["memory_banks"]
# TODO: positional embedding on memory
if self.max_memory_size > -1 and len(memory) > self.max_memory_size:
            # TODO: need to fix here; `memory` is a list, so the
            # max_memory_size == 0 branch below assumes a tensor and will fail
if self.max_memory_size == 0:
memory = memory.new_zeros(1, memory.size(1), self.memory_dim)
else:
memory = memory[-self.max_memory_size :]
memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0)
input_and_sum_query = input_and_summary
q = self.q_proj(self.v2e(input_and_sum_query))
k = self.k_proj(self.v2e(memory_and_input))
v = self.v_proj(self.v2e(memory_and_input))
q = (
q.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
* self.scaling
)
k = (
k.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
)
v = (
v.contiguous()
.view(-1, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
)
attention_weights = torch.bmm(q, k.transpose(1, 2))
if self.disable_mem_on_mem_attn:
attention_weights = self.suppress_mem_on_mem_attention(
batch_size, self.num_heads, len(memory), attention_weights
)
if self.std_scale is not None:
attention_weights = attention_suppression(attention_weights, self.std_scale)
assert list(attention_weights.shape) == [
batch_size * self.num_heads,
length + 1,
length + len(memory),
]
attention_weights = torch.nn.functional.softmax(
attention_weights.float(), dim=-1
).type_as(attention_weights)
attention_probs = self.dropout_module(attention_weights)
        # [B*n_head, T+1, T+mem] x [B*n_head, T+mem, d_head] -> [B*n_head, T+1, d_head]
attention = torch.bmm(attention_probs, v)
assert list(attention.shape) == [
batch_size * self.num_heads,
length + 1,
self.head_dim,
]
attention = (
attention.transpose(0, 1)
.contiguous()
.view(length + 1, batch_size, self.embed_dim)
)
output_and_memory = self.out_proj(attention)
next_m = output_and_memory[-1:]
next_m = self.squash_mem(next_m)
output = output_and_memory[:-1]
state["memory_banks"].append(next_m)
return output
def suppress_mem_on_mem_attention(
self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor
):
"""
Arguments:
- B: batch size
- num_heads: number of attention heads
- mem_size: size of memory bank
- attention_weight: a [B*num_heads, T + 1, T + mem_size] vector
Return:
modified attention_weight with [B*num_heads, -1, :mem_size] = -inf
"""
attention_weight[:, -1, :mem_size] = float("-inf")
return attention_weight
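# Tiny demonstration of the suppression above (illustrative, made-up sizes):
# after the -inf fill, softmax assigns zero probability to the memory slots
# for the summary query (the last row).
def _example_suppress_effect():
    attn = torch.randn(2, 4, 6)  # [B * num_heads, T + 1, T + mem_size]
    attn[:, -1, :2] = float("-inf")  # mem_size = 2, same op as above
    probs = torch.nn.functional.softmax(attn, dim=-1)
    assert bool((probs[:, -1, :2] == 0).all())
    return probs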
# ------------------------------------------------------------------------------
# SequenceEncoder
# ------------------------------------------------------------------------------
class SequenceEncoder(FairseqEncoder):
"""
SequenceEncoder encodes sequences.
More specifically, `src_tokens` and `src_lengths` in `forward()` should
describe a batch of "complete" sequences rather than segments.
Segment-by-segment inference can be triggered by `segment_size`:
1) `segment_size` is None:
SequenceEncoder treats the input sequence as one single segment.
2) `segment_size` is not None (some int instead):
SequenceEncoder does the following:
1. breaks the input sequence into several segments
            2. runs inference on each segment and collects the outputs
            3. concatenates the segment outputs into the output sequence.
Note that `segment_size` here shouldn't include additional left/right
contexts needed, for example if we wish to infer with LC-BLSTM where the
middle chunk size is 100 and right context is 20, `segment_size` should be
100.
"""
def __init__(self, args, module):
super().__init__(None)
self.module = module
self.input_time_axis = 1
self.output_time_axis = 0
self.segment_size = args.segment_size
self.left_context = args.left_context
self.right_context = args.right_context
def forward(
self,
src_tokens: Tensor,
src_lengths: Tensor,
states=None,
):
seg_src_tokens_lengths = sequence_to_segments(
sequence=src_tokens,
time_axis=self.input_time_axis,
lengths=src_lengths,
segment_size=self.segment_size,
extra_left_context=self.left_context,
extra_right_context=self.right_context,
)
seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = []
for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
(seg_encoder_states, seg_enc_lengths, states) = self.module(
seg_src_tokens,
seg_src_lengths,
states=states,
)
seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths))
encoder_out, enc_lengths = segments_to_sequence(
segments=seg_encoder_states_lengths, time_axis=self.output_time_axis
)
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
enc_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
return {
"encoder_out": [encoder_out],
"encoder_padding_mask": [encoder_padding_mask],
"encoder_embedding": [],
"encoder_states": [states],
"src_tokens": [],
"src_lengths": [],
}
def incremental_encode(
self,
seg_src_tokens: Tensor,
seg_src_lengths: Tensor,
states=None,
):
"""
        Unlike the forward function, this function takes segmented speech as
        input and appends encoder states to the previous states.
"""
(seg_encoder_states, seg_enc_lengths, states) = self.module(
seg_src_tokens,
seg_src_lengths,
states=states,
)
return seg_encoder_states, seg_enc_lengths, states
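# Illustrative usage sketch (names are assumptions): streaming encoding with
# incremental_encode, carrying `states` between calls.
def _example_incremental_encode(encoder, stream):
    states = None
    for seg_tokens, seg_lengths in stream:
        enc_states, enc_lengths, states = encoder.incremental_encode(
            seg_tokens, seg_lengths, states=states
        )
        yield enc_states, enc_lengths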
# ------------------------------------------------------------------------------
# Augmented memory model decorator
# ------------------------------------------------------------------------------
def augmented_memory(klass):
class StreamSeq2SeqModel(klass):
@staticmethod
def add_args(parser):
super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser)
parser.add_argument(
"--segment-size", type=int, required=True, help="Length of the segment."
)
parser.add_argument(
"--left-context",
type=int,
default=0,
help="Left context for the segment.",
)
parser.add_argument(
"--right-context",
type=int,
default=0,
help="Right context for the segment.",
)
parser.add_argument(
"--max-memory-size",
type=int,
default=-1,
help="Right context for the segment.",
)
StreamSeq2SeqModel.__name__ = klass.__name__
return StreamSeq2SeqModel
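# Illustrative usage sketch (hypothetical; `base_model_cls` is any fairseq
# seq2seq model class following the add_args convention):
def _example_augment_model(base_model_cls):
    # Returns a streaming variant with --segment-size, --left-context,
    # --right-context and --max-memory-size CLI arguments added.
    return augmented_memory(base_model_cls)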
| 16,095 | 32.051335 | 88 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/ema/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
from .ema import EMA
def build_ema(model, cfg, device):
return EMA(model, cfg, device)
# automatically import any Python files in the models/ema/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
file_name = file[: file.find(".py")]
importlib.import_module("fairseq.models.ema." + file_name)
| 599 | 27.571429 | 68 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/ema/ema.py |
#!/usr/bin/env python3
"""
This module has the EMA class used to store a copy of the exponentially decayed
model params.
Typical usage of EMA class involves initializing an object using an existing
model (random or from a seed model) and setting the config like ema_decay,
ema_start_update which determine how the EMA model is updated. After every
update of the model i.e. at the end of the train_step, the EMA should be updated
by passing the new model to the EMA.step function. The EMA model state dict
can be stored in the extra state under the key of "ema" and dumped
into a checkpoint and loaded. The EMA object can be passed to tasks
by setting task.uses_ema property.
EMA is a smoothed/ensemble model which might have better performance
when used for inference or further fine-tuning. EMA class has a
reverse function to load the EMA params into a model and use it
like a regular model.
"""
import copy
import logging
import torch
from fairseq import checkpoint_utils
class EMA(object):
"""Exponential Moving Average of Fairseq Models
EMA keeps a copy of the exponentially decayed model params.
The set of params should include both gradient-descent and
non-gradient descent params, such as batch mean/var and buffers.
This is a modified implementation of
the open source code in https://github.com/zhawe01/fairseq-gec.git,
and internal source code in
fbcode/mobile-vision/projects/classification_pytorch/lib/utils/model_ema.py.
Similar to TF EMA.
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage.
    EMA provides an averaged and smoothed set of model weights, and has been shown to
improve vision models. EMA class does all necessary functions to update, reload,
or init EMA methods.
EMA object is initialized from an arbitrary model. By default, it is stored in
the same device (unless device specified at initialization) and with the
same precision as the model (unless ema_fp32 is True). ema_fp32 is recommended.
This stores the EMA parameters in fp32 only for the EMA update step, and
is used at the default precision otherwise.
EMA is usually enabled using EMAConfig with store_ema=True. Some important
parameters to configure EMA are
1) ema_decay - The decay of EMA
2) ema_update_freq - EMA is updated every this many model updates.
3) ema_start_update - Start EMA update after this many model updates [default 0]
Key methods:
1) step - One update of EMA using new model
2) restore - Update EMA from a state dict
3) reverse - Load EMA into a model
4) get_decay, _set_decay - Used to get or set the decay. Note _set_decay is
called from step.
5) build_fp32_params - Used to initialize or update the fp32 copy of EMA params.
Note this is enabled only when ema_fp32=True
"""
def __init__(self, model, config, device=None):
"""
@param model model to initialize the EMA with
@param config EMAConfig object with configuration like
ema_decay, ema_update_freq, ema_fp32
@param device If provided, copy EMA to this device (e.g. gpu).
Otherwise EMA is in the same device as the model.
"""
self.decay = config.ema_decay
self.model = copy.deepcopy(model)
self.model.requires_grad_(False)
self.config = config
self.fp32_params = {}
if self.config.ema_seed_model is not None:
state = checkpoint_utils.load_ema_from_checkpoint(
self.config.ema_seed_model
)
self.model.load_state_dict(state["model"], strict=True)
if device is not None:
logging.info(f"Copying EMA model to device {device}")
self.model = self.model.to(device=device)
if self.config.ema_fp32:
self.build_fp32_params()
self.update_freq_counter = 0
def get_model(self):
return self.model
def build_fp32_params(self, state_dict=None):
"""
Store a copy of the EMA params in fp32.
        If a state dict is passed, the EMA params are copied from
        the provided state dict. Otherwise, they are copied from the
        current EMA model parameters.
"""
if not self.config.ema_fp32:
raise RuntimeError(
"build_fp32_params should not be called if ema_fp32=False. "
"Use ema_fp32=True if this is really intended."
)
if state_dict is None:
state_dict = self.model.state_dict()
def _to_float(t):
return t.float() if torch.is_floating_point(t) else t
for param_key in state_dict:
if param_key in self.fp32_params:
self.fp32_params[param_key].copy_(state_dict[param_key])
else:
self.fp32_params[param_key] = _to_float(state_dict[param_key])
def restore(self, state_dict, build_fp32_params=False):
"""Load data from a model spec into EMA model"""
self.model.load_state_dict(state_dict, strict=False)
if build_fp32_params:
self.build_fp32_params(state_dict)
def _set_decay(self, decay):
self.decay = decay
def get_decay(self):
return self.decay
def _step_internal(self, new_model, updates=None):
"""One update of the EMA model based on new model weights"""
decay = self.decay
ema_state_dict = {}
ema_params = (
self.fp32_params if self.config.ema_fp32 else self.model.state_dict()
)
for key, param in new_model.state_dict().items():
try:
ema_param = ema_params[key]
except KeyError:
ema_param = (
param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
)
if param.shape != ema_param.shape:
raise ValueError(
"incompatible tensor shapes between model param and ema param"
+ "{} vs. {}".format(param.shape, ema_param.shape)
)
if "version" in key:
# Do not decay a model.version pytorch param
continue
ema_param.mul_(decay)
ema_param.add_(param.to(dtype=ema_param.dtype), alpha=1 - decay)
ema_state_dict[key] = ema_param
self.restore(ema_state_dict, build_fp32_params=False)
def step(self, new_model, updates=None):
"""
One update of EMA which is done every self.config.ema_update_freq
updates of the model.
@param updates The current number of model updates done.
        Decay is set to 0 if model updates < ema_start_update, which means
        the model is simply copied over to the EMA.
        When model updates >= ema_start_update, the EMA is updated with
        a decay of self.config.ema_decay.
"""
self._set_decay(
0
if updates is not None and updates < self.config.ema_start_update
else self.config.ema_decay
)
if updates is not None and self.config.ema_update_freq > 1:
self.update_freq_counter += 1
if self.update_freq_counter >= self.config.ema_update_freq:
self._step_internal(new_model, updates)
self.update_freq_counter = 0
else:
self._step_internal(new_model, updates)
def reverse(self, model):
"""
Load the model parameters from EMA model.
Useful for inference or fine-tuning from the EMA model.
"""
model.load_state_dict(self.model.state_dict(), strict=False)
return model
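# Illustrative lifecycle sketch (not part of the original module; `model`,
# `cfg`, `loader` and `train_step` are assumptions). The core rule applied in
# _step_internal is, for decay d:
#   ema_param <- d * ema_param + (1 - d) * model_param
def _example_ema_loop(model, cfg, loader, train_step):
    ema = EMA(model, cfg)
    for updates, batch in enumerate(loader):
        train_step(model, batch)
        ema.step(model, updates=updates)
    # load the smoothed weights into a fresh copy for evaluation
    return ema.reverse(copy.deepcopy(model))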
| 7,692 | 38.451282 | 86 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/roberta/hub_interface.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
class RobertaHubInterface(nn.Module):
"""A simple PyTorch Hub interface to RoBERTa.
Usage: https://github.com/pytorch/fairseq/tree/main/examples/roberta
"""
def __init__(self, cfg, task, model):
super().__init__()
self.cfg = cfg
self.task = task
self.model = model
self.bpe = encoders.build_bpe(cfg.bpe)
# this is useful for determining the device
self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def encode(
self, sentence: str, *addl_sentences, no_separator=False
) -> torch.LongTensor:
"""
BPE-encode a sentence (or multiple sentences).
Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
Every sentence ends with an end-of-sentence (`</s>`) and we use an
extra end-of-sentence (`</s>`) as a separator.
Example (single sentence): `<s> a b c </s>`
Example (sentence pair): `<s> d e f </s> </s> 1 2 3 </s>`
The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
requires leading spaces. For example::
>>> roberta.encode('Hello world').tolist()
[0, 31414, 232, 2]
>>> roberta.encode(' world').tolist()
[0, 232, 2]
>>> roberta.encode('world').tolist()
[0, 8331, 2]
"""
bpe_sentence = "<s> " + self.bpe.encode(sentence) + " </s>"
for s in addl_sentences:
bpe_sentence += " </s>" if not no_separator else ""
bpe_sentence += " " + self.bpe.encode(s) + " </s>"
tokens = self.task.source_dictionary.encode_line(
bpe_sentence, append_eos=False, add_if_not_exist=False
)
return tokens.long()
def decode(self, tokens: torch.LongTensor):
assert tokens.dim() == 1
tokens = tokens.numpy()
if tokens[0] == self.task.source_dictionary.bos():
tokens = tokens[1:] # remove <s>
eos_mask = tokens == self.task.source_dictionary.eos()
doc_mask = eos_mask[1:] & eos_mask[:-1]
sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
sentences = [
self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
]
if len(sentences) == 1:
return sentences[0]
return sentences
def extract_features(
self, tokens: torch.LongTensor, return_all_hiddens: bool = False
) -> torch.Tensor:
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
if tokens.size(-1) > self.model.max_positions():
raise ValueError(
"tokens exceeds maximum length: {} > {}".format(
tokens.size(-1), self.model.max_positions()
)
)
features, extra = self.model(
tokens.to(device=self.device),
features_only=True,
return_all_hiddens=return_all_hiddens,
)
if return_all_hiddens:
# convert from T x B x C -> B x T x C
inner_states = extra["inner_states"]
return [inner_state.transpose(0, 1) for inner_state in inner_states]
else:
return features # just the last layer's features
def register_classification_head(
self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
):
self.model.register_classification_head(
name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
)
def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
features = self.extract_features(tokens.to(device=self.device))
logits = self.model.classification_heads[head](features)
if return_logits:
return logits
return F.log_softmax(logits, dim=-1)
def extract_features_aligned_to_words(
self, sentence: str, return_all_hiddens: bool = False
) -> torch.Tensor:
"""Extract RoBERTa features, aligned to spaCy's word-level tokenizer."""
from fairseq.models.roberta import alignment_utils
from spacy.tokens import Doc
nlp = alignment_utils.spacy_nlp()
tokenizer = alignment_utils.spacy_tokenizer()
# tokenize both with GPT-2 BPE and spaCy
bpe_toks = self.encode(sentence)
spacy_toks = tokenizer(sentence)
spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)]
alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws)
# extract features and align them
features = self.extract_features(
bpe_toks, return_all_hiddens=return_all_hiddens
)
features = features.squeeze(0)
aligned_feats = alignment_utils.align_features_to_words(
self, features, alignment
)
# wrap in spaCy Doc
doc = Doc(
nlp.vocab,
words=["<s>"] + [x.text for x in spacy_toks] + ["</s>"],
spaces=[True]
+ [x.endswith(" ") for x in spacy_toks_ws[:-1]]
+ [True, False],
)
assert len(doc) == aligned_feats.size(0)
doc.user_token_hooks["vector"] = lambda token: aligned_feats[token.i]
return doc
def fill_mask(self, masked_input: str, topk: int = 5):
masked_token = "<mask>"
assert (
masked_token in masked_input and masked_input.count(masked_token) == 1
), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format(
masked_token
)
text_spans = masked_input.split(masked_token)
text_spans_bpe = (
(" {0} ".format(masked_token))
.join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans])
.strip()
)
tokens = self.task.source_dictionary.encode_line(
"<s> " + text_spans_bpe + " </s>",
append_eos=False,
add_if_not_exist=False,
)
masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False)
if tokens.dim() == 1:
tokens = tokens.unsqueeze(0)
with utils.model_eval(self.model):
features, extra = self.model(
tokens.long().to(device=self.device),
features_only=False,
return_all_hiddens=False,
)
logits = features[0, masked_index, :].squeeze()
prob = logits.softmax(dim=0)
values, index = prob.topk(k=topk, dim=0)
topk_predicted_token_bpe = self.task.source_dictionary.string(index)
topk_filled_outputs = []
for index, predicted_token_bpe in enumerate(
topk_predicted_token_bpe.split(" ")
):
predicted_token = self.bpe.decode(predicted_token_bpe)
# Quick hack to fix https://github.com/pytorch/fairseq/issues/1306
if predicted_token_bpe.startswith("\u2581"):
predicted_token = " " + predicted_token
if " {0}".format(masked_token) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(
" {0}".format(masked_token), predicted_token
),
values[index].item(),
predicted_token,
)
)
else:
topk_filled_outputs.append(
(
masked_input.replace(masked_token, predicted_token),
values[index].item(),
predicted_token,
)
)
return topk_filled_outputs
def disambiguate_pronoun(self, sentence: str) -> bool:
"""
Usage::
>>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.')
True
>>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.')
'The trophy'
"""
assert hasattr(
self.task, "disambiguate_pronoun"
), "roberta.disambiguate_pronoun() requires a model trained with the WSC task."
with utils.model_eval(self.model):
return self.task.disambiguate_pronoun(
self.model, sentence, use_cuda=self.device.type == "cuda"
)
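# Illustrative usage sketch (assumes network access; torch.hub downloads the
# checkpoint, and exact outputs vary with the model weights):
def _example_roberta_usage():
    roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
    roberta.eval()
    tokens = roberta.encode("Hello world")  # e.g. tensor([0, 31414, 232, 2])
    assert roberta.decode(tokens) == "Hello world"
    return roberta.fill_mask("He is a <mask> guy", topk=3)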
| 8,857 | 36.533898 | 114 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/roberta/model_xlmr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Unsupervised Cross-lingual Representation Learning at Scale
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("xlmr")
class XLMRModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"xlmr.base": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.base.tar.gz",
"xlmr.large": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr.large.tar.gz",
"xlmr.xl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xl.tar.gz",
"xlmr.xxl": "http://dl.fbaipublicfiles.com/fairseq/models/xlmr/xlmr.xxl.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
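# Illustrative usage sketch (downloads the checkpoint resolved through
# hub_models() above; network access is assumed):
def _example_load_xlmr():
    return XLMRModel.from_pretrained("xlmr.base")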
| 1,442 | 29.702128 | 92 | py |
| CIF-HieraDist | CIF-HieraDist-main/fairseq/models/roberta/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
RoBERTa: A Robustly Optimized BERT Pretraining Approach.
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import DEFAULT_MIN_PARAMS_TO_WRAP, TransformerEncoder
from fairseq.modules import LayerNorm
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import safe_getattr, safe_hasattr
from .hub_interface import RobertaHubInterface
logger = logging.getLogger(__name__)
@register_model("roberta")
class RobertaModel(FairseqEncoderModel):
@classmethod
def hub_models(cls):
return {
"roberta.base": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.base.tar.gz",
"roberta.large": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.tar.gz",
"roberta.large.mnli": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.mnli.tar.gz",
"roberta.large.wsc": "http://dl.fbaipublicfiles.com/fairseq/models/roberta.large.wsc.tar.gz",
}
def __init__(self, args, encoder):
super().__init__(encoder)
self.args = args
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--encoder-layers", type=int, metavar="L", help="num encoder layers"
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="H",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="F",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="A",
help="num encoder attention heads",
)
parser.add_argument(
"--activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--pooler-activation-fn",
choices=utils.get_available_activation_fns(),
help="activation function to use for pooler layer",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN",
)
parser.add_argument(
"--pooler-dropout",
type=float,
metavar="D",
help="dropout probability in the masked_lm pooler layers",
)
parser.add_argument(
"--max-positions", type=int, help="number of positional embeddings to learn"
)
parser.add_argument(
"--load-checkpoint-heads",
action="store_true",
help="(re-)register and load heads when loading checkpoints",
)
parser.add_argument(
"--untie-weights-roberta",
action="store_true",
help="Untie weights between embeddings and classifiers in RoBERTa",
)
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument(
"--encoder-layerdrop",
type=float,
metavar="D",
default=0,
help="LayerDrop probability for encoder",
)
parser.add_argument(
"--encoder-layers-to-keep",
default=None,
help="which layers to *keep* when pruning as a comma-separated list",
)
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument(
"--quant-noise-pq",
type=float,
metavar="D",
default=0,
help="iterative PQ quantization noise at training time",
)
parser.add_argument(
"--quant-noise-pq-block-size",
type=int,
metavar="D",
default=8,
help="block size of quantization noise at training time",
)
parser.add_argument(
"--quant-noise-scalar",
type=float,
metavar="D",
default=0,
help="scalar quantization noise and scalar quantization at training time",
)
# args for "Better Fine-Tuning by Reducing Representational Collapse" (Aghajanyan et al. 2020)
parser.add_argument(
"--spectral-norm-classification-head",
action="store_true",
default=False,
help="Apply spectral normalization on the classification head",
)
# args for Fully Sharded Data Parallel (FSDP) training
parser.add_argument(
"--min-params-to-wrap",
type=int,
metavar="D",
default=DEFAULT_MIN_PARAMS_TO_WRAP,
help=(
"minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
),
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
from omegaconf import OmegaConf
if OmegaConf.is_config(args):
OmegaConf.set_struct(args, False)
# make sure all arguments are present
base_architecture(args)
if not safe_hasattr(args, "max_positions"):
if not safe_hasattr(args, "tokens_per_sample"):
args.tokens_per_sample = task.max_positions()
args.max_positions = args.tokens_per_sample
encoder = RobertaEncoder(args, task.source_dictionary)
if OmegaConf.is_config(args):
OmegaConf.set_struct(args, True)
return cls(args, encoder)
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
**kwargs,
):
if classification_head_name is not None:
features_only = True
x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs)
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def get_normalized_probs(self, net_output, log_probs, sample=None):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output[0].float()
if log_probs:
return F.log_softmax(logits, dim=-1)
else:
return F.softmax(logits, dim=-1)
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = RobertaClassificationHead(
input_dim=self.args.encoder_embed_dim,
inner_dim=inner_dim or self.args.encoder_embed_dim,
num_classes=num_classes,
activation_fn=self.args.pooler_activation_fn,
pooler_dropout=self.args.pooler_dropout,
q_noise=self.args.quant_noise_pq,
qn_block_size=self.args.quant_noise_pq_block_size,
do_spectral_norm=self.args.spectral_norm_classification_head,
)
@property
def supported_targets(self):
return {"self"}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="gpt2",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
logger.info(x["args"])
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
# rename decoder -> encoder before upgrading children modules
for k in list(state_dict.keys()):
if k.startswith(prefix + "decoder"):
new_k = prefix + "encoder" + k[len(prefix + "decoder") :]
state_dict[new_k] = state_dict[k]
del state_dict[k]
# rename emb_layer_norm -> layernorm_embedding
for k in list(state_dict.keys()):
if ".emb_layer_norm." in k:
new_k = k.replace(".emb_layer_norm.", ".layernorm_embedding.")
state_dict[new_k] = state_dict[k]
del state_dict[k]
# upgrade children modules
super().upgrade_state_dict_named(state_dict, name)
# Handle new classification heads present in the state dict.
current_head_names = (
[]
if not hasattr(self, "classification_heads")
else self.classification_heads.keys()
)
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + "classification_heads."):
continue
head_name = k[len(prefix + "classification_heads.") :].split(".")[0]
num_classes = state_dict[
prefix + "classification_heads." + head_name + ".out_proj.weight"
].size(0)
inner_dim = state_dict[
prefix + "classification_heads." + head_name + ".dense.weight"
].size(0)
if getattr(self.args, "load_checkpoint_heads", False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
logger.warning(
"deleting classification head ({}) from checkpoint "
"not present in current model: {}".format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes
!= self.classification_heads[head_name].out_proj.out_features
or inner_dim
!= self.classification_heads[head_name].dense.out_features
):
logger.warning(
"deleting classification head ({}) from checkpoint "
"with different dimensions than current model: {}".format(
head_name, k
)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, "classification_heads"):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + "classification_heads." + k not in state_dict:
logger.info("Overwriting " + prefix + "classification_heads." + k)
state_dict[prefix + "classification_heads." + k] = v
class RobertaLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
        # Only project the masked tokens while training; this
        # saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
q_noise=0,
qn_block_size=8,
do_spectral_norm=False,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = apply_quant_noise_(
nn.Linear(inner_dim, num_classes), q_noise, qn_block_size
)
if do_spectral_norm:
if q_noise != 0:
raise NotImplementedError(
"Attempting to use Spectral Normalization with Quant Noise. This is not officially supported"
)
self.out_proj = torch.nn.utils.spectral_norm(self.out_proj)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class RobertaEncoder(FairseqEncoder):
"""RoBERTa encoder."""
def __init__(self, args, dictionary):
super().__init__(dictionary)
# set any missing default values
base_architecture(args)
self.args = args
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
embed_tokens = self.build_embedding(
len(dictionary), args.encoder_embed_dim, dictionary.pad()
)
self.sentence_encoder = self.build_encoder(args, dictionary, embed_tokens)
self.lm_head = self.build_lm_head(
embed_dim=args.encoder_embed_dim,
output_dim=len(dictionary),
activation_fn=args.activation_fn,
weight=(
self.sentence_encoder.embed_tokens.weight
if not args.untie_weights_roberta
else None
),
)
def build_embedding(self, vocab_size, embedding_dim, padding_idx):
return nn.Embedding(vocab_size, embedding_dim, padding_idx)
def build_encoder(self, args, dictionary, embed_tokens):
encoder = TransformerEncoder(args, dictionary, embed_tokens)
encoder.apply(init_bert_params)
return encoder
def build_lm_head(self, embed_dim, output_dim, activation_fn, weight):
return RobertaLMHead(embed_dim, output_dim, activation_fn, weight)
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
masked_tokens=None,
**unused,
):
"""
Args:
src_tokens (LongTensor): input tokens of shape `(batch, src_len)`
features_only (bool, optional): skip LM head and just return
features. If True, the output will be of shape
`(batch, src_len, embed_dim)`.
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
tuple:
- the LM output of shape `(batch, src_len, vocab)`
- a dictionary of additional data, where 'inner_states'
is a list of hidden states. Note that the hidden
                  states have shape `(src_len, batch, embed_dim)`.
"""
x, extra = self.extract_features(
src_tokens, return_all_hiddens=return_all_hiddens
)
if not features_only:
x = self.output_layer(x, masked_tokens=masked_tokens)
return x, extra
def extract_features(self, src_tokens, return_all_hiddens=False, **kwargs):
encoder_out = self.sentence_encoder(
src_tokens,
return_all_hiddens=return_all_hiddens,
token_embeddings=kwargs.get("token_embeddings", None),
)
# T x B x C -> B x T x C
features = encoder_out["encoder_out"][0].transpose(0, 1)
inner_states = encoder_out["encoder_states"] if return_all_hiddens else None
return features, {"inner_states": inner_states}
def output_layer(self, features, masked_tokens=None, **unused):
return self.lm_head(features, masked_tokens)
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
@register_model_architecture("roberta", "roberta")
def base_architecture(args):
args.encoder_layers = safe_getattr(args, "encoder_layers", 12)
args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 3072)
args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 12)
args.dropout = safe_getattr(args, "dropout", 0.1)
args.attention_dropout = safe_getattr(args, "attention_dropout", 0.1)
args.activation_dropout = safe_getattr(args, "activation_dropout", 0.0)
args.pooler_dropout = safe_getattr(args, "pooler_dropout", 0.0)
args.max_source_positions = safe_getattr(args, "max_positions", 512)
args.no_token_positional_embeddings = safe_getattr(
args, "no_token_positional_embeddings", False
)
# BERT has a few structural differences compared to the original Transformer
args.encoder_learned_pos = safe_getattr(args, "encoder_learned_pos", True)
args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", True)
args.no_scale_embedding = safe_getattr(args, "no_scale_embedding", True)
args.activation_fn = safe_getattr(args, "activation_fn", "gelu")
args.encoder_normalize_before = safe_getattr(
args, "encoder_normalize_before", False
)
args.pooler_activation_fn = safe_getattr(args, "pooler_activation_fn", "tanh")
args.untie_weights_roberta = safe_getattr(args, "untie_weights_roberta", False)
# Adaptive input config
args.adaptive_input = safe_getattr(args, "adaptive_input", False)
# LayerDrop config
args.encoder_layerdrop = safe_getattr(args, "encoder_layerdrop", 0.0)
args.encoder_layers_to_keep = safe_getattr(args, "encoder_layers_to_keep", None)
# Quantization noise config
args.quant_noise_pq = safe_getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = safe_getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = safe_getattr(args, "quant_noise_scalar", 0)
# R4F config
args.spectral_norm_classification_head = safe_getattr(
args, "spectral_norm_classification_head", False
)
@register_model_architecture("roberta", "roberta_prenorm")
def roberta_prenorm_architecture(args):
args.layernorm_embedding = safe_getattr(args, "layernorm_embedding", False)
args.encoder_normalize_before = safe_getattr(args, "encoder_normalize_before", True)
base_architecture(args)
@register_model_architecture("roberta", "roberta_base")
def roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture("roberta", "roberta_large")
def roberta_large_architecture(args):
args.encoder_layers = safe_getattr(args, "encoder_layers", 24)
args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16)
base_architecture(args)
@register_model_architecture("roberta", "xlm")
def xlm_architecture(args):
args.encoder_layers = safe_getattr(args, "encoder_layers", 16)
args.encoder_embed_dim = safe_getattr(args, "encoder_embed_dim", 1280)
args.encoder_ffn_embed_dim = safe_getattr(args, "encoder_ffn_embed_dim", 1280 * 4)
args.encoder_attention_heads = safe_getattr(args, "encoder_attention_heads", 16)
base_architecture(args)
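# A minimal sketch (hypothetical Namespace, not parsed CLI args) of how these
# presets compose: each architecture function only fills in missing attributes,
# so variants set their overrides first and then delegate to base_architecture.
if __name__ == "__main__":
    import argparse

    _args = argparse.Namespace()
    roberta_large_architecture(_args)
    assert _args.encoder_layers == 24 and _args.encoder_embed_dim == 1024
    assert _args.dropout == 0.1  # inherited from base_architecture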
| 22,452
| 36.609715
| 113
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/roberta/alignment_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from typing import List
import torch
def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]):
"""
Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy).
Args:
roberta (RobertaHubInterface): RoBERTa instance
bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)`
other_tokens (List[str]): other tokens of shape `(T_words)`
Returns:
List[List[int]]: mapping from *other_tokens* to the corresponding *bpe_tokens* indices.
"""
assert bpe_tokens.dim() == 1
assert bpe_tokens[0] == 0
def clean(text):
return text.strip()
# remove whitespaces to simplify alignment
bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens]
bpe_tokens = [
clean(roberta.bpe.decode(x) if x not in {"<s>", ""} else x) for x in bpe_tokens
]
other_tokens = [clean(str(o)) for o in other_tokens]
# strip leading <s>
bpe_tokens = bpe_tokens[1:]
assert "".join(bpe_tokens) == "".join(other_tokens)
# create alignment from every word to a list of BPE tokens
alignment = []
bpe_toks = filter(lambda item: item[1] != "", enumerate(bpe_tokens, start=1))
j, bpe_tok = next(bpe_toks)
for other_tok in other_tokens:
bpe_indices = []
while True:
if other_tok.startswith(bpe_tok):
bpe_indices.append(j)
other_tok = other_tok[len(bpe_tok) :]
try:
j, bpe_tok = next(bpe_toks)
except StopIteration:
j, bpe_tok = None, None
elif bpe_tok.startswith(other_tok):
# other_tok spans multiple BPE tokens
bpe_indices.append(j)
bpe_tok = bpe_tok[len(other_tok) :]
other_tok = ""
else:
raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok))
if other_tok == "":
break
assert len(bpe_indices) > 0
alignment.append(bpe_indices)
assert len(alignment) == len(other_tokens)
return alignment
def align_features_to_words(roberta, features, alignment):
"""
Align given features to words.
Args:
roberta (RobertaHubInterface): RoBERTa instance
features (torch.Tensor): features to align of shape `(T_bpe x C)`
alignment: alignment between BPE tokens and words returned by
func:`align_bpe_to_words`.
"""
assert features.dim() == 2
bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices)
assert bpe_counts[0] == 0 # <s> shouldn't be aligned
denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))])
weighted_features = features / denom.unsqueeze(-1)
output = [weighted_features[0]]
largest_j = -1
for bpe_indices in alignment:
output.append(weighted_features[bpe_indices].sum(dim=0))
largest_j = max(largest_j, *bpe_indices)
for j in range(largest_j + 1, len(features)):
output.append(weighted_features[j])
output = torch.stack(output)
assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4)
return output
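# A minimal end-to-end sketch of the two helpers above. The hub model name and
# sentence are illustrative assumptions; running this downloads the
# roberta.base checkpoint via torch.hub.
if __name__ == "__main__":
    roberta = torch.hub.load("pytorch/fairseq", "roberta.base")
    roberta.eval()
    words = ["Hello", "world", "!"]
    bpe = roberta.encode("Hello world!")  # LongTensor beginning with <s> (id 0)
    alignment = align_bpe_to_words(roberta, bpe, words)
    feats = roberta.extract_features(bpe)[0]  # (T_bpe x C)
    word_feats = align_features_to_words(roberta, feats, alignment)
    # one row per word, plus the untouched <s>/</s> positions
    print(alignment, word_feats.shape)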
def spacy_nlp():
if getattr(spacy_nlp, "_nlp", None) is None:
try:
from spacy.lang.en import English
spacy_nlp._nlp = English()
except ImportError:
raise ImportError("Please install spacy with: pip install spacy")
return spacy_nlp._nlp
def spacy_tokenizer():
if getattr(spacy_tokenizer, "_tokenizer", None) is None:
        try:
            nlp = spacy_nlp()
            # NOTE: Defaults.create_tokenizer is the spaCy v2 API; on spaCy v3
            # the equivalent is nlp.tokenizer.
            spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp)
except ImportError:
raise ImportError("Please install spacy with: pip install spacy")
return spacy_tokenizer._tokenizer
| 4,091
| 33.386555
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/roberta/enc_dec.py
|
import argparse
import logging
import torch.nn as nn
import fairseq.checkpoint_utils
from fairseq.models import (
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerDecoder
from fairseq.models.roberta import model as roberta
logger = logging.getLogger(__name__)
@register_model("roberta_enc_dec")
class RobertaEncDecModel(FairseqEncoderDecoderModel):
@staticmethod
def add_args(parser):
parser.add_argument(
"--pretrained-mlm-checkpoint",
default=None,
type=str,
metavar="PRETRAINED",
help="path to pretrained mlm checkpoint",
)
parser.add_argument(
"--pretrained-decoder", action="store_true", help="reload decoder"
)
parser.add_argument(
"--hack-layernorm-embedding",
action="store_true",
help="hack to reload old models trained with encoder-normalize-before=False (no equivalent to encoder-normalize-before=False and layernorm_embedding=False",
)
parser.add_argument(
"--share-decoder-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--share-all-embeddings",
action="store_true",
help="share encoder, decoder and output embeddings"
" (requires shared dictionary and embed dim)",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_enc_dec_architecture(args)
if args.pretrained_mlm_checkpoint:
arg_overrides = None
if args.hack_layernorm_embedding:
arg_overrides = {"layernorm_embedding": False}
loaded = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[args.pretrained_mlm_checkpoint], arg_overrides=arg_overrides
)
([roberta_enc], _cfg, _task) = loaded
else:
            # Do we need to edit untie_weights here?
share_in_out = (
args.share_decoder_input_output_embed or args.share_all_embeddings
)
args.untie_weights_roberta = not share_in_out
if args.hack_layernorm_embedding:
args.layernorm_embedding = False
args.encoder_normalize_before = False
roberta_enc = roberta.RobertaModel.build_model(args, task)
return cls.from_roberta(roberta_enc, args, task.source_dictionary)
@staticmethod
def from_roberta(roberta_enc: roberta.RobertaModel, args, dictionary):
encoder = roberta_enc.encoder.sentence_encoder
vocab_size, embed_dim = encoder.embed_tokens.weight.shape
if args.share_all_embeddings:
lm_head = roberta_enc.encoder.lm_head
            assert encoder.embed_tokens.weight is lm_head.weight, (
                "Can't use --share-all-embeddings with a model "
                "that was pretrained with --untie-weights-roberta"
            )
else:
lm_head = roberta.RobertaLMHead(
embed_dim, vocab_size, roberta_enc.args.activation_fn
)
dec_embs = nn.Embedding(vocab_size, embed_dim, dictionary.pad())
if args.share_all_embeddings or args.share_decoder_input_output_embed:
            # Note: I wasn't able to use Embedding's _weight parameter to achieve this sharing.
dec_embs.weight = lm_head.weight
decoder = TransformerDecoder(
RobertaEncDecModel.read_args_from_roberta(roberta_enc.args),
dictionary,
dec_embs,
no_encoder_attn=False,
output_projection=lm_head,
)
if getattr(args, "pretrained_decoder", False):
decoder_dict = encoder.state_dict()
# TODO: hide setting "encoder_attn" layers behind a flag.
for k, w in list(decoder_dict.items()):
if ".self_attn" in k:
k_enc_attn = k.replace(".self_attn", ".encoder_attn")
decoder_dict[k_enc_attn] = w.detach().clone()
for k, w in lm_head.state_dict().items():
decoder_dict["output_projection." + k] = w
missing_keys, unexpected_keys = decoder.load_state_dict(
decoder_dict, strict=False
)
# missing_keys = [m for m in missing_keys if ".encoder_attn" not in m]
assert not missing_keys and not unexpected_keys, (
"Failed to load state dict. "
f"Missing keys: {missing_keys}. "
f"Unexpected keys: {unexpected_keys}."
)
if args.share_all_embeddings:
assert decoder.output_projection.weight is decoder.embed_tokens.weight
assert encoder.embed_tokens.weight is decoder.embed_tokens.weight
elif args.share_decoder_input_output_embed:
assert decoder.output_projection.weight is decoder.embed_tokens.weight
assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight
else:
assert decoder.output_projection.weight is not decoder.embed_tokens.weight
assert encoder.embed_tokens.weight is not decoder.embed_tokens.weight
return RobertaEncDecModel(encoder, decoder)
@staticmethod
def read_args_from_roberta(roberta_args: argparse.Namespace):
        # TODO: this would become easier if encoder/decoder were using a similar
        # TransformerConfig object
args = argparse.Namespace(**vars(roberta_args))
attr_map = [
("encoder_attention_heads", "decoder_attention_heads"),
("encoder_embed_dim", "decoder_embed_dim"),
("encoder_embed_dim", "decoder_output_dim"),
("encoder_normalize_before", "decoder_normalize_before"),
("encoder_layers_to_keep", "decoder_layers_to_keep"),
("encoder_ffn_embed_dim", "decoder_ffn_embed_dim"),
("encoder_layerdrop", "decoder_layerdrop"),
("encoder_layers", "decoder_layers"),
("encoder_learned_pos", "decoder_learned_pos"),
            # should this be set from here?
("max_positions", "max_target_positions"),
]
for k1, k2 in attr_map:
setattr(args, k2, getattr(roberta_args, k1))
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = not roberta_args.untie_weights_roberta
return args
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
super().upgrade_state_dict_named(state_dict, name)
old_keys = list(state_dict.keys())
# rename decoder -> encoder before upgrading children modules
for k in old_keys:
if k.startswith(prefix + "encoder.lm_head"):
state_dict.pop(k)
continue
new_k = k
new_k = new_k.replace(".sentence_encoder.", ".")
new_k = new_k.replace("decoder.lm_head.", "decoder.output_projection.")
if k == new_k:
continue
# print(k, "->", new_k)
state_dict[new_k] = state_dict.pop(k)
@register_model_architecture("roberta_enc_dec", "roberta_enc_dec")
def base_enc_dec_architecture(args):
args.hack_layernorm_embedding = getattr(args, "hack_layernorm_embedding", False)
args.pretrained_mlm_checkpoint = getattr(args, "pretrained_mlm_checkpoint", None)
args.pretrained_decoder = getattr(args, "pretrained_decoder", None)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
roberta.base_architecture(args)
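# A minimal sketch of read_args_from_roberta with a hypothetical namespace:
# encoder hyperparameters are mirrored onto the matching decoder fields so a
# TransformerDecoder can be built from a RoBERTa-style config.
if __name__ == "__main__":
    _enc_args = argparse.Namespace(
        encoder_attention_heads=12,
        encoder_embed_dim=768,
        encoder_ffn_embed_dim=3072,
        encoder_layers=12,
        encoder_normalize_before=False,
        encoder_layers_to_keep=None,
        encoder_layerdrop=0.0,
        encoder_learned_pos=True,
        max_positions=512,
        untie_weights_roberta=False,
    )
    _dec_args = RobertaEncDecModel.read_args_from_roberta(_enc_args)
    assert _dec_args.decoder_embed_dim == 768
    assert _dec_args.share_decoder_input_output_embed  # inverse of the untie flag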
| 8,076
| 40.849741
| 168
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/roberta/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hub_interface import * # noqa
from .model import * # noqa
from .enc_dec import * # noqa
from .model_camembert import * # noqa
from .model_gottbert import * # noqa
from .model_xlmr import * # noqa
| 386
| 31.25
| 65
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/roberta/model_gottbert.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
GottBERT: a pure German Language Model
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("gottbert")
class GottbertModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"gottbert-base": "https://dl.gottbert.de/fairseq/models/gottbert-base.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="hf_byte_bpe",
bpe_vocab="vocab.json",
bpe_merges="merges.txt",
bpe_add_prefix_space=False,
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
bpe_vocab=bpe_vocab,
bpe_merges=bpe_merges,
bpe_add_prefix_space=bpe_add_prefix_space,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
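# Usage sketch (assumption: the gottbert-base archive is reachable at the URL
# above; the German sentence is illustrative):
if __name__ == "__main__":
    gottbert = GottbertModel.from_pretrained("gottbert-base")
    gottbert.eval()
    tokens = gottbert.encode("Ein Beispielsatz .")
    print(gottbert.extract_features(tokens).shape)  # (1 x T x C)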
| 1,376
| 26.54
| 90
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/roberta/model_camembert.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
CamemBERT: a Tasty French Language Model
"""
from fairseq.models import register_model
from .hub_interface import RobertaHubInterface
from .model import RobertaModel
@register_model("camembert")
class CamembertModel(RobertaModel):
@classmethod
def hub_models(cls):
return {
"camembert": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert.v0": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert-base": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base.tar.gz",
"camembert-large": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-large.tar.gz",
"camembert-base-ccnet": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet.tar.gz",
"camembert-base-ccnet-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-ccnet-4gb.tar.gz",
"camembert-base-wikipedia-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-wikipedia-4gb.tar.gz",
"camembert-base-oscar-4gb": "http://dl.fbaipublicfiles.com/fairseq/models/camembert-base-oscar-4gb.tar.gz",
}
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
**kwargs
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return RobertaHubInterface(x["args"], x["task"], x["models"][0])
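# Usage sketch following the fairseq CamemBERT example (assumption: the
# checkpoint can be downloaded; the masked sentence is illustrative):
if __name__ == "__main__":
    camembert = CamembertModel.from_pretrained("camembert")
    camembert.eval()
    print(camembert.fill_mask("Le camembert est <mask> :)", topk=3))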
| 1,942
| 37.098039
| 127
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/text_to_speech/tts_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Optional
import torch
from torch import nn
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import TransformerEncoderLayer, TransformerDecoderLayer
from fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet
from fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq import utils
logger = logging.getLogger(__name__)
def encoder_init(m):
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
def Embedding(num_embeddings, embedding_dim):
m = nn.Embedding(num_embeddings, embedding_dim)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
return m
class TTSTransformerEncoder(FairseqEncoder):
def __init__(self, args, src_dict, embed_speaker):
super().__init__(src_dict)
self.padding_idx = src_dict.pad()
self.embed_speaker = embed_speaker
self.spk_emb_proj = None
if embed_speaker is not None:
self.spk_emb_proj = nn.Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_tokens = nn.Embedding(
len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx
)
assert args.encoder_conv_kernel_size % 2 == 1
self.prenet = nn.ModuleList(
nn.Sequential(
nn.Conv1d(
args.encoder_embed_dim,
args.encoder_embed_dim,
kernel_size=args.encoder_conv_kernel_size,
padding=((args.encoder_conv_kernel_size - 1) // 2),
),
nn.BatchNorm1d(args.encoder_embed_dim),
nn.ReLU(),
nn.Dropout(args.encoder_dropout),
)
for _ in range(args.encoder_conv_layers)
)
self.prenet_proj = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.pos_emb_alpha = nn.Parameter(torch.ones(1))
self.transformer_layers = nn.ModuleList(
TransformerEncoderLayer(args)
for _ in range(args.encoder_transformer_layers)
)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(args.encoder_embed_dim)
else:
self.layer_norm = None
self.apply(encoder_init)
def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs):
x = self.embed_tokens(src_tokens)
x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T
for conv in self.prenet:
x = conv(x)
x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C
x = self.prenet_proj(x)
padding_mask = src_tokens.eq(self.padding_idx)
positions = self.embed_positions(padding_mask)
x += self.pos_emb_alpha * positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
for layer in self.transformer_layers:
x = layer(x, padding_mask)
if self.layer_norm is not None:
x = self.layer_norm(x)
if self.embed_speaker is not None:
seq_len, bsz, _ = x.size()
emb = self.embed_speaker(speaker).transpose(0, 1)
emb = emb.expand(seq_len, bsz, -1)
x = self.spk_emb_proj(torch.cat([x, emb], dim=2))
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [padding_mask]
if padding_mask.any()
else [], # B x T
"encoder_embedding": [], # B x T x C
"encoder_states": [], # List[T x B x C]
"src_tokens": [],
"src_lengths": [],
}
def decoder_init(m):
if isinstance(m, torch.nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh"))
class TTSTransformerDecoder(FairseqIncrementalDecoder):
def __init__(self, args, src_dict):
super().__init__(None)
self._future_mask = torch.empty(0)
self.args = args
self.padding_idx = src_dict.pad()
self.n_frames_per_step = args.n_frames_per_step
self.out_dim = args.output_frame_dim * args.n_frames_per_step
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.embed_positions = PositionalEmbedding(
args.max_target_positions, args.decoder_embed_dim, self.padding_idx
)
self.pos_emb_alpha = nn.Parameter(torch.ones(1))
self.prenet = nn.Sequential(
Prenet(
self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout
),
nn.Linear(args.prenet_dim, args.decoder_embed_dim),
)
self.n_transformer_layers = args.decoder_transformer_layers
self.transformer_layers = nn.ModuleList(
TransformerDecoderLayer(args) for _ in range(self.n_transformer_layers)
)
if args.decoder_normalize_before:
self.layer_norm = LayerNorm(args.decoder_embed_dim)
else:
self.layer_norm = None
self.feat_proj = nn.Linear(args.decoder_embed_dim, self.out_dim)
self.eos_proj = nn.Linear(args.decoder_embed_dim, 1)
self.postnet = Postnet(
self.out_dim,
args.postnet_conv_dim,
args.postnet_conv_kernel_size,
args.postnet_layers,
args.postnet_dropout,
)
self.ctc_proj = None
if getattr(args, "ctc_weight", 0.0) > 0.0:
self.ctc_proj = nn.Linear(self.out_dim, len(src_dict))
self.apply(decoder_init)
def extract_features(
self,
prev_outputs,
encoder_out=None,
incremental_state=None,
target_lengths=None,
speaker=None,
**kwargs
):
alignment_layer = self.n_transformer_layers - 1
self_attn_padding_mask = lengths_to_padding_mask(target_lengths)
positions = self.embed_positions(
self_attn_padding_mask, incremental_state=incremental_state
)
if incremental_state is not None:
prev_outputs = prev_outputs[:, -1:, :]
self_attn_padding_mask = self_attn_padding_mask[:, -1:]
if positions is not None:
positions = positions[:, -1:]
x = self.prenet(prev_outputs)
x += self.pos_emb_alpha * positions
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if not self_attn_padding_mask.any():
self_attn_padding_mask = None
attn: Optional[torch.Tensor] = None
inner_states: List[Optional[torch.Tensor]] = [x]
for idx, transformer_layer in enumerate(self.transformer_layers):
if incremental_state is None:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = transformer_layer(
x,
encoder_out["encoder_out"][0]
if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0)
else None,
encoder_out["encoder_padding_mask"][0]
if (
encoder_out is not None
and len(encoder_out["encoder_padding_mask"]) > 0
)
else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
# average probabilities over heads, transpose to
# (B, src_len, tgt_len)
attn = attn.mean(dim=0).transpose(2, 1)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def forward(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
target_lengths=None,
speaker=None,
**kwargs
):
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
target_lengths=target_lengths,
speaker=speaker,
**kwargs
)
attn = extra["attn"]
feat_out = self.feat_proj(x)
bsz, seq_len, _ = x.size()
eos_out = self.eos_proj(x)
post_feat_out = feat_out + self.postnet(feat_out)
return post_feat_out, eos_out, {"attn": attn, "feature_out": feat_out}
def get_normalized_probs(self, net_output, log_probs, sample):
logits = self.ctc_proj(net_output[2]["feature_out"])
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
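# A minimal standalone sketch of what buffered_future_mask returns: zeros on
# and below the diagonal and -inf strictly above it, so decoding step t can
# only attend to positions <= t. (Illustration only; the cached buffer above
# is untouched.)
if __name__ == "__main__":
    _t = 4
    print(torch.triu(utils.fill_with_neg_inf(torch.zeros(_t, _t)), 1))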
@register_model("tts_transformer")
class TTSTransformerModel(FairseqEncoderDecoderModel):
"""
Implementation for https://arxiv.org/pdf/1809.08895.pdf
"""
@staticmethod
def add_args(parser):
parser.add_argument("--dropout", type=float)
parser.add_argument("--output-frame-dim", type=int)
parser.add_argument("--speaker-embed-dim", type=int)
# encoder prenet
parser.add_argument("--encoder-dropout", type=float)
parser.add_argument("--encoder-conv-layers", type=int)
parser.add_argument("--encoder-conv-kernel-size", type=int)
# encoder transformer layers
parser.add_argument("--encoder-transformer-layers", type=int)
parser.add_argument("--encoder-embed-dim", type=int)
parser.add_argument("--encoder-ffn-embed-dim", type=int)
parser.add_argument("--encoder-normalize-before", action="store_true")
parser.add_argument("--encoder-attention-heads", type=int)
parser.add_argument("--attention-dropout", type=float)
parser.add_argument("--activation-dropout", "--relu-dropout", type=float)
parser.add_argument("--activation-fn", type=str, default="relu")
# decoder prenet
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
# decoder postnet
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
# decoder transformer layers
parser.add_argument("--decoder-transformer-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-ffn-embed-dim", type=int)
parser.add_argument("--decoder-normalize-before", action="store_true")
parser.add_argument("--decoder-attention-heads", type=int)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._num_updates = 0
@classmethod
def build_model(cls, args, task):
embed_speaker = task.get_speaker_embeddings(args)
encoder = TTSTransformerEncoder(args, task.src_dict, embed_speaker)
decoder = TTSTransformerDecoder(args, task.src_dict)
return cls(encoder, decoder)
def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs):
return self.encoder(
src_tokens, src_lengths=src_lengths, speaker=speaker, **kwargs
)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self._num_updates = num_updates
@register_model_architecture("tts_transformer", "tts_transformer")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64)
# encoder prenet
args.encoder_dropout = getattr(args, "encoder_dropout", 0.5)
args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3)
args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5)
# encoder transformer layers
args.encoder_transformer_layers = getattr(args, "encoder_transformer_layers", 6)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(
args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim
)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
# decoder prenet
args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
args.prenet_layers = getattr(args, "prenet_layers", 2)
args.prenet_dim = getattr(args, "prenet_dim", 256)
# decoder postnet
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# decoder transformer layers
args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim
)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
| 15,432
| 37.5825
| 104
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/text_to_speech/vocoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import json
from typing import Dict
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from fairseq.data.audio.audio_utils import (
get_window,
get_fourier_basis,
get_mel_filters,
TTSSpectrogram,
)
from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig
from fairseq.models.text_to_speech.hifigan import Generator as HiFiGANModel
logger = logging.getLogger(__name__)
class PseudoInverseMelScale(torch.nn.Module):
def __init__(self, n_stft, n_mels, sample_rate, f_min, f_max) -> None:
super(PseudoInverseMelScale, self).__init__()
self.n_mels = n_mels
basis = get_mel_filters(sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max)
basis = torch.pinverse(basis) # F x F_mel
self.register_buffer("basis", basis)
def forward(self, melspec: torch.Tensor) -> torch.Tensor:
# pack batch
shape = melspec.shape # B_1 x ... x B_K x F_mel x T
n_mels, time = shape[-2], shape[-1]
melspec = melspec.view(-1, n_mels, time)
freq, _ = self.basis.size() # F x F_mel
assert self.n_mels == n_mels, (self.n_mels, n_mels)
specgram = self.basis.matmul(melspec).clamp(min=0)
# unpack batch
specgram = specgram.view(shape[:-2] + (freq, time))
return specgram
class GriffinLim(torch.nn.Module):
def __init__(
self,
n_fft: int,
win_length: int,
hop_length: int,
n_iter: int,
window_fn=torch.hann_window,
):
super(GriffinLim, self).__init__()
self.transform = TTSSpectrogram(
n_fft, win_length, hop_length, return_phase=True
)
basis = get_fourier_basis(n_fft)
basis = torch.pinverse(n_fft / hop_length * basis).T[:, None, :]
basis *= get_window(window_fn, n_fft, win_length)
self.register_buffer("basis", basis)
self.n_fft = n_fft
self.win_length = win_length
self.hop_length = hop_length
self.n_iter = n_iter
self.tiny = 1.1754944e-38
@classmethod
def get_window_sum_square(
cls, n_frames, hop_length, win_length, n_fft, window_fn=torch.hann_window
) -> torch.Tensor:
w_sq = get_window(window_fn, n_fft, win_length) ** 2
n = n_fft + hop_length * (n_frames - 1)
x = torch.zeros(n, dtype=torch.float32)
for i in range(n_frames):
ofst = i * hop_length
x[ofst : min(n, ofst + n_fft)] += w_sq[: max(0, min(n_fft, n - ofst))]
return x
def inverse(self, magnitude: torch.Tensor, phase) -> torch.Tensor:
x = torch.cat(
[magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1
)
x = F.conv_transpose1d(x, self.basis, stride=self.hop_length)
win_sum_sq = self.get_window_sum_square(
magnitude.shape[-1],
hop_length=self.hop_length,
win_length=self.win_length,
n_fft=self.n_fft,
).to(magnitude.device)
# remove modulation effects
approx_nonzero_indices = win_sum_sq > self.tiny
x[:, :, approx_nonzero_indices] /= win_sum_sq[approx_nonzero_indices]
x *= self.n_fft / self.hop_length
x = x[:, :, self.n_fft // 2 :]
        x = x[:, :, : -self.n_fft // 2]
return x
def forward(self, specgram: torch.Tensor) -> torch.Tensor:
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*specgram.shape)))
angles = torch.from_numpy(angles).to(specgram)
_specgram = specgram.view(-1, specgram.shape[-2], specgram.shape[-1])
waveform = self.inverse(_specgram, angles).squeeze(1)
for _ in range(self.n_iter):
_, angles = self.transform(waveform)
waveform = self.inverse(_specgram, angles).squeeze(1)
return waveform.squeeze(0)
class GriffinLimVocoder(nn.Module):
def __init__(
self,
sample_rate,
win_size,
hop_size,
n_fft,
n_mels,
f_min,
f_max,
window_fn,
spec_bwd_max_iter=32,
fp16=False,
):
super().__init__()
self.inv_mel_transform = PseudoInverseMelScale(
n_stft=n_fft // 2 + 1,
n_mels=n_mels,
sample_rate=sample_rate,
f_min=f_min,
f_max=f_max,
)
self.gl_transform = GriffinLim(
n_fft=n_fft,
win_length=win_size,
hop_length=hop_size,
window_fn=window_fn,
n_iter=spec_bwd_max_iter,
)
if fp16:
self.half()
self.inv_mel_transform.half()
self.gl_transform.half()
else:
self.float()
self.inv_mel_transform.float()
self.gl_transform.float()
def forward(self, x):
# x: (B x) T x D -> (B x) 1 x T
        # NOTE: batched forward produces a noisier waveform; it is recommended
        # to run one utterance at a time
self.eval()
x = x.exp().transpose(-1, -2)
x = self.inv_mel_transform(x)
x = self.gl_transform(x)
return x
@classmethod
def from_data_cfg(cls, args, data_cfg: S2TDataConfig):
feat_cfg = data_cfg.config["features"]
window_fn = getattr(torch, feat_cfg["window_fn"] + "_window")
return cls(
sample_rate=feat_cfg["sample_rate"],
win_size=int(feat_cfg["win_len_t"] * feat_cfg["sample_rate"]),
hop_size=int(feat_cfg["hop_len_t"] * feat_cfg["sample_rate"]),
n_fft=feat_cfg["n_fft"],
n_mels=feat_cfg["n_mels"],
f_min=feat_cfg["f_min"],
f_max=feat_cfg["f_max"],
window_fn=window_fn,
spec_bwd_max_iter=args.spec_bwd_max_iter,
fp16=args.fp16,
)
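# A minimal usage sketch with dummy values (the hyperparameters below are toy
# assumptions, not a published recipe): the vocoder expects *log*-Mel features
# of shape (T x D), exponentiates internally, and returns a waveform tensor.
if __name__ == "__main__":
    _voc = GriffinLimVocoder(
        sample_rate=22050, win_size=1024, hop_size=256, n_fft=1024,
        n_mels=80, f_min=20.0, f_max=8000.0, window_fn=torch.hann_window,
        spec_bwd_max_iter=4,
    )
    print(_voc(torch.randn(50, 80)).shape)  # 1-D waveform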
class HiFiGANVocoder(nn.Module):
def __init__(
self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False
) -> None:
super().__init__()
self.model = HiFiGANModel(model_cfg)
state_dict = torch.load(checkpoint_path)
self.model.load_state_dict(state_dict["generator"])
if fp16:
self.model.half()
logger.info(f"loaded HiFiGAN checkpoint from {checkpoint_path}")
def forward(self, x: torch.Tensor) -> torch.Tensor:
# (B x) T x D -> (B x) 1 x T
model = self.model.eval()
if len(x.shape) == 2:
return model(x.unsqueeze(0).transpose(1, 2)).detach().squeeze(0)
else:
return model(x.transpose(-1, -2)).detach()
@classmethod
def from_data_cfg(cls, args, data_cfg: S2TDataConfig):
vocoder_cfg = data_cfg.vocoder
assert vocoder_cfg.get("type", "griffin_lim") == "hifigan"
with open(vocoder_cfg["config"]) as f:
model_cfg = json.load(f)
return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16)
def get_vocoder(args, data_cfg: S2TDataConfig):
if args.vocoder == "griffin_lim":
return GriffinLimVocoder.from_data_cfg(args, data_cfg)
elif args.vocoder == "hifigan":
return HiFiGANVocoder.from_data_cfg(args, data_cfg)
else:
raise ValueError("Unknown vocoder")
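# A minimal HiFi-GAN usage sketch; the config/checkpoint paths are
# hypothetical placeholders for files from a trained HiFi-GAN run.
if __name__ == "__main__":
    with open("hifigan/config.json") as f:
        _cfg = json.load(f)
    _voc = HiFiGANVocoder("hifigan/generator.pt", _cfg)
    _mel = torch.randn(100, 80)  # (T x D) dummy features
    print(_voc(_mel).shape)  # (1 x T_wav)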
| 7,429
| 32.772727
| 84
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/text_to_speech/tacotron2.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from torch import nn
from torch.nn import functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from fairseq.modules import LSTMCellWithZoneOut, LocationAttention
logger = logging.getLogger(__name__)
def encoder_init(m):
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
class Tacotron2Encoder(FairseqEncoder):
def __init__(self, args, src_dict, embed_speaker):
super().__init__(src_dict)
self.padding_idx = src_dict.pad()
self.embed_speaker = embed_speaker
self.spk_emb_proj = None
if embed_speaker is not None:
self.spk_emb_proj = nn.Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
self.embed_tokens = nn.Embedding(
len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx
)
assert args.encoder_conv_kernel_size % 2 == 1
self.convolutions = nn.ModuleList(
nn.Sequential(
nn.Conv1d(
args.encoder_embed_dim,
args.encoder_embed_dim,
kernel_size=args.encoder_conv_kernel_size,
padding=((args.encoder_conv_kernel_size - 1) // 2),
),
nn.BatchNorm1d(args.encoder_embed_dim),
nn.ReLU(),
nn.Dropout(args.encoder_dropout),
)
for _ in range(args.encoder_conv_layers)
)
self.lstm = nn.LSTM(
args.encoder_embed_dim,
args.encoder_embed_dim // 2,
num_layers=args.encoder_lstm_layers,
batch_first=True,
bidirectional=True,
)
self.apply(encoder_init)
def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs):
x = self.embed_tokens(src_tokens)
x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T
for conv in self.convolutions:
x = conv(x)
x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C
src_lengths = src_lengths.cpu().long()
x = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True)
x = self.lstm(x)[0]
x = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)[0]
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if self.embed_speaker is not None:
            # x is B x T x C here (the LSTM runs with batch_first=True)
            bsz, seq_len, _ = x.size()
            emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1)
            x = self.spk_emb_proj(torch.cat([x, emb], dim=2))
return {
"encoder_out": [x], # B x T x C
"encoder_padding_mask": encoder_padding_mask, # B x T
}
class Prenet(nn.Module):
def __init__(self, in_dim, n_layers, n_units, dropout):
super().__init__()
self.layers = nn.ModuleList(
nn.Sequential(nn.Linear(in_dim if i == 0 else n_units, n_units), nn.ReLU())
for i in range(n_layers)
)
self.dropout = dropout
def forward(self, x):
for layer in self.layers:
x = F.dropout(layer(x), p=self.dropout) # always applies dropout
return x
class Postnet(nn.Module):
def __init__(self, in_dim, n_channels, kernel_size, n_layers, dropout):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
assert kernel_size % 2 == 1
for i in range(n_layers):
cur_layers = (
[
nn.Conv1d(
in_dim if i == 0 else n_channels,
n_channels if i < n_layers - 1 else in_dim,
kernel_size=kernel_size,
padding=((kernel_size - 1) // 2),
),
nn.BatchNorm1d(n_channels if i < n_layers - 1 else in_dim),
]
+ ([nn.Tanh()] if i < n_layers - 1 else [])
+ [nn.Dropout(dropout)]
)
nn.init.xavier_uniform_(
cur_layers[0].weight,
torch.nn.init.calculate_gain("tanh" if i < n_layers - 1 else "linear"),
)
self.convolutions.append(nn.Sequential(*cur_layers))
def forward(self, x):
x = x.transpose(1, 2) # B x T x C -> B x C x T
for conv in self.convolutions:
x = conv(x)
return x.transpose(1, 2)
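# Shape sanity check for Postnet (a minimal sketch with dummy values): it maps
# (B x T x C) -> (B x T x C), so it can be added to the decoder output as a
# residual refinement, as the decoders below do.
if __name__ == "__main__":
    _postnet = Postnet(in_dim=80, n_channels=512, kernel_size=5, n_layers=5, dropout=0.5)
    _feats = torch.randn(2, 100, 80)
    assert _postnet(_feats).shape == _feats.shape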
def decoder_init(m):
if isinstance(m, torch.nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh"))
class Tacotron2Decoder(FairseqIncrementalDecoder):
def __init__(self, args, src_dict):
super().__init__(None)
self.args = args
self.n_frames_per_step = args.n_frames_per_step
self.out_dim = args.output_frame_dim * args.n_frames_per_step
self.prenet = Prenet(
self.out_dim, args.prenet_layers, args.prenet_dim, args.prenet_dropout
)
# take prev_context, prev_frame, (speaker embedding) as input
self.attention_lstm = LSTMCellWithZoneOut(
args.zoneout,
args.prenet_dim + args.encoder_embed_dim,
args.decoder_lstm_dim,
)
# take attention_lstm output, attention_state, encoder_out as input
self.attention = LocationAttention(
args.attention_dim,
args.encoder_embed_dim,
args.decoder_lstm_dim,
(1 + int(args.attention_use_cumprob)),
args.attention_conv_dim,
args.attention_conv_kernel_size,
)
# take attention_lstm output, context, (gated_latent) as input
self.lstm = nn.ModuleList(
LSTMCellWithZoneOut(
args.zoneout,
args.encoder_embed_dim + args.decoder_lstm_dim,
args.decoder_lstm_dim,
)
for i in range(args.decoder_lstm_layers)
)
proj_in_dim = args.encoder_embed_dim + args.decoder_lstm_dim
self.feat_proj = nn.Linear(proj_in_dim, self.out_dim)
self.eos_proj = nn.Linear(proj_in_dim, 1)
self.postnet = Postnet(
self.out_dim,
args.postnet_conv_dim,
args.postnet_conv_kernel_size,
args.postnet_layers,
args.postnet_dropout,
)
self.ctc_proj = None
if getattr(args, "ctc_weight", 0.0) > 0.0:
self.ctc_proj = nn.Linear(self.out_dim, len(src_dict))
self.apply(decoder_init)
def _get_states(self, incremental_state, enc_out):
bsz, in_len, _ = enc_out.size()
alstm_h = self.get_incremental_state(incremental_state, "alstm_h")
if alstm_h is None:
alstm_h = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
alstm_c = self.get_incremental_state(incremental_state, "alstm_c")
if alstm_c is None:
alstm_c = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
lstm_h = self.get_incremental_state(incremental_state, "lstm_h")
if lstm_h is None:
lstm_h = [
enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
for _ in range(self.args.decoder_lstm_layers)
]
lstm_c = self.get_incremental_state(incremental_state, "lstm_c")
if lstm_c is None:
lstm_c = [
enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
for _ in range(self.args.decoder_lstm_layers)
]
attn_w = self.get_incremental_state(incremental_state, "attn_w")
if attn_w is None:
attn_w = enc_out.new_zeros(bsz, in_len)
attn_w_cum = self.get_incremental_state(incremental_state, "attn_w_cum")
if attn_w_cum is None:
attn_w_cum = enc_out.new_zeros(bsz, in_len)
return alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum
def _get_init_attn_c(self, enc_out, enc_mask):
bsz = enc_out.size(0)
if self.args.init_attn_c == "zero":
return enc_out.new_zeros(bsz, self.args.encoder_embed_dim)
elif self.args.init_attn_c == "avg":
enc_w = (~enc_mask).type(enc_out.type())
enc_w = enc_w / enc_w.sum(dim=1, keepdim=True)
return torch.sum(enc_out * enc_w.unsqueeze(2), dim=1)
else:
raise ValueError(f"{self.args.init_attn_c} not supported")
def forward(
self,
prev_output_tokens,
encoder_out=None,
incremental_state=None,
target_lengths=None,
**kwargs,
):
enc_mask = encoder_out["encoder_padding_mask"]
enc_out = encoder_out["encoder_out"][0]
in_len = enc_out.size(1)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:, :]
bsz, out_len, _ = prev_output_tokens.size()
prenet_out = self.prenet(prev_output_tokens)
(alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum) = self._get_states(
incremental_state, enc_out
)
attn_ctx = self._get_init_attn_c(enc_out, enc_mask)
attn_out = enc_out.new_zeros(bsz, in_len, out_len)
feat_out = enc_out.new_zeros(bsz, out_len, self.out_dim)
eos_out = enc_out.new_zeros(bsz, out_len)
for t in range(out_len):
alstm_in = torch.cat((attn_ctx, prenet_out[:, t, :]), dim=1)
alstm_h, alstm_c = self.attention_lstm(alstm_in, (alstm_h, alstm_c))
attn_state = attn_w.unsqueeze(1)
if self.args.attention_use_cumprob:
attn_state = torch.stack((attn_w, attn_w_cum), dim=1)
attn_ctx, attn_w = self.attention(enc_out, enc_mask, alstm_h, attn_state)
attn_w_cum = attn_w_cum + attn_w
attn_out[:, :, t] = attn_w
for i, cur_lstm in enumerate(self.lstm):
if i == 0:
lstm_in = torch.cat((attn_ctx, alstm_h), dim=1)
else:
lstm_in = torch.cat((attn_ctx, lstm_h[i - 1]), dim=1)
lstm_h[i], lstm_c[i] = cur_lstm(lstm_in, (lstm_h[i], lstm_c[i]))
proj_in = torch.cat((attn_ctx, lstm_h[-1]), dim=1)
feat_out[:, t, :] = self.feat_proj(proj_in)
eos_out[:, t] = self.eos_proj(proj_in).squeeze(1)
self.attention.clear_cache()
self.set_incremental_state(incremental_state, "alstm_h", alstm_h)
self.set_incremental_state(incremental_state, "alstm_c", alstm_c)
self.set_incremental_state(incremental_state, "lstm_h", lstm_h)
self.set_incremental_state(incremental_state, "lstm_c", lstm_c)
self.set_incremental_state(incremental_state, "attn_w", attn_w)
self.set_incremental_state(incremental_state, "attn_w_cum", attn_w_cum)
post_feat_out = feat_out + self.postnet(feat_out)
eos_out = eos_out.view(bsz, out_len, 1)
return post_feat_out, eos_out, {"attn": attn_out, "feature_out": feat_out}
@register_model("tacotron_2")
class Tacotron2Model(FairseqEncoderDecoderModel):
"""
Implementation for https://arxiv.org/pdf/1712.05884.pdf
"""
@staticmethod
def add_args(parser):
# encoder
parser.add_argument("--encoder-dropout", type=float)
parser.add_argument("--encoder-embed-dim", type=int)
parser.add_argument("--encoder-conv-layers", type=int)
parser.add_argument("--encoder-conv-kernel-size", type=int)
parser.add_argument("--encoder-lstm-layers", type=int)
# decoder
parser.add_argument("--attention-dim", type=int)
parser.add_argument("--attention-conv-dim", type=int)
parser.add_argument("--attention-conv-kernel-size", type=int)
parser.add_argument("--prenet-dropout", type=float)
parser.add_argument("--prenet-layers", type=int)
parser.add_argument("--prenet-dim", type=int)
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
parser.add_argument("--init-attn-c", type=str)
parser.add_argument("--attention-use-cumprob", action="store_true")
parser.add_argument("--zoneout", type=float)
parser.add_argument("--decoder-lstm-layers", type=int)
parser.add_argument("--decoder-lstm-dim", type=int)
parser.add_argument("--output-frame-dim", type=int)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._num_updates = 0
@classmethod
def build_model(cls, args, task):
embed_speaker = task.get_speaker_embeddings(args)
encoder = Tacotron2Encoder(args, task.src_dict, embed_speaker)
decoder = Tacotron2Decoder(args, task.src_dict)
return cls(encoder, decoder)
def forward_encoder(self, src_tokens, src_lengths, **kwargs):
return self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self._num_updates = num_updates
@register_model_architecture("tacotron_2", "tacotron_2")
def base_architecture(args):
# encoder
args.encoder_dropout = getattr(args, "encoder_dropout", 0.5)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3)
args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5)
args.encoder_lstm_layers = getattr(args, "encoder_lstm_layers", 1)
# decoder
args.attention_dim = getattr(args, "attention_dim", 128)
args.attention_conv_dim = getattr(args, "attention_conv_dim", 32)
args.attention_conv_kernel_size = getattr(args, "attention_conv_kernel_size", 15)
args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
args.prenet_layers = getattr(args, "prenet_layers", 2)
args.prenet_dim = getattr(args, "prenet_dim", 256)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
args.init_attn_c = getattr(args, "init_attn_c", "zero")
args.attention_use_cumprob = getattr(args, "attention_use_cumprob", True)
args.zoneout = getattr(args, "zoneout", 0.1)
args.decoder_lstm_layers = getattr(args, "decoder_lstm_layers", 2)
args.decoder_lstm_dim = getattr(args, "decoder_lstm_dim", 1024)
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
| 15,041
| 38.480315
| 87
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/text_to_speech/hifigan.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm
LRELU_SLOPE = 0.1
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return (kernel_size * dilation - dilation) // 2
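# Quick sanity check (minimal sketch): this is "same" padding for stride-1
# dilated convolutions, so sequence length is preserved.
if __name__ == "__main__":
    assert get_padding(3, dilation=1) == 1
    assert get_padding(3, dilation=2) == 2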
class ResBlock(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=get_padding(kernel_size, 1),
)
),
]
)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for layer in self.convs1:
remove_weight_norm(layer)
for layer in self.convs2:
remove_weight_norm(layer)
class Generator(torch.nn.Module):
def __init__(self, cfg):
super(Generator, self).__init__()
self.num_kernels = len(cfg["resblock_kernel_sizes"])
self.num_upsamples = len(cfg["upsample_rates"])
self.conv_pre = weight_norm(
Conv1d(80, cfg["upsample_initial_channel"], 7, 1, padding=3)
)
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(
zip(cfg["upsample_rates"], cfg["upsample_kernel_sizes"])
):
self.ups.append(
weight_norm(
ConvTranspose1d(
cfg["upsample_initial_channel"] // (2**i),
cfg["upsample_initial_channel"] // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = cfg["upsample_initial_channel"] // (2 ** (i + 1))
for k, d in zip(
cfg["resblock_kernel_sizes"], cfg["resblock_dilation_sizes"]
):
self.resblocks.append(ResBlock(ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print("Removing weight norm...")
for layer in self.ups:
remove_weight_norm(layer)
for layer in self.resblocks:
layer.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
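# A minimal smoke test with a hypothetical tiny config (key names match the
# HiFi-GAN JSON configs; the small values are assumptions chosen so the test
# runs fast, not the published hyperparameters):
if __name__ == "__main__":
    _cfg = {
        "resblock_kernel_sizes": [3],
        "resblock_dilation_sizes": [[1, 3, 5]],
        "upsample_rates": [4, 4],
        "upsample_kernel_sizes": [8, 8],
        "upsample_initial_channel": 32,
    }
    _gen = Generator(_cfg)
    _mel = torch.randn(1, 80, 16)  # B x n_mels x T
    _wav = _gen(_mel)  # B x 1 x (T * prod(upsample_rates))
    assert _wav.shape == (1, 1, 16 * 4 * 4)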
| 5,528
| 30.775862
| 76
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/text_to_speech/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .tacotron2 import * # noqa
from .tts_transformer import * # noqa
from .fastspeech2 import * # noqa
| 285
| 30.777778
| 65
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/text_to_speech/fastspeech2.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from torch import nn
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import (
LayerNorm,
PositionalEmbedding,
FairseqDropout,
MultiheadAttention,
)
from fairseq import utils
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models.text_to_speech.tacotron2 import Postnet
logger = logging.getLogger(__name__)
def model_init(m):
if isinstance(m, nn.Conv1d):
nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
return m
class PositionwiseFeedForward(nn.Module):
def __init__(self, in_dim, hidden_dim, kernel_size, dropout):
super().__init__()
self.ffn = nn.Sequential(
nn.Conv1d(
in_dim,
hidden_dim,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
),
nn.ReLU(),
nn.Conv1d(
hidden_dim,
in_dim,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
),
)
self.layer_norm = LayerNorm(in_dim)
self.dropout = self.dropout_module = FairseqDropout(
p=dropout, module_name=self.__class__.__name__
)
def forward(self, x):
# B x T x C
residual = x
x = self.ffn(x.transpose(1, 2)).transpose(1, 2)
x = self.dropout(x)
return self.layer_norm(x + residual)
class FFTLayer(torch.nn.Module):
def __init__(
self, embed_dim, n_heads, hidden_dim, kernel_size, dropout, attention_dropout
):
super().__init__()
self.self_attn = MultiheadAttention(
embed_dim, n_heads, dropout=attention_dropout, self_attention=True
)
self.layer_norm = LayerNorm(embed_dim)
self.ffn = PositionwiseFeedForward(
embed_dim, hidden_dim, kernel_size, dropout=dropout
)
def forward(self, x, padding_mask=None):
# B x T x C
residual = x
x = x.transpose(0, 1)
x, _ = self.self_attn(
query=x, key=x, value=x, key_padding_mask=padding_mask, need_weights=False
)
x = x.transpose(0, 1)
x = self.layer_norm(x + residual)
return self.ffn(x)
class LengthRegulator(nn.Module):
def forward(self, x, durations):
# x: B x T x C
out_lens = durations.sum(dim=1)
max_len = out_lens.max()
bsz, seq_len, dim = x.size()
out = x.new_zeros((bsz, max_len, dim))
for b in range(bsz):
indices = []
for t in range(seq_len):
indices.extend([t] * utils.item(durations[b, t]))
indices = torch.tensor(indices, dtype=torch.long).to(x.device)
out_len = utils.item(out_lens[b])
out[b, :out_len] = x[b].index_select(0, indices)
return out, out_lens
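# A minimal sketch of the length regulator with dummy values: position t of
# each input sequence is repeated durations[b, t] times, expanding (B x T x C)
# states into (B x sum(durations) x C) frames.
if __name__ == "__main__":
    _lr = LengthRegulator()
    _x = torch.arange(6, dtype=torch.float).view(1, 3, 2)  # B=1, T=3, C=2
    _dur = torch.tensor([[2, 0, 3]])
    _out, _lens = _lr(_x, _dur)
    assert _out.shape == (1, 5, 2) and _lens.tolist() == [5]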
class VariancePredictor(nn.Module):
def __init__(self, args):
super().__init__()
self.conv1 = nn.Sequential(
nn.Conv1d(
args.encoder_embed_dim,
args.var_pred_hidden_dim,
kernel_size=args.var_pred_kernel_size,
padding=(args.var_pred_kernel_size - 1) // 2,
),
nn.ReLU(),
)
self.ln1 = nn.LayerNorm(args.var_pred_hidden_dim)
self.dropout_module = FairseqDropout(
p=args.var_pred_dropout, module_name=self.__class__.__name__
)
self.conv2 = nn.Sequential(
nn.Conv1d(
args.var_pred_hidden_dim,
args.var_pred_hidden_dim,
kernel_size=args.var_pred_kernel_size,
                padding=(args.var_pred_kernel_size - 1) // 2,
),
nn.ReLU(),
)
self.ln2 = nn.LayerNorm(args.var_pred_hidden_dim)
self.proj = nn.Linear(args.var_pred_hidden_dim, 1)
def forward(self, x):
# Input: B x T x C; Output: B x T
x = self.conv1(x.transpose(1, 2)).transpose(1, 2)
x = self.dropout_module(self.ln1(x))
x = self.conv2(x.transpose(1, 2)).transpose(1, 2)
x = self.dropout_module(self.ln2(x))
return self.proj(x).squeeze(dim=2)
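# A minimal shape check with a hypothetical args namespace (field names match
# the CLI flags added by FastSpeech2Model.add_args below; the values are toy
# assumptions): the predictor maps (B x T x C) states to one scalar per
# position, i.e. (B x T).
if __name__ == "__main__":
    import argparse

    _args = argparse.Namespace(
        encoder_embed_dim=8,
        var_pred_hidden_dim=8,
        var_pred_kernel_size=3,
        var_pred_dropout=0.1,
    )
    _vp = VariancePredictor(_args)
    assert _vp(torch.randn(2, 7, 8)).shape == (2, 7)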
class VarianceAdaptor(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
        self.no_extra_tts_adaptor = getattr(args, "no_extra_tts_adaptor", False)
self.length_regulator = LengthRegulator()
self.duration_predictor = VariancePredictor(args)
self.pitch_predictor = None
self.energy_predictor = None
if not self.no_extra_tts_adaptor:
self.pitch_predictor = VariancePredictor(args)
self.energy_predictor = VariancePredictor(args)
self.pitch_bins = None
self.embed_pitch = None
self.energy_bins = None
self.embed_energy = None
if not self.no_extra_tts_adaptor:
n_bins, steps = self.args.var_pred_n_bins, self.args.var_pred_n_bins - 1
self.pitch_bins = torch.linspace(args.pitch_min, args.pitch_max, steps)
self.embed_pitch = Embedding(n_bins, args.encoder_embed_dim)
self.energy_bins = torch.linspace(args.energy_min, args.energy_max, steps)
self.embed_energy = Embedding(n_bins, args.encoder_embed_dim)
def get_pitch_emb(self, x, tgt=None, factor=1.0):
out = self.pitch_predictor(x)
bins = self.pitch_bins.to(x.device)
if tgt is None:
out = out * factor
emb = self.embed_pitch(torch.bucketize(out, bins))
else:
emb = self.embed_pitch(torch.bucketize(tgt, bins))
return out, emb
def get_energy_emb(self, x, tgt=None, factor=1.0):
out = self.energy_predictor(x)
bins = self.energy_bins.to(x.device)
if tgt is None:
out = out * factor
emb = self.embed_energy(torch.bucketize(out, bins))
else:
emb = self.embed_energy(torch.bucketize(tgt, bins))
return out, emb
def forward(
self,
x,
padding_mask,
durations=None,
pitches=None,
energies=None,
d_factor=1.0,
p_factor=1.0,
e_factor=1.0,
):
# x: B x T x C
log_dur_out = self.duration_predictor(x)
dur_out = torch.clamp(
torch.round((torch.exp(log_dur_out) - 1) * d_factor).long(), min=0
)
dur_out.masked_fill_(padding_mask, 0)
        # TODO: sanity-check the self-predicted durations
        # (e.g. inspect dur_out and dur_out.sum(-1))
pitch_out = None
energy_out = None
if not self.no_extra_tts_adaptor:
pitch_out, pitch_emb = self.get_pitch_emb(x, pitches, p_factor)
x = x + pitch_emb
energy_out, energy_emb = self.get_energy_emb(x, energies, e_factor)
x = x + energy_emb
x, out_lens = self.length_regulator(
x, dur_out if durations is None else durations
)
return x, out_lens, log_dur_out, pitch_out, energy_out
class FastSpeech2Encoder(FairseqEncoder):
def __init__(self, args, src_dict, embed_speaker):
super().__init__(src_dict)
self.args = args
self.padding_idx = src_dict.pad()
self.n_frames_per_step = args.n_frames_per_step
self.out_dim = args.output_frame_dim * args.n_frames_per_step
self.embed_speaker = embed_speaker
self.spk_emb_proj = None
if embed_speaker is not None:
self.spk_emb_proj = nn.Linear(
args.encoder_embed_dim + args.speaker_embed_dim, args.encoder_embed_dim
)
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_tokens = Embedding(
len(src_dict), args.encoder_embed_dim, padding_idx=self.padding_idx
)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, args.encoder_embed_dim, self.padding_idx
)
self.pos_emb_alpha = nn.Parameter(torch.ones(1))
self.dec_pos_emb_alpha = nn.Parameter(torch.ones(1))
self.encoder_fft_layers = nn.ModuleList(
FFTLayer(
args.encoder_embed_dim,
args.encoder_attention_heads,
args.fft_hidden_dim,
args.fft_kernel_size,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
)
for _ in range(args.encoder_layers)
)
        self.no_extra_tts_adaptor = getattr(args, "no_extra_tts_adaptor", False)
self.var_adaptor = VarianceAdaptor(args)
self.decoder_fft_layers = nn.ModuleList(
FFTLayer(
args.decoder_embed_dim,
args.decoder_attention_heads,
args.fft_hidden_dim,
args.fft_kernel_size,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
)
for _ in range(args.decoder_layers)
)
self.out_proj = nn.Linear(args.decoder_embed_dim, self.out_dim)
self.postnet = None
if args.add_postnet:
self.postnet = Postnet(
self.out_dim,
args.postnet_conv_dim,
args.postnet_conv_kernel_size,
args.postnet_layers,
args.postnet_dropout,
)
self.apply(model_init)
def forward(
self,
src_tokens,
src_lengths=None,
speaker=None,
durations=None,
pitches=None,
energies=None,
**kwargs
):
# Extract phoneme embeddings
x = self.embed_tokens(src_tokens)
# Add positional embedding to phoneme embedding
enc_padding_mask = src_tokens.eq(self.padding_idx)
x += self.pos_emb_alpha * self.embed_positions(enc_padding_mask)
x = self.dropout_module(x)
# Forward several transformer encoder layers
for layer in self.encoder_fft_layers:
x = layer(x, enc_padding_mask)
if self.embed_speaker is not None:
# If speaker embedding is not None, concatenate it with feats and project it.
bsz, seq_len, _ = x.size()
emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1)
x = self.spk_emb_proj(torch.cat([x, emb], dim=2))
# Forward variance adaptor
x, out_lens, log_dur_out, pitch_out, energy_out = self.var_adaptor(
x, enc_padding_mask, durations, pitches, energies
)
# Forward Mel-spectrogram Decoder
dec_padding_mask = lengths_to_padding_mask(out_lens)
x += self.dec_pos_emb_alpha * self.embed_positions(dec_padding_mask)
for layer in self.decoder_fft_layers:
x = layer(x, dec_padding_mask)
x = self.out_proj(x)
x_post = None
if self.postnet is not None:
x_post = x + self.postnet(x)
return x, x_post, out_lens, log_dur_out, pitch_out, energy_out
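# The length regulator invoked in VarianceAdaptor repeats each phoneme state
# by its (predicted or ground-truth) duration so the sequence reaches frame
# resolution; a standalone sketch of that expansion, assuming durations are
# non-negative integer counts per position:
def _length_regulate_sketch(x, durations):
    # x: B x T x C, durations: B x T (long)
    expanded = [h.repeat_interleave(d, dim=0) for h, d in zip(x, durations)]
    out_lens = durations.sum(dim=-1)
    out = x.new_zeros(x.size(0), int(out_lens.max()), x.size(2))
    for i, o in enumerate(expanded):
        out[i, : o.size(0)] = o
    return out, out_lens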
@register_model("fastspeech2")
class FastSpeech2Model(FairseqEncoderModel):
"""
Implementation for https://arxiv.org/abs/2006.04558
"""
NON_AUTOREGRESSIVE = True
@staticmethod
def add_args(parser):
parser.add_argument("--dropout", type=float)
parser.add_argument("--output-frame-dim", type=int)
parser.add_argument("--speaker-embed-dim", type=int)
# FFT blocks
parser.add_argument("--fft-hidden-dim", type=int)
parser.add_argument("--fft-kernel-size", type=int)
parser.add_argument("--attention-dropout", type=float)
parser.add_argument("--encoder-layers", type=int)
parser.add_argument("--encoder-embed-dim", type=int)
parser.add_argument("--encoder-attention-heads", type=int)
parser.add_argument("--decoder-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-attention-heads", type=int)
# variance predictor
parser.add_argument("--var-pred-n-bins", type=int)
parser.add_argument("--var-pred-hidden-dim", type=int)
parser.add_argument("--var-pred-kernel-size", type=int)
parser.add_argument("--var-pred-dropout", type=float)
# postnet
parser.add_argument("--add-postnet", action="store_true")
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
parser.add_argument("--no-extra-tts-adaptor", action="store_true")
# Settings for loading model from machine speech chain
parser.add_argument("--load-model-from-speech-chain", action="store_true")
parser.add_argument("--path-to-speech-chain-ckpt", type=str)
def __init__(self, encoder, args, src_dict):
super().__init__(encoder)
self._num_updates = 0
out_dim = args.output_frame_dim * args.n_frames_per_step
self.ctc_proj = None
if getattr(args, "ctc_weight", 0.0) > 0.0:
self.ctc_proj = nn.Linear(out_dim, len(src_dict))
if (
hasattr(args, "load_model_from_speech_chain")
and args.load_model_from_speech_chain
):
self.path_to_speech_chain_ckpt = args.path_to_speech_chain_ckpt
# self.load_model_from_speech_chain()
def load_model_from_speech_chain(self):
logging.info("Using model from %s" % self.path_to_speech_chain_ckpt)
state = torch.load(
self.path_to_speech_chain_ckpt, map_location=torch.device("cpu")
)
params_dict = dict()
for k, v in state["model"].items():
# Ignore asr parameters
if "asr_model" in k:
continue
# Load ctc projection
if "ctc" in k and self.ctc_proj is not None:
params_dict["ctc_proj"] = v
continue
# Load other tts parameters
if "tts_model" in k:
params_dict[k.replace("tts_model.", "")] = v
self.load_state_dict(params_dict, strict=False)
@classmethod
def build_model(cls, args, task):
embed_speaker = task.get_speaker_embeddings(args)
encoder = FastSpeech2Encoder(args, task.src_dict, embed_speaker)
return cls(encoder, args, task.src_dict)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self._num_updates = num_updates
def get_normalized_probs(self, net_output, log_probs, sample=None):
logits = self.ctc_proj(net_output[0])
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
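# get_normalized_probs above projects the (pre-postnet) decoder output with
# ctc_proj and normalizes over the vocabulary; a shape-level sketch with
# illustrative sizes (80-dim features, 100-token vocabulary):
def _ctc_logprob_sketch():
    feats = torch.randn(2, 50, 80)   # B x T x out_dim
    ctc_proj = nn.Linear(80, 100)    # out_dim -> vocab size
    return ctc_proj(feats).float().log_softmax(dim=-1)  # B x T x V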
@register_model_architecture("fastspeech2", "fastspeech2")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", 80)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layers = getattr(args, "encoder_layers", 4)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
args.no_extra_tts_adaptor = getattr(args, "no_extra_tts_adaptor", False)
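# The getattr(args, name, default) pattern above only fills in values the
# caller did not set, so named architectures act as layered defaults; a tiny
# demonstration with a bare namespace:
def _arch_defaults_sketch():
    from argparse import Namespace
    args = Namespace(dropout=0.1)  # user-specified value survives
    base_architecture(args)        # everything else gets its default
    return args.dropout, args.encoder_layers  # (0.1, 4)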
| 17,259
| 34.441478
| 89
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/huggingface/hf_gpt2.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from typing import Dict, List, Optional
import torch
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
register_model_architecture,
)
logger = logging.getLogger(__name__)
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("hf_gpt2")
class HuggingFaceGPT2LanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--embed-dim', type=int, metavar='N',
help='embedding dimension')
parser.add_argument('--num-attention-heads', type=int, metavar='N',
help='num attention heads')
parser.add_argument('--num-layers', type=int, metavar='N',
help='num layers')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability for all fully connected layers '
'in the embeddings, encoder, and pooler')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
default_architecture(args)
return cls(HuggingFaceGPT2Decoder(args, task))
class HuggingFaceGPT2Decoder(FairseqIncrementalDecoder):
def __init__(self, args, task):
try:
from transformers import GPT2Config, GPT2LMHeadModel
except ImportError:
raise ImportError(
"\n\nPlease install huggingface/transformers with:"
"\n\n pip install transformers"
)
super().__init__(task.target_dictionary)
config = GPT2Config(
vocab_size=len(task.target_dictionary),
n_positions=args.max_target_positions + 1,
n_ctx=args.max_target_positions,
n_embd=args.embed_dim,
n_layer=args.num_layers,
n_head=args.num_attention_heads,
resid_pdrop=args.dropout,
embd_pdrop=args.dropout,
attn_pdrop=args.attention_dropout,
layer_norm_epsilon=1e-6,
)
self.model = GPT2LMHeadModel(config)
# set zero embedding for padding symbol
self.pad_idx = task.target_dictionary.pad()
self.model.transformer.wte.weight.data[self.pad_idx].zero_()
self.model.transformer.wpe.weight.data[0].zero_()
def forward(
self,
prev_output_tokens,
src_lengths=None,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
features = self.extract_features(prev_output_tokens, incremental_state)
lm_logits = self.model.lm_head(features)
return (lm_logits,)
def extract_features(
self,
prev_output_tokens,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
):
if incremental_state:
past = self.get_incremental_state("past")
else:
past = None
# don't attend to padding symbols
attention_mask = prev_output_tokens.ne(self.pad_idx).int()
# set position ids to exclude padding symbols
position_ids = attention_mask * (
torch.arange(1, 1 + prev_output_tokens.size(1))
.to(prev_output_tokens)
.repeat(prev_output_tokens.size(0), 1)
)
outputs = self.model.transformer(
input_ids=prev_output_tokens,
past=past,
attention_mask=attention_mask,
position_ids=position_ids,
)
last_hidden_states = outputs[0]
if incremental_state:
self.set_incremental_state(incremental_state, "past", outputs[1])
return last_hidden_states
def max_positions(self):
return self.model.config.n_positions - 1
@register_model_architecture("hf_gpt2", "hf_gpt2")
def default_architecture(args):
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = getattr(
args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS
)
args.embed_dim = getattr(args, "embed_dim", 768)
args.num_attention_heads = getattr(args, "num_attention_heads", 12)
args.num_layers = getattr(args, "num_layers", 12)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
@register_model_architecture("hf_gpt2", "hf_gpt2_medium")
def hf_gpt2_medium(args):
args.embed_dim = getattr(args, "embed_dim", 1024)
args.num_attention_heads = getattr(args, "num_attention_heads", 16)
args.num_layers = getattr(args, "num_layers", 24)
default_architecture(args)
@register_model_architecture("hf_gpt2", "hf_gpt2_large")
def hf_gpt2_large(args):
args.embed_dim = getattr(args, "embed_dim", 1280)
args.num_attention_heads = getattr(args, "num_attention_heads", 20)
args.num_layers = getattr(args, "num_layers", 36)
default_architecture(args)
@register_model_architecture("hf_gpt2", "hf_gpt2_xl")
def hf_gpt2_xl(args):
args.embed_dim = getattr(args, "embed_dim", 1600)
args.num_attention_heads = getattr(args, "num_attention_heads", 25)
args.num_layers = getattr(args, "num_layers", 48)
default_architecture(args)
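# Position ids in HuggingFaceGPT2Decoder.extract_features are derived from
# the padding mask so pad tokens land on position 0, whose embedding is
# zeroed in __init__; a standalone sketch with a hypothetical pad index of 1:
def _position_id_sketch():
    pad_idx = 1
    tokens = torch.tensor([[5, 6, 7, 1, 1], [5, 6, 1, 1, 1]])
    attention_mask = tokens.ne(pad_idx).int()
    position_ids = attention_mask * torch.arange(1, tokens.size(1) + 1)
    return attention_mask, position_ids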
| 5,769
| 33.142012
| 86
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/huggingface/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/huggingface/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("fairseq.models.huggingface." + model_name)
| 710
| 32.857143
| 84
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/transformer/transformer_decoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.transformer import TransformerConfig
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from fairseq.modules import transformer_layer
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerDecoderBase":
return "TransformerDecoder"
else:
return module_name
class TransformerDecoderBase(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.decoder_layerdrop = cfg.decoder.layerdrop
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder.embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = cfg.decoder.output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise.pq,
cfg.quant_noise.pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
self.cross_self_attention = cfg.cross_self_attention
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(cfg, no_encoder_attn)
for _ in range(cfg.decoder.layers)
]
)
self.num_layers = len(self.layers)
if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not cfg.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = output_projection
if self.output_projection is None:
self.build_output_projection(cfg, dictionary, embed_tokens)
def build_output_projection(self, cfg, dictionary, embed_tokens):
if cfg.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int),
dropout=cfg.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None,
factor=cfg.adaptive_softmax_factor,
tie_proj=cfg.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim**-0.5
)
num_base_layers = cfg.base_layers
for i in range(num_base_layers):
self.layers.insert(
((i + 1) * cfg.decoder.layers) // (num_base_layers + 1),
BaseLayer(cfg),
)
def build_decoder_layer(self, cfg, no_encoder_attn=False):
layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
bs, slen = prev_output_tokens.size()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
enc = encoder_out["encoder_out"][0]
assert (
enc.size()[1] == bs
), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
padding_mask = encoder_out["encoder_padding_mask"][0]
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
enc,
padding_mask,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
        # `self._future_mask.device != tensor.device` does not work in TorchScript; this is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
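# buffered_future_mask above caches an upper-triangular -inf mask so position
# i cannot attend to positions > i; a standalone sketch of its contents:
def _future_mask_sketch(dim: int = 4):
    # 0.0 on and below the diagonal, -inf strictly above it
    return torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)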
class TransformerDecoder(TransformerDecoderBase):
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
no_encoder_attn=no_encoder_attn,
output_projection=output_projection,
)
def build_output_projection(self, args, dictionary, embed_tokens):
super().build_output_projection(
TransformerConfig.from_namespace(args), dictionary, embed_tokens
)
def build_decoder_layer(self, args, no_encoder_attn=False):
return super().build_decoder_layer(
TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn
)
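# During incremental decoding, extract_features_scriptable keeps only the
# newest target token (and its position) because earlier timesteps are served
# from the cached state; a shape-level sketch of that slicing:
def _incremental_slice_sketch():
    prev_output_tokens = torch.tensor([[2, 17, 9, 41]])  # B x tgt_len so far
    positions = torch.randn(1, 4, 8)                     # B x tgt_len x C
    prev_output_tokens = prev_output_tokens[:, -1:]      # B x 1
    positions = positions[:, -1:]                        # B x 1 x C
    return prev_output_tokens.size(), positions.size()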
| 17,896
| 36.05383
| 104
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/transformer/transformer_legacy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer.transformer_config import (
TransformerConfig,
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
DEFAULT_MIN_PARAMS_TO_WRAP,
)
from fairseq.models.transformer.transformer_base import (
TransformerModelBase,
)
@register_model("transformer")
class TransformerModel(TransformerModelBase):
"""
This is the legacy implementation of the transformer model that
uses argparse for configuration.
"""
@classmethod
def hub_models(cls):
# fmt: off
def moses_subword(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'subword_nmt',
}
def moses_fastbpe(path):
return {
'path': path,
'tokenizer': 'moses',
'bpe': 'fastbpe',
}
def spm(path):
return {
'path': path,
'bpe': 'sentencepiece',
'tokenizer': 'space',
}
return {
'transformer.wmt14.en-fr': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-fr.joined-dict.transformer.tar.bz2'),
'transformer.wmt16.en-de': 'https://dl.fbaipublicfiles.com/fairseq/models/wmt16.en-de.joined-dict.transformer.tar.bz2',
'transformer.wmt18.en-de': moses_subword('https://dl.fbaipublicfiles.com/fairseq/models/wmt18.en-de.ensemble.tar.gz'),
'transformer.wmt19.en-de': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.en-ru': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.ensemble.tar.gz'),
'transformer.wmt19.de-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.ensemble.tar.gz'),
'transformer.wmt19.ru-en': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.ensemble.tar.gz'),
'transformer.wmt19.en-de.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-de.joined-dict.single_model.tar.gz'),
'transformer.wmt19.en-ru.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.en-ru.single_model.tar.gz'),
'transformer.wmt19.de-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.de-en.joined-dict.single_model.tar.gz'),
'transformer.wmt19.ru-en.single_model': moses_fastbpe('https://dl.fbaipublicfiles.com/fairseq/models/wmt19.ru-en.single_model.tar.gz'),
'transformer.wmt20.en-ta': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz'),
'transformer.wmt20.en-iu.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz'),
'transformer.wmt20.en-iu.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz'),
'transformer.wmt20.ta-en': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz'),
'transformer.wmt20.iu-en.news': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz'),
'transformer.wmt20.iu-en.nh': spm('https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz'),
'transformer.flores101.mm100.615M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_615M.tar.gz'),
'transformer.flores101.mm100.175M': spm('https://dl.fbaipublicfiles.com/flores101/pretrained_models/flores101_mm100_175M.tar.gz'),
}
# fmt: on
def __init__(self, args, encoder, decoder):
cfg = TransformerConfig.from_namespace(args)
super().__init__(cfg, encoder, decoder)
self.args = args
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
# we want to build the args recursively in this case.
        # do not set defaults here so that setting defaults from various architectures still works
gen_parser_from_dataclass(
parser, TransformerConfig(), delete_default=True, with_prefix=""
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
args.share_decoder_input_output_embed = True
if getattr(args, "offload_activations", False):
args.checkpoint_activations = True # offloading implies checkpointing
if not args.share_all_embeddings:
args.min_params_to_wrap = getattr(
args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP
)
cfg = TransformerConfig.from_namespace(args)
return super().build_model(cfg, task)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
return super().build_embedding(
TransformerConfig.from_namespace(args), dictionary, embed_dim, path
)
@classmethod
def build_encoder(cls, args, src_dict, embed_tokens):
return super().build_encoder(
TransformerConfig.from_namespace(args), src_dict, embed_tokens
)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return super().build_decoder(
TransformerConfig.from_namespace(args), tgt_dict, embed_tokens
)
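# The hub_models entries above are consumed through torch.hub; a hedged usage
# sketch (downloads weights on first call; tokenizer/bpe choices follow the
# moses_fastbpe entry for this checkpoint):
def _hub_usage_sketch():
    import torch
    en2de = torch.hub.load(
        "pytorch/fairseq",
        "transformer.wmt19.en-de.single_model",
        tokenizer="moses",
        bpe="fastbpe",
    )
    return en2de.translate("Hello world!")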
# architectures
@register_model_architecture("transformer", "transformer_tiny")
def tiny_architecture(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 64)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 64)
args.encoder_layers = getattr(args, "encoder_layers", 2)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 2)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
return base_architecture(args)
@register_model_architecture("transformer", "transformer")
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.tie_adaptive_weights = getattr(args, "tie_adaptive_weights", False)
args.checkpoint_activations = getattr(args, "checkpoint_activations", False)
args.offload_activations = getattr(args, "offload_activations", False)
if args.offload_activations:
args.checkpoint_activations = True
args.encoder_layers_to_keep = getattr(args, "encoder_layers_to_keep", None)
args.decoder_layers_to_keep = getattr(args, "decoder_layers_to_keep", None)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.quant_noise_pq_block_size = getattr(args, "quant_noise_pq_block_size", 8)
args.quant_noise_scalar = getattr(args, "quant_noise_scalar", 0)
@register_model_architecture("transformer", "transformer_iwslt_de_en")
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 1024)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
args.decoder_layers = getattr(args, "decoder_layers", 6)
base_architecture(args)
@register_model_architecture("transformer", "transformer_wmt_en_de")
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_de_big")
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.3)
base_architecture(args)
@register_model_architecture("transformer", "transformer_vaswani_wmt_en_fr_big")
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, "dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture("transformer", "transformer_wmt_en_de_big")
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture("transformer", "transformer_wmt_en_de_big_t2t")
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
transformer_vaswani_wmt_en_de_big(args)
| 13,586
| 48.228261
| 159
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/transformer/transformer_encoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqEncoder
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from fairseq.modules import transformer_layer
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from fairseq.models.transformer import (
TransformerConfig,
)
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerEncoderBase":
return "TransformerEncoder"
else:
return module_name
class TransformerEncoderBase(FairseqEncoder):
"""
Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, cfg, dictionary, embed_tokens):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.encoder_layerdrop = cfg.encoder.layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = cfg.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
cfg.max_source_positions,
embed_dim,
self.padding_idx,
learned=cfg.encoder.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise.pq,
cfg.quant_noise.pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)]
)
self.num_layers = len(self.layers)
if cfg.encoder.normalize_before:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
def build_encoder_layer(self, cfg):
layer = transformer_layer.TransformerEncoderLayerBase(cfg)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(
src_tokens, src_lengths, return_all_hiddens, token_embeddings
)
    # TorchScript doesn't support super(), so a scriptable subclass can't
    # access the base class implementation in TorchScript.
    # The current workaround is to add a helper function with a different
    # name and call that helper function from the scriptable subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
# encoder layers
for layer in self.layers:
x = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None
)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
        # The PyTorch Mobile lite interpreter does not support returning
        # NamedTuple in `forward`, so we use a dictionary instead.
        # TorchScript does not support mixed-type values, so all the values
        # are lists. The empty list is equivalent to None.
src_lengths = (
src_tokens.ne(self.padding_idx)
.sum(dim=1, dtype=torch.int32)
.reshape(-1, 1)
.contiguous()
)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"src_lengths": [src_lengths],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerEncoder(TransformerEncoderBase):
def __init__(self, args, dictionary, embed_tokens):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
)
def build_encoder_layer(self, args):
return super().build_encoder_layer(
TransformerConfig.from_namespace(args),
)
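# forward_scriptable above zeroes out padded positions before the layer
# stack; a standalone sketch of that masking step with toy shapes and a
# hypothetical pad index of 1:
def _pad_mask_sketch():
    pad_idx = 1
    src_tokens = torch.tensor([[4, 5, 1], [4, 1, 1]])
    x = torch.randn(2, 3, 8)                       # B x T x C embeddings
    encoder_padding_mask = src_tokens.eq(pad_idx)  # B x T, True at pads
    return x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))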
| 13,389
| 37.587896
| 88
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/transformer/transformer_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
from dataclasses import dataclass, field, fields
from typing import List, Optional
from omegaconf import II
from fairseq import utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.utils import safe_getattr, safe_hasattr
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
_NAME_PARSER = r"(decoder|encoder|quant_noise)_(.*)"
@dataclass
class EncDecBaseConfig(FairseqDataclass):
embed_path: Optional[str] = field(
default=None, metadata={"help": "path to pre-trained embedding"}
)
embed_dim: Optional[int] = field(
default=512, metadata={"help": "embedding dimension"}
)
ffn_embed_dim: int = field(
default=2048, metadata={"help": "embedding dimension for FFN"}
)
layers: int = field(default=6, metadata={"help": "number of layers"})
attention_heads: int = field(
default=8, metadata={"help": "number of attention heads"}
)
normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each block"}
)
learned_pos: bool = field(
default=False, metadata={"help": "use learned positional embeddings"}
)
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
layerdrop: float = field(default=0, metadata={"help": "LayerDrop probability"})
layers_to_keep: Optional[List[int]] = field(
default=None, metadata={"help": "which layers to *keep* when pruning"}
)
@dataclass
class DecoderConfig(EncDecBaseConfig):
input_dim: int = II("model.decoder.embed_dim")
output_dim: int = field(
default=II("model.decoder.embed_dim"),
metadata={
"help": "decoder output dimension (extra linear layer if different from decoder embed dim)"
},
)
def __post_init__(self):
        # II interpolation doesn't resolve when the object is created outside of hydra, so fall back to embed_dim
if self.input_dim == II("model.decoder.embed_dim"):
self.input_dim = self.embed_dim
if self.output_dim == II("model.decoder.embed_dim"):
self.output_dim = self.embed_dim
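# A quick check of the fallback above: built outside of hydra, the II
# placeholders resolve to embed_dim in __post_init__.
def _decoder_config_sketch():
    cfg = DecoderConfig(embed_dim=256)
    return cfg.input_dim, cfg.output_dim  # both 256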
@dataclass
class QuantNoiseConfig(FairseqDataclass):
pq: float = field(
default=0.0,
metadata={"help": "iterative PQ quantization noise at training time"},
)
pq_block_size: int = field(
default=8,
metadata={"help": "block size of quantization noise at training time"},
)
scalar: float = field(
default=0.0,
metadata={
"help": "scalar quantization noise and scalar quantization at training time"
},
)
@dataclass
class TransformerConfig(FairseqDataclass):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="relu",
metadata={"help": "activation function to use"},
)
dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
attention_dropout: float = field(
default=0.0, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN.",
"alias": "--relu-dropout",
},
)
adaptive_input: bool = False
encoder: EncDecBaseConfig = EncDecBaseConfig()
# TODO should really be in the encoder config
max_source_positions: int = field(
default=DEFAULT_MAX_SOURCE_POSITIONS,
metadata={"help": "Maximum input length supported by the encoder"},
)
decoder: DecoderConfig = DecoderConfig()
# TODO should really be in the decoder config
max_target_positions: int = field(
default=DEFAULT_MAX_TARGET_POSITIONS,
metadata={"help": "Maximum output length supported by the decoder"},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
share_all_embeddings: bool = field(
default=False,
metadata={
"help": "share encoder, decoder and output embeddings (requires shared dictionary and embed dim)"
},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if True, disables positional embeddings (outside self attention)"
},
)
adaptive_softmax_cutoff: Optional[List[int]] = field(
default=None,
metadata={
"help": "list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion"
},
)
adaptive_softmax_dropout: float = field(
default=0.0,
metadata={"help": "sets adaptive softmax dropout for the tail projections"},
)
adaptive_softmax_factor: float = field(
default=4, metadata={"help": "adaptive input factor"}
)
layernorm_embedding: bool = field(
default=False, metadata={"help": "add layernorm to embedding"}
)
tie_adaptive_weights: bool = field(
default=False,
metadata={
"help": "if set, ties the weights of adaptive softmax and adaptive input"
},
)
tie_adaptive_proj: bool = field(
default=False,
metadata={
"help": "if set, ties the projection weights of adaptive softmax and adaptive input"
},
)
no_scale_embedding: bool = field(
default=False, metadata={"help": "if True, dont scale embeddings"}
)
checkpoint_activations: bool = field(
default=False,
metadata={
"help": "checkpoint activations at each layer, which saves GPU memory usage at the cost of some additional compute"
},
)
offload_activations: bool = field(
default=False,
metadata={
"help": "checkpoint activations at each layer, then save to gpu. Sets --checkpoint-activations."
},
)
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
no_cross_attention: bool = field(
default=False, metadata={"help": "do not perform cross-attention"}
)
cross_self_attention: bool = field(
default=False, metadata={"help": "perform cross+self-attention"}
)
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
quant_noise: QuantNoiseConfig = field(default=QuantNoiseConfig())
min_params_to_wrap: int = field(
default=DEFAULT_MIN_PARAMS_TO_WRAP,
metadata={
"help": "minimum number of params for a layer to be wrapped with FSDP() when "
"training with --ddp-backend=fully_sharded. Smaller values will "
"improve memory efficiency, but may make torch.distributed "
"communication less efficient due to smaller input sizes. This option "
"is set to 0 (i.e., always wrap) when --checkpoint-activations or "
"--offload-activations are passed."
},
)
# DEPRECATED field, but some old checkpoints might have it
char_inputs: bool = field(
default=False, metadata={"help": "if set, model takes character ids as input"}
)
relu_dropout: float = 0.0
# config for "BASE Layers: Simplifying Training of Large, Sparse Models"
base_layers: Optional[int] = field(
default=0, metadata={"help": "number of BASE layers in total"}
)
base_sublayers: Optional[int] = field(
default=1, metadata={"help": "number of sublayers in each BASE layer"}
)
base_shuffle: Optional[int] = field(
default=1,
metadata={"help": "shuffle tokens between workers before computing assignment"},
)
export: bool = field(
default=False,
metadata={"help": "make the layernorm exportable with torchscript."},
)
# copied from transformer_lm but expected in transformer_decoder:
no_decoder_final_norm: bool = field(
default=False,
metadata={"help": "don't add an extra layernorm after the last decoder block"},
)
    # We need to make this hierarchical dataclass behave like the flat namespace.
    # __getattr__ and __setattr__ here allow backward compatibility
    # for subclasses of Transformer(Legacy) that depend on reading/writing
    # the flat namespace.
def __getattr__(self, name):
match = re.match(_NAME_PARSER, name)
if match:
sub = safe_getattr(self, match[1])
return safe_getattr(sub, match[2])
raise AttributeError(f"invalid argument {name}.")
def __setattr__(self, name, value):
match = re.match(_NAME_PARSER, name)
if match:
sub = safe_getattr(self, match[1])
setattr(sub, match[2], value)
else:
super().__setattr__(name, value)
@staticmethod
def _copy_keys(args, cls, prefix, seen):
"""
        Copy the prefixed namespace keys (e.g. decoder_embed_dim) to the corresponding dataclass fields (e.g. decoder.embed_dim).
"""
cfg = cls()
for fld in fields(cls):
# for all the fields in the DC, find the fields (e.g. embed_dim)
# in the namespace with the prefix (e.g. decoder)
# and set it on the dc.
args_key = f"{prefix}_{fld.name}"
if safe_hasattr(args, args_key):
seen.add(args_key)
setattr(cfg, fld.name, safe_getattr(args, args_key))
if safe_hasattr(args, fld.name):
seen.add(fld.name)
setattr(cfg, fld.name, safe_getattr(args, fld.name))
return cfg
@classmethod
def from_namespace(cls, args):
if args is None:
return None
if not isinstance(args, cls):
seen = set()
config = cls()
            # Currently we can go generically from DC fields to args hierarchically,
            # but we can't easily deconstruct a flat namespace into a hierarchical
            # DC, mostly because we could have a sub-dc called `decoder-foo` that
            # should not go to the sub-struct called `decoder`. There are ways to
            # get around this, but let's keep it simple for now.
for fld in fields(cls):
                # Concretely, the transformer config knows what sub-dcs it has, so we go
                # through all the dc fields; if a field has a sub-dc, we build that sub-dc with `_copy_keys()`
if fld.name == "decoder":
if safe_hasattr(args, "decoder"):
                        # in some cases, the args we receive are already structured (as DictConfigs), so just build the correct DC
seen.add("decoder")
config.decoder = DecoderConfig(**args.decoder)
else:
config.decoder = cls._copy_keys(
args, DecoderConfig, "decoder", seen
)
elif fld.name == "encoder":
# same but for encoder
if safe_hasattr(args, "encoder"):
seen.add("encoder")
config.encoder = EncDecBaseConfig(**args.encoder)
else:
config.encoder = cls._copy_keys(
args, EncDecBaseConfig, "encoder", seen
)
elif fld.name == "quant_noise":
# same but for quant_noise
if safe_hasattr(args, "quant_noise"):
seen.add("quant_noise")
config.quant_noise = QuantNoiseConfig(**args.quant_noise)
else:
config.quant_noise = cls._copy_keys(
args, QuantNoiseConfig, "quant_noise", seen
)
elif safe_hasattr(args, fld.name):
# if it's not a structure field, it's just a normal field, copy it over
seen.add(fld.name)
setattr(config, fld.name, safe_getattr(args, fld.name))
# we got all the fields defined in the dataclass, but
# the argparse namespace might have extra args for two reasons:
            # - we are in a legacy class, so not all the args are declared in the dataclass.
            #   Ideally, once everyone has defined a dataclass for their model, we won't need this
# - some places expect args to be there but never define them
args_dict = (
args._asdict()
if safe_hasattr(args, "_asdict")
else vars(args)
if safe_hasattr(args, "__dict__")
else {}
            )  # namedtuple doesn't have __dict__ :-/
for key, value in args_dict.items():
if key not in seen:
setattr(config, key, value)
return config
else:
return args
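# A minimal usage sketch (hypothetical sizes, assuming the config and its
# sub-configs are default-constructible):
#   ns = argparse.Namespace(decoder_embed_dim=512, encoder_embed_dim=512)
#   cfg = TransformerConfig.from_namespace(ns)
#   cfg.decoder.embed_dim  # -> 512, lifted from the flat key "decoder_embed_dim"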
| 13,154
| 39.229358
| 175
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/transformer/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .transformer_config import (
TransformerConfig,
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
DEFAULT_MIN_PARAMS_TO_WRAP,
)
from .transformer_decoder import TransformerDecoder, TransformerDecoderBase, Linear
from .transformer_encoder import TransformerEncoder, TransformerEncoderBase
from .transformer_legacy import (
TransformerModel,
base_architecture,
tiny_architecture,
transformer_iwslt_de_en,
transformer_wmt_en_de,
transformer_vaswani_wmt_en_de_big,
transformer_vaswani_wmt_en_fr_big,
transformer_wmt_en_de_big,
transformer_wmt_en_de_big_t2t,
)
from .transformer_base import TransformerModelBase, Embedding
__all__ = [
"TransformerModelBase",
"TransformerConfig",
"TransformerDecoder",
"TransformerDecoderBase",
"TransformerEncoder",
"TransformerEncoderBase",
"TransformerModel",
"Embedding",
"Linear",
"base_architecture",
"tiny_architecture",
"transformer_iwslt_de_en",
"transformer_wmt_en_de",
"transformer_vaswani_wmt_en_de_big",
"transformer_vaswani_wmt_en_fr_big",
"transformer_wmt_en_de_big",
"transformer_wmt_en_de_big_t2t",
"DEFAULT_MAX_SOURCE_POSITIONS",
"DEFAULT_MAX_TARGET_POSITIONS",
"DEFAULT_MIN_PARAMS_TO_WRAP",
]
| 1,488
| 28.196078
| 83
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/models/transformer/transformer_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqEncoderDecoderModel
from fairseq.models.transformer import (
TransformerEncoderBase,
TransformerDecoderBase,
TransformerConfig,
)
from torch import Tensor
class TransformerModelBase(FairseqEncoderDecoderModel):
"""
    Transformer model from `"Attention Is All You Need" (Vaswani et al., 2017)
<https://arxiv.org/abs/1706.03762>`_.
Args:
encoder (TransformerEncoder): the encoder
decoder (TransformerDecoder): the decoder
The Transformer model provides the following named architectures and
command-line arguments:
.. argparse::
:ref: fairseq.models.transformer_parser
:prog:
"""
def __init__(self, cfg, encoder, decoder):
super().__init__(encoder, decoder)
self.cfg = cfg
self.supports_align_args = True
@classmethod
def add_args(cls, parser):
"""Add model-specific arguments to the parser."""
# we want to build the args recursively in this case.
gen_parser_from_dataclass(
parser, TransformerConfig(), delete_default=False, with_prefix=""
)
@classmethod
def build_model(cls, cfg, task):
"""Build a new model instance."""
# -- TODO T96535332
# bug caused by interaction between OmegaConf II and argparsing
cfg.decoder.input_dim = int(cfg.decoder.input_dim)
cfg.decoder.output_dim = int(cfg.decoder.output_dim)
# --
if cfg.encoder.layers_to_keep:
cfg.encoder.layers = len(cfg.encoder.layers_to_keep.split(","))
if cfg.decoder.layers_to_keep:
cfg.decoder.layers = len(cfg.decoder.layers_to_keep.split(","))
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if cfg.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if cfg.encoder.embed_dim != cfg.decoder.embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if cfg.decoder.embed_path and (
cfg.decoder.embed_path != cfg.encoder.embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path
)
decoder_embed_tokens = encoder_embed_tokens
cfg.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
cfg, src_dict, cfg.encoder.embed_dim, cfg.encoder.embed_path
)
decoder_embed_tokens = cls.build_embedding(
cfg, tgt_dict, cfg.decoder.embed_dim, cfg.decoder.embed_path
)
if cfg.offload_activations:
cfg.checkpoint_activations = True # offloading implies checkpointing
encoder = cls.build_encoder(cfg, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens)
if not cfg.share_all_embeddings:
# fsdp_wrap is a no-op when --ddp-backend != fully_sharded
encoder = fsdp_wrap(encoder, min_num_params=cfg.min_params_to_wrap)
decoder = fsdp_wrap(decoder, min_num_params=cfg.min_params_to_wrap)
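        # A reading of the guard above (not an authoritative statement): with
        # --share-all-embeddings, the encoder and decoder hold the same embedding
        # tensor, so wrapping them in separate FSDP units could shard that shared
        # parameter inconsistently; wrapping is therefore skipped in that case.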
return cls(cfg, encoder, decoder)
@classmethod
def build_embedding(cls, cfg, dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
@classmethod
def build_encoder(cls, cfg, src_dict, embed_tokens):
return TransformerEncoderBase(cfg, src_dict, embed_tokens)
@classmethod
def build_decoder(cls, cfg, tgt_dict, embed_tokens):
return TransformerDecoderBase(
cfg,
tgt_dict,
embed_tokens,
no_encoder_attn=cfg.no_cross_attention,
)
# TorchScript doesn't support optional arguments with variable length (**kwargs).
# Current workaround is to add union of all arguments in child classes.
def forward(
self,
src_tokens,
src_lengths,
prev_output_tokens,
return_all_hiddens: bool = True,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Run the forward pass for an encoder-decoder model.
Copied from the base class, but without ``**kwargs``,
which are not supported by TorchScript.
"""
encoder_out = self.encoder(
src_tokens, src_lengths=src_lengths, return_all_hiddens=return_all_hiddens
)
decoder_out = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
src_lengths=src_lengths,
return_all_hiddens=return_all_hiddens,
)
return decoder_out
    # get_normalized_probs lives in FairseqModel, which is not scriptable, so we
    # override it here to call the scriptable helper from the base class.
@torch.jit.export
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
"""Get normalized probabilities (or log probs) from a net's output."""
return self.get_normalized_probs_scriptable(net_output, log_probs, sample)
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
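# A quick sanity sketch of the init above (hypothetical sizes):
#   emb = Embedding(num_embeddings=1000, embedding_dim=512, padding_idx=1)
#   float(emb.weight[1].abs().sum())  # -> 0.0, the padding row stays zeroed
# Weights are drawn from N(0, embedding_dim ** -1), i.e. std = embedding_dim ** -0.5.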
| 6,756
| 36.538889
| 102
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/model_parallel/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import criterions, models, modules # noqa
| 228
| 31.714286
| 65
|
py
|
CIF-HieraDist
|
CIF-HieraDist-main/fairseq/model_parallel/megatron_trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
from fairseq.dataclass.configs import FairseqConfig
from fairseq.distributed import utils as distributed_utils
from fairseq.trainer import Trainer
try:
from fairseq.model_parallel.megatron.mpu import (
get_data_parallel_rank,
get_data_parallel_world_size,
get_model_parallel_src_rank,
get_cuda_rng_tracker,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class MegatronTrainer(Trainer):
"""Main class for model parallel with data parallel training."""
def __init__(self, cfg: FairseqConfig, task, model, criterion, **kwargs):
if not has_megatron_submodule:
raise ImportError(
"\n\nPlease install the megatron submodule:"
"\n\n git submodule update --init "
"fairseq/model_parallel/megatron"
)
super().__init__(cfg, task, model, criterion, **kwargs)
def clip_grad_norm(self, clip_norm):
def _aggregate_model_parallel_grad_norm(total_norm):
total_norm = total_norm**2
distributed_utils.all_reduce(
total_norm, group=distributed_utils.get_model_parallel_group()
)
total_norm = total_norm**0.5
return total_norm
return self.optimizer.clip_grad_norm(
clip_norm,
aggregate_norm_fn=_aggregate_model_parallel_grad_norm,
)
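        # Sketch of the aggregation above: each model-parallel worker computes the
        # norm of its own parameter shard; squaring, all-reduce-summing across the
        # model-parallel group, and taking the square root recovers the global norm,
        # since ||g||^2 = sum_i ||g_i||^2 over disjoint shards.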
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
extra_state["rng_tracker_states"] = get_cuda_rng_tracker().get_states()
super().save_checkpoint(filename, extra_state)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
extra_state = super().load_checkpoint(
filename,
reset_optimizer=reset_optimizer,
reset_lr_scheduler=reset_lr_scheduler,
optimizer_overrides=optimizer_overrides,
reset_meters=reset_meters,
)
if extra_state is not None and "rng_tracker_states" in extra_state:
get_cuda_rng_tracker().set_states(extra_state["rng_tracker_states"])
return extra_state
| 2,570
| 32.828947
| 80
|
py
|