| repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated |
|---|---|---|---|---|---|---|---|---|
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/distributed_fairseq_model.py | translation/fairseq/models/distributed_fairseq_model.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from torch.nn import parallel
from fairseq.distributed_utils import c10d_status
from . import BaseFairseqModel
class DistributedFairseqModel(BaseFairseqModel):
    """
    A wrapper around a :class:`BaseFairseqModel` instance that adds support for
    distributed training.

    Anytime a method or attribute is called on this class we first try to
    forward it to the underlying DistributedDataParallel instance, otherwise we
    forward it to the original :class:`BaseFairseqModel` instance.

    Args:
        args (argparse.Namespace): fairseq args
        model (BaseFairseqModel): model to wrap
    """

    def __init__(self, args, model):
        super().__init__()
        assert isinstance(model, BaseFairseqModel)
        if args.ddp_backend == 'c10d':
            # Pick the c10d DDP implementation appropriate for the installed
            # PyTorch: either it is already the default DistributedDataParallel
            # or it is exposed as the private _DistributedDataParallelC10d.
            if c10d_status.is_default:
                ddp_class = parallel.DistributedDataParallel
            elif c10d_status.has_c10d:
                ddp_class = parallel._DistributedDataParallelC10d
            else:
                raise Exception(
                    'Can\'t find c10d version of DistributedDataParallel. '
                    'Please update PyTorch.'
                )
            self.ddp_model = ddp_class(
                module=model,
                device_ids=[args.device_id],
                output_device=args.device_id,
                broadcast_buffers=False,
                # bucket_cap_mb is only supported by the c10d implementation
                bucket_cap_mb=args.bucket_cap_mb,
            )
        elif args.ddp_backend == 'no_c10d':
            # The legacy (non-c10d) implementation: 'deprecated' namespace when
            # c10d has become the default, otherwise the plain class.
            if c10d_status.is_default:
                ddp_class = parallel.deprecated.DistributedDataParallel
            else:
                ddp_class = parallel.DistributedDataParallel
            self.ddp_model = ddp_class(
                module=model,
                device_ids=[args.device_id],
                output_device=args.device_id,
                broadcast_buffers=False,
            )
        else:
            raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)

    def __call__(self, *args, **kwargs):
        # Route calls through the DDP wrapper so gradients get synchronized.
        return self.ddp_model(*args, **kwargs)

    def forward(self, *args, **kwargs):
        return self.ddp_model.forward(*args, **kwargs)

    def __getattr__(self, name):
        # Lookup order: this nn.Module's own registered members, then the DDP
        # wrapper, and finally the wrapped model itself.
        try:
            return super().__getattr__(name)
        except AttributeError:
            pass
        try:
            return self.ddp_model.__getattr__(name)
        except AttributeError:
            pass
        return self.ddp_model.module.__getattr__(name)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/fconv.py | translation/fairseq/models/fconv.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import (
AdaptiveSoftmax, BeamableMM, GradMultiply, LearnedPositionalEmbedding,
LinearizedConvolution,
)
from . import (
FairseqEncoder, FairseqIncrementalDecoder, FairseqModel,
FairseqLanguageModel, register_model, register_model_architecture,
)
@register_model('fconv')
class FConvModel(FairseqModel):
    """
    A fully convolutional model, i.e. a convolutional encoder and a
    convolutional decoder, as described in `"Convolutional Sequence to Sequence
    Learning" (Gehring et al., 2017) <https://arxiv.org/abs/1705.03122>`_.

    Args:
        encoder (FConvEncoder): the encoder
        decoder (FConvDecoder): the decoder

    The Convolutional model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.fconv_parser
        :prog:
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)
        # The encoder scales its gradients by the number of attention layers
        # (see FConvEncoder.forward), so tell it how many the decoder has.
        self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
                            help='encoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
                            help='decoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
                            help='decoder output embedding dimension')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
                            help='decoder attention [True, ...]')
        parser.add_argument('--share-input-output-embed', action='store_true',
                            help='share input and output embeddings (requires'
                                 ' --decoder-out-embed-dim and --decoder-embed-dim'
                                 ' to be equal)')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure that all args are properly defaulted (in case there are any new ones)
        base_architecture(args)
        encoder_embed_dict = None
        if args.encoder_embed_path:
            encoder_embed_dict = utils.parse_embedding(args.encoder_embed_path)
            utils.print_embed_overlap(encoder_embed_dict, task.source_dictionary)
        decoder_embed_dict = None
        if args.decoder_embed_path:
            decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)
            utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)
        # NOTE(review): eval() parses the layer-spec strings (e.g.
        # '[(512, 3)] * 20') coming from the command line; this assumes the
        # args originate from a trusted CLI, not untrusted input.
        encoder = FConvEncoder(
            dictionary=task.source_dictionary,
            embed_dim=args.encoder_embed_dim,
            embed_dict=encoder_embed_dict,
            convolutions=eval(args.encoder_layers),
            dropout=args.dropout,
            max_positions=args.max_source_positions,
        )
        decoder = FConvDecoder(
            dictionary=task.target_dictionary,
            embed_dim=args.decoder_embed_dim,
            embed_dict=decoder_embed_dict,
            convolutions=eval(args.decoder_layers),
            out_embed_dim=args.decoder_out_embed_dim,
            attention=eval(args.decoder_attention),
            dropout=args.dropout,
            max_positions=args.max_target_positions,
            share_embed=args.share_input_output_embed,
        )
        return FConvModel(encoder, decoder)
@register_model('fconv_lm')
class FConvLanguageModel(FairseqLanguageModel):
    """Convolutional language model: an :class:`FConvDecoder` used without an
    encoder (no positional-attention over source states)."""

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
                            help='decoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
                            help='decoder output embedding dimension')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
                            help='decoder attention [True, ...]')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_lm_architecture(args)
        # Older checkpoints store max_target_positions; map it onto the
        # LM-style tokens_per_sample setting.
        if hasattr(args, 'max_target_positions'):
            args.tokens_per_sample = args.max_target_positions
        decoder = FConvDecoder(
            dictionary=task.target_dictionary,
            embed_dim=args.decoder_embed_dim,
            convolutions=eval(args.decoder_layers),
            out_embed_dim=args.decoder_embed_dim,
            attention=eval(args.decoder_attention),
            dropout=args.dropout,
            max_positions=args.tokens_per_sample,
            share_embed=False,
            positional_embeddings=False,
            # Adaptive softmax is only valid together with the adaptive_loss
            # criterion; otherwise fall back to a full output projection.
            adaptive_softmax_cutoff=(
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int)
                if args.criterion == 'adaptive_loss' else None
            ),
            adaptive_softmax_dropout=args.adaptive_softmax_dropout,
        )
        return FConvLanguageModel(decoder)
class FConvEncoder(FairseqEncoder):
    """
    Convolutional encoder consisting of `len(convolutions)` layers.

    Args:
        dictionary (~fairseq.data.Dictionary): encoding dictionary
        embed_dim (int, optional): embedding dimension
        embed_dict (str, optional): filename from which to load pre-trained
            embeddings
        max_positions (int, optional): maximum supported input sequence length
        convolutions (list, optional): the convolutional layer structure. Each
            list item `i` corresponds to convolutional layer `i`. Layers are
            given as ``(out_channels, kernel_width, [residual])``. Residual
            connections are added between layers when ``residual=1`` (which is
            the default behavior).
        dropout (float, optional): dropout to be applied before each conv layer
        normalization_constant (float, optional): multiplies the result of the
            residual block by sqrt(value)
        left_pad (bool, optional): whether the input is left-padded. Default:
            ``True``
    """

    def __init__(
        self, dictionary, embed_dim=512, embed_dict=None, max_positions=1024,
        convolutions=((512, 3),) * 20, dropout=0.1, left_pad=True,
    ):
        super().__init__(dictionary)
        self.dropout = dropout
        self.left_pad = left_pad
        # Filled in later by FConvModel.__init__ (number of decoder attention
        # layers); used below for gradient scaling in forward().
        self.num_attention_layers = None
        num_embeddings = len(dictionary)
        self.padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
        if embed_dict:
            self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
        self.embed_positions = PositionalEmbedding(
            max_positions,
            embed_dim,
            self.padding_idx,
            left_pad=self.left_pad,
        )
        # Normalize the spec so each entry is (out_channels, kernel, residual).
        convolutions = extend_conv_spec(convolutions)
        in_channels = convolutions[0][0]
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.residuals = []
        # Track per-layer input widths so a residual that skips back `residual`
        # layers knows which dimensionality it connects to.
        layer_in_channels = [in_channels]
        for i, (out_channels, kernel_size, residual) in enumerate(convolutions):
            if residual == 0:
                residual_dim = out_channels
            else:
                residual_dim = layer_in_channels[-residual]
            # Project the residual input only when its width differs from the
            # layer output width.
            self.projections.append(Linear(residual_dim, out_channels)
                                    if residual_dim != out_channels else None)
            if kernel_size % 2 == 1:
                # Odd kernels: symmetric padding handled by the conv itself.
                padding = kernel_size // 2
            else:
                # Even kernels: pad explicitly in forward() (asymmetric).
                padding = 0
            # Output channels doubled because GLU halves them again.
            self.convolutions.append(
                ConvTBC(in_channels, out_channels * 2, kernel_size,
                        dropout=dropout, padding=padding)
            )
            self.residuals.append(residual)
            in_channels = out_channels
            layer_in_channels.append(out_channels)
        self.fc2 = Linear(in_channels, embed_dim)

    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`

        Returns:
            dict:
                - **encoder_out** (tuple): a tuple with two elements, where the
                  first element is the last encoder layer's output and the
                  second element is the same quantity summed with the input
                  embedding (used for attention). The shape of both tensors is
                  `(batch, src_len, embed_dim)`.
                - **encoder_padding_mask** (ByteTensor): the positions of
                  padding elements of shape `(batch, src_len)`
        """
        # embed tokens and positions
        x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        input_embedding = x
        # project to size of convolution
        x = self.fc1(x)
        # used to mask padding in input
        encoder_padding_mask = src_tokens.eq(self.padding_idx).t()  # -> T x B
        if not encoder_padding_mask.any():
            # No padding anywhere in the batch: skip masking entirely.
            encoder_padding_mask = None
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        residuals = [x]
        # temporal convolutions
        for proj, conv, res_layer in zip(self.projections, self.convolutions, self.residuals):
            if res_layer > 0:
                # Residual input from `res_layer` layers back, projected if
                # the widths differ.
                residual = residuals[-res_layer]
                residual = residual if proj is None else proj(residual)
            else:
                residual = None
            if encoder_padding_mask is not None:
                # Zero out padded positions before convolving.
                x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
            x = F.dropout(x, p=self.dropout, training=self.training)
            if conv.kernel_size[0] % 2 == 1:
                # padding is implicit in the conv
                x = conv(x)
            else:
                # Even kernel: pad asymmetrically along the time dimension.
                padding_l = (conv.kernel_size[0] - 1) // 2
                padding_r = conv.kernel_size[0] // 2
                x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
                x = conv(x)
            x = F.glu(x, dim=2)
            if residual is not None:
                # sqrt(0.5) keeps the variance of the sum roughly constant.
                x = (x + residual) * math.sqrt(0.5)
            residuals.append(x)
        # T x B x C -> B x T x C
        x = x.transpose(1, 0)
        # project back to size of embedding
        x = self.fc2(x)
        if encoder_padding_mask is not None:
            encoder_padding_mask = encoder_padding_mask.t()  # -> B x T
            x = x.masked_fill(encoder_padding_mask.unsqueeze(-1), 0)
        # scale gradients (this only affects backward, not forward)
        x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
        # add output to input embedding for attention
        y = (x + input_embedding) * math.sqrt(0.5)
        return {
            'encoder_out': (x, y),
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        # Reorder batch dimension to follow beam-search reordering.
        if encoder_out['encoder_out'] is not None:
            encoder_out['encoder_out'] = (
                encoder_out['encoder_out'][0].index_select(0, new_order),
                encoder_out['encoder_out'][1].index_select(0, new_order),
            )
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(0, new_order)
        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return self.embed_positions.max_positions()
class AttentionLayer(nn.Module):
    """Single-hop dot-product attention used between decoder conv layers.

    Attends over the encoder outputs ``(encoder_a, encoder_b)`` produced by
    :class:`FConvEncoder` (keys and values respectively).
    """

    def __init__(self, conv_channels, embed_dim, bmm=None):
        super().__init__()
        # projects from output of convolution to embedding dimension
        self.in_projection = Linear(conv_channels, embed_dim)
        # projects from embedding dimension to convolution size
        self.out_projection = Linear(embed_dim, conv_channels)
        # Allow swapping in BeamableMM at generation time (see
        # make_generation_fast_ below).
        self.bmm = bmm if bmm is not None else torch.bmm

    def forward(self, x, target_embedding, encoder_out, encoder_padding_mask):
        residual = x
        # attention: combine conv output with the target embedding, then score
        # against the encoder keys. sqrt(0.5) keeps the sum's variance stable.
        x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5)
        x = self.bmm(x, encoder_out[0])
        # don't attend over padding
        if encoder_padding_mask is not None:
            x = x.float().masked_fill(
                encoder_padding_mask.unsqueeze(1),
                float('-inf')
            ).type_as(x)  # FP16 support: cast to float and back
        # softmax over last dim
        sz = x.size()
        x = F.softmax(x.view(sz[0] * sz[1], sz[2]), dim=1)
        x = x.view(sz)
        attn_scores = x
        x = self.bmm(x, encoder_out[1])
        # scale attention output (respecting potentially different lengths)
        s = encoder_out[1].size(1)
        if encoder_padding_mask is None:
            x = x * (s * math.sqrt(1.0 / s))
        else:
            s = s - encoder_padding_mask.type_as(x).sum(dim=1, keepdim=True)  # exclude padding
            s = s.unsqueeze(-1)
            # s * rsqrt(s) == sqrt(s): per-sentence rescaling by sqrt(length).
            x = x * (s * s.rsqrt())
        # project back
        x = (self.out_projection(x) + residual) * math.sqrt(0.5)
        return x, attn_scores

    def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):
        """Replace torch.bmm with BeamableMM."""
        if beamable_mm_beam_size is not None:
            del self.bmm
            self.add_module('bmm', BeamableMM(beamable_mm_beam_size))
class FConvDecoder(FairseqIncrementalDecoder):
    """Convolutional decoder.

    Mirrors :class:`FConvEncoder`'s layer structure but uses causal
    (LinearizedConv1d) convolutions plus optional per-layer attention over the
    encoder outputs. Supports incremental decoding via ``incremental_state``.
    """

    def __init__(
        self, dictionary, embed_dim=512, embed_dict=None, out_embed_dim=256,
        max_positions=1024, convolutions=((512, 3),) * 20, attention=True,
        dropout=0.1, share_embed=False, positional_embeddings=True,
        adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0,
        left_pad=False,
    ):
        super().__init__(dictionary)
        # Checkpoint-format version marker; checked in upgrade_state_dict.
        self.register_buffer('version', torch.Tensor([2]))
        self.dropout = dropout
        self.left_pad = left_pad
        self.need_attn = True
        convolutions = extend_conv_spec(convolutions)
        in_channels = convolutions[0][0]
        if isinstance(attention, bool):
            # expand True into [True, True, ...] and do the same with False
            attention = [attention] * len(convolutions)
        if not isinstance(attention, list) or len(attention) != len(convolutions):
            raise ValueError('Attention is expected to be a list of booleans of '
                             'length equal to the number of layers.')
        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
        if embed_dict:
            self.embed_tokens = utils.load_embedding(embed_dict, self.dictionary, self.embed_tokens)
        # Language models disable positional embeddings entirely.
        self.embed_positions = PositionalEmbedding(
            max_positions,
            embed_dim,
            padding_idx,
            left_pad=self.left_pad,
        ) if positional_embeddings else None
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.residuals = []
        # Per-layer input widths, so residuals that reach back `residual`
        # layers can be projected to the right dimensionality.
        layer_in_channels = [in_channels]
        for i, (out_channels, kernel_size, residual) in enumerate(convolutions):
            if residual == 0:
                residual_dim = out_channels
            else:
                residual_dim = layer_in_channels[-residual]
            self.projections.append(Linear(residual_dim, out_channels)
                                    if residual_dim != out_channels else None)
            # Causal padding of (kernel_size - 1); channels doubled for GLU.
            self.convolutions.append(
                LinearizedConv1d(in_channels, out_channels * 2, kernel_size,
                                 padding=(kernel_size - 1), dropout=dropout)
            )
            self.attention.append(AttentionLayer(out_channels, embed_dim)
                                  if attention[i] else None)
            self.residuals.append(residual)
            in_channels = out_channels
            layer_in_channels.append(out_channels)
        self.adaptive_softmax = None
        self.fc2 = self.fc3 = None
        if adaptive_softmax_cutoff is not None:
            # Adaptive softmax replaces the fc2/fc3 output projection and is
            # incompatible with sharing input/output embeddings.
            assert not share_embed
            self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, in_channels, adaptive_softmax_cutoff,
                                                    dropout=adaptive_softmax_dropout)
        else:
            self.fc2 = Linear(in_channels, out_embed_dim)
            if share_embed:
                assert out_embed_dim == embed_dim, \
                    "Shared embed weights implies same dimensions " \
                    " out_embed_dim={} vs embed_dim={}".format(out_embed_dim, embed_dim)
                self.fc3 = nn.Linear(out_embed_dim, num_embeddings)
                # Tie output projection weights to the input embedding.
                self.fc3.weight = self.embed_tokens.weight
            else:
                self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)

    def forward(self, prev_output_tokens, encoder_out_dict=None, incremental_state=None):
        """Run the decoder; returns ``(logits_or_features, avg_attn_scores)``."""
        if encoder_out_dict is not None:
            encoder_out = encoder_out_dict['encoder_out']
            encoder_padding_mask = encoder_out_dict['encoder_padding_mask']
            # split and transpose encoder outputs
            encoder_a, encoder_b = self._split_encoder_out(encoder_out, incremental_state)
        if self.embed_positions is not None:
            pos_embed = self.embed_positions(prev_output_tokens, incremental_state)
        else:
            pos_embed = 0
        if incremental_state is not None:
            # Incremental decoding: only the most recent token is processed.
            prev_output_tokens = prev_output_tokens[:, -1:]
        x = self._embed_tokens(prev_output_tokens, incremental_state)
        # embed tokens and combine with positional embeddings
        x += pos_embed
        x = F.dropout(x, p=self.dropout, training=self.training)
        target_embedding = x
        # project to size of convolution
        x = self.fc1(x)
        # B x T x C -> T x B x C
        x = self._transpose_if_training(x, incremental_state)
        # temporal convolutions
        avg_attn_scores = None
        num_attn_layers = len(self.attention)
        residuals = [x]
        for proj, conv, attention, res_layer in zip(self.projections, self.convolutions, self.attention,
                                                    self.residuals):
            if res_layer > 0:
                residual = residuals[-res_layer]
                residual = residual if proj is None else proj(residual)
            else:
                residual = None
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = conv(x, incremental_state)
            x = F.glu(x, dim=2)
            # attention
            if attention is not None:
                # Attention layers expect B x T x C; transpose around the call.
                x = self._transpose_if_training(x, incremental_state)
                x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b), encoder_padding_mask)
                if not self.training and self.need_attn:
                    # Average attention over layers (used for alignments).
                    attn_scores = attn_scores / num_attn_layers
                    if avg_attn_scores is None:
                        avg_attn_scores = attn_scores
                    else:
                        avg_attn_scores.add_(attn_scores)
                x = self._transpose_if_training(x, incremental_state)
            # residual
            if residual is not None:
                # sqrt(0.5) keeps the variance of the sum roughly constant.
                x = (x + residual) * math.sqrt(0.5)
            residuals.append(x)
        # T x B x C -> B x T x C
        x = self._transpose_if_training(x, incremental_state)
        # project back to size of vocabulary if not using adaptive softmax
        if self.fc2 is not None and self.fc3 is not None:
            x = self.fc2(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.fc3(x)
        return x, avg_attn_scores

    def reorder_incremental_state(self, incremental_state, new_order):
        # Also reorder the cached, pre-split encoder outputs.
        super().reorder_incremental_state(incremental_state, new_order)
        encoder_out = utils.get_incremental_state(self, incremental_state, 'encoder_out')
        if encoder_out is not None:
            encoder_out = tuple(eo.index_select(0, new_order) for eo in encoder_out)
            utils.set_incremental_state(self, incremental_state, 'encoder_out', encoder_out)

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        return self.embed_positions.max_positions() if self.embed_positions is not None else float('inf')

    def upgrade_state_dict(self, state_dict):
        """Upgrade old checkpoints that used the wrong weight-norm dimension."""
        if utils.item(state_dict.get('decoder.version', torch.Tensor([1]))[0]) < 2:
            # old models use incorrect weight norm dimension
            for i, conv in enumerate(self.convolutions):
                # reconfigure weight norm
                nn.utils.remove_weight_norm(conv)
                self.convolutions[i] = nn.utils.weight_norm(conv, dim=0)
            # NOTE(review): this writes version 1 even after upgrading, so the
            # upgrade re-runs on every load — looks suspicious; confirm whether
            # it should be torch.Tensor([2]).
            state_dict['decoder.version'] = torch.Tensor([1])
        return state_dict

    def make_generation_fast_(self, need_attn=False, **kwargs):
        self.need_attn = need_attn

    def _embed_tokens(self, tokens, incremental_state):
        if incremental_state is not None:
            # keep only the last token for incremental forward pass
            tokens = tokens[:, -1:]
        return self.embed_tokens(tokens)

    def _split_encoder_out(self, encoder_out, incremental_state):
        """Split and transpose encoder outputs.

        This is cached when doing incremental inference.
        """
        cached_result = utils.get_incremental_state(self, incremental_state, 'encoder_out')
        if cached_result is not None:
            return cached_result
        # transpose only once to speed up attention layers
        encoder_a, encoder_b = encoder_out
        encoder_a = encoder_a.transpose(1, 2).contiguous()
        result = (encoder_a, encoder_b)
        if incremental_state is not None:
            utils.set_incremental_state(self, incremental_state, 'encoder_out', result)
        return result

    def _transpose_if_training(self, x, incremental_state):
        # During incremental inference T == 1, so tensors stay B x T x C.
        if incremental_state is None:
            x = x.transpose(0, 1)
        return x
def extend_conv_spec(convolutions):
    """Normalize a convolutional layer spec to 3-tuples.

    Each entry is either ``(out_channels, kernel_size)`` or
    ``(out_channels, kernel_size, residual)``. 2-tuples are extended with the
    default residual distance of 1 (residual connection to the previous
    layer); 3-tuples pass through unchanged.

    Args:
        convolutions (iterable): layer spec tuples of length 2 or 3.

    Returns:
        tuple: the spec with every entry a 3-tuple.

    Raises:
        ValueError: if an entry does not have exactly 2 or 3 elements.
    """
    extended = []
    for spec in convolutions:
        if len(spec) == 3:
            extended.append(spec)
        elif len(spec) == 2:
            # default residual propagation distance is 1
            extended.append(spec + (1,))
        else:
            # ValueError (was a bare Exception) is the idiomatic error for bad
            # argument values and is still caught by `except Exception`.
            raise ValueError('invalid number of parameters in convolution spec ' + str(spec) + '. expected 2 or 3')
    return tuple(extended)
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Token embedding initialized from N(0, 0.1) with a zeroed padding row."""
    emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(emb.weight, 0, 0.1)
    nn.init.constant_(emb.weight[padding_idx], 0)
    return emb
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad):
    """Learned positional embedding, N(0, 0.1) init with a zeroed padding row."""
    pos = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad)
    nn.init.normal_(pos.weight, 0, 0.1)
    nn.init.constant_(pos.weight[padding_idx], 0)
    return pos
def Linear(in_features, out_features, dropout=0):
    """Weight-normalized Linear layer (input: N x T x C).

    Weights are drawn from N(0, sqrt((1 - dropout) / in_features)) and the
    bias starts at zero, per the fconv initialization scheme.
    """
    layer = nn.Linear(in_features, out_features)
    std = math.sqrt((1 - dropout) / in_features)
    nn.init.normal_(layer.weight, mean=0, std=std)
    nn.init.constant_(layer.bias, 0)
    return nn.utils.weight_norm(layer)
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
    """Weight-normalized Conv1d layer optimized for decoding.

    Uses the fconv init: weights from N(0, sqrt(4 * (1 - dropout) / fan_in)),
    zero bias, and weight normalization over dim=2.
    """
    conv = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
    std = math.sqrt((4 * (1.0 - dropout)) / (conv.kernel_size[0] * in_channels))
    nn.init.normal_(conv.weight, mean=0, std=std)
    nn.init.constant_(conv.bias, 0)
    return nn.utils.weight_norm(conv, dim=2)
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
    """Weight-normalized Conv1d layer (time x batch x channel layout).

    Same initialization scheme as :func:`LinearizedConv1d`.
    """
    from fairseq.modules import ConvTBC
    conv = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
    std = math.sqrt((4 * (1.0 - dropout)) / (conv.kernel_size[0] * in_channels))
    nn.init.normal_(conv.weight, mean=0, std=std)
    nn.init.constant_(conv.bias, 0)
    return nn.utils.weight_norm(conv, dim=2)
@register_model_architecture('fconv_lm', 'fconv_lm')
def base_lm_architecture(args):
    """Fill in missing hyperparameters with base fconv LM defaults."""
    defaults = [
        ('dropout', 0.1),
        ('decoder_embed_dim', 128),
        ('decoder_layers', '[(1268, 4)] * 13'),
        ('decoder_attention', 'False'),
        ('adaptive_softmax_cutoff', None),
        ('adaptive_softmax_dropout', 0),
    ]
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_wikitext103')
def fconv_lm_dauphin_wikitext103(args):
    """Dauphin et al. GCNN architecture for WikiText-103."""
    layers = ' + '.join([
        '[(850, 6)] * 3',
        '[(850, 1)] * 1',
        '[(850, 5)] * 4',
        '[(850, 1)] * 1',
        '[(850, 4)] * 3',
        '[(1024, 4)] * 1',
        '[(2048, 4)] * 1',
    ])
    defaults = [
        ('decoder_embed_dim', 280),
        ('decoder_layers', layers),
        ('decoder_attention', 'False'),
        ('adaptive_softmax_cutoff', '10000,20000,200000'),
    ]
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
    base_lm_architecture(args)
@register_model_architecture('fconv_lm', 'fconv_lm_dauphin_gbw')
def fconv_lm_dauphin_gbw(args):
    """Dauphin et al. GCNN architecture for the Google Billion Words corpus."""
    layers = ' + '.join([
        '[(512, 5)]',
        '[(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3',
        '[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3',
        '[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6',
        '[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]',
    ])
    defaults = [
        ('decoder_embed_dim', 128),
        ('decoder_layers', layers),
        ('decoder_attention', 'False'),
        ('adaptive_softmax_cutoff', '10000,50000,200000'),
    ]
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
    base_lm_architecture(args)
@register_model_architecture('fconv', 'fconv')
def base_architecture(args):
    """Fill in missing hyperparameters with base fconv seq2seq defaults."""
    defaults = [
        ('dropout', 0.1),
        ('encoder_embed_dim', 512),
        ('encoder_embed_path', None),
        ('encoder_layers', '[(512, 3)] * 20'),
        ('decoder_embed_dim', 512),
        ('decoder_embed_path', None),
        ('decoder_layers', '[(512, 3)] * 20'),
        ('decoder_out_embed_dim', 256),
        ('decoder_attention', 'True'),
        ('share_input_output_embed', False),
    ]
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
@register_model_architecture('fconv', 'fconv_iwslt_de_en')
def fconv_iwslt_de_en(args):
    """Small fconv configuration for IWSLT De-En."""
    defaults = [
        ('encoder_embed_dim', 256),
        ('encoder_layers', '[(256, 3)] * 4'),
        ('decoder_embed_dim', 256),
        ('decoder_layers', '[(256, 3)] * 3'),
        ('decoder_out_embed_dim', 256),
    ]
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
    base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_ro')
def fconv_wmt_en_ro(args):
    """WMT En-Ro: base fconv with a 512-dim decoder output embedding."""
    if not hasattr(args, 'decoder_out_embed_dim'):
        args.decoder_out_embed_dim = 512
    base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_de')
def fconv_wmt_en_de(args):
    """Large fconv configuration for WMT En-De."""
    convs = ' + '.join([
        '[(512, 3)] * 9',    # first 9 layers have 512 units
        '[(1024, 3)] * 4',   # next 4 layers have 1024 units
        '[(2048, 1)] * 2',   # final 2 layers use 1x1 convolutions
    ])
    defaults = [
        ('encoder_embed_dim', 768),
        ('encoder_layers', convs),
        ('decoder_embed_dim', 768),
        ('decoder_layers', convs),
        ('decoder_out_embed_dim', 512),
    ]
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
    base_architecture(args)
@register_model_architecture('fconv', 'fconv_wmt_en_fr')
def fconv_wmt_en_fr(args):
    """Very large fconv configuration for WMT En-Fr."""
    convs = ' + '.join([
        '[(512, 3)] * 6',    # first 6 layers have 512 units
        '[(768, 3)] * 4',    # next 4 layers have 768 units
        '[(1024, 3)] * 3',   # next 3 layers have 1024 units
        '[(2048, 1)] * 1',   # next 1 layer uses 1x1 convolutions
        '[(4096, 1)] * 1',   # final 1 layer uses 1x1 convolutions
    ])
    defaults = [
        ('encoder_embed_dim', 768),
        ('encoder_layers', convs),
        ('decoder_embed_dim', 768),
        ('decoder_layers', convs),
        ('decoder_out_embed_dim', 512),
    ]
    for name, default in defaults:
        setattr(args, name, getattr(args, name, default))
    base_architecture(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/fconv_self_att.py | translation/fairseq/models/fconv_self_att.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import (
DownsampledMultiHeadAttention, GradMultiply, LearnedPositionalEmbedding,
LinearizedConvolution,
)
from fairseq import utils
from . import (
FairseqEncoder, CompositeEncoder, FairseqDecoder, FairseqModel,
register_model, register_model_architecture,
)
@register_model('fconv_self_att')
class FConvModelSelfAtt(FairseqModel):
    """Fully convolutional model with self-attention and optional fusion with
    a frozen pretrained model (pretrained encoder + decoder)."""

    def __init__(self, encoder, decoder, pretrained_encoder=None):
        super().__init__(encoder, decoder)
        # The encoder scales its gradients by the number of attention layers
        # the decoder uses over it.
        self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)
        self.pretrained_encoder = pretrained_encoder
        if self.pretrained_encoder is None:
            encoders = {'encoder': encoder}
        else:
            encoders = {'encoder': encoder, 'pretrained': self.pretrained_encoder}
        # for fusion model, CompositeEncoder contains both pretrained and training encoders
        # these are forwarded and then combined in the decoder
        self.encoder = CompositeEncoder(encoders)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-layers', type=str, metavar='EXPR',
                            help='encoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR',
                            help='decoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
                            help='decoder output embedding dimension')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR',
                            help='decoder attention [True, ...]')
        parser.add_argument('--self-attention', type=str, metavar='EXPR',
                            help='decoder self-attention layers, ex: [True] + [False]*5')
        parser.add_argument('--multihead-attention-nheads', type=int,
                            help='Number of heads to use in attention')
        parser.add_argument('--multihead-self-attention-nheads', type=int,
                            help='Number of heads to use in self-attention')
        parser.add_argument('--encoder-attention', type=str, metavar='EXPR',
                            help='encoder attention [True, ...]')
        parser.add_argument('--encoder-attention-nheads', type=int,
                            help='Number of heads to use in encoder attention')
        parser.add_argument('--project-input', type=str, metavar='EXPR',
                            help='Use projections in self-attention [True, ...]')
        parser.add_argument('--gated-attention', type=str, metavar='EXPR',
                            help='Use GLU layers in self-attention projections [True, ...]')
        parser.add_argument('--downsample', type=str, metavar='EXPR',
                            help='Use downsampling in self-attention [True, ...]')
        parser.add_argument('--pretrained-checkpoint', metavar='DIR',
                            help='path to load checkpoint from pretrained model')
        parser.add_argument('--pretrained', type=str, metavar='EXPR',
                            help='use pretrained model when training [True, ...]')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        trained_encoder, trained_decoder = None, None
        # NOTE(review): eval() parses the boolean/list expression strings from
        # the command line; assumes args come from a trusted CLI.
        pretrained = eval(args.pretrained)
        if pretrained:
            print("| loading pretrained model")
            trained_model = utils.load_ensemble_for_inference(
                # not actually for inference, but loads pretrained model parameters
                filenames=[args.pretrained_checkpoint],
                task=task,
            )[0][0]
            # Children order: index 0 is the encoder, index 1 the decoder.
            trained_decoder = list(trained_model.children())[1]
            trained_encoder = list(trained_model.children())[0]
            # freeze pretrained model
            for param in trained_decoder.parameters():
                param.requires_grad = False
            for param in trained_encoder.parameters():
                param.requires_grad = False
        encoder = FConvEncoder(
            task.source_dictionary,
            embed_dim=args.encoder_embed_dim,
            convolutions=eval(args.encoder_layers),
            dropout=args.dropout,
            max_positions=args.max_source_positions,
            attention=eval(args.encoder_attention),
            attention_nheads=args.encoder_attention_nheads
        )
        decoder = FConvDecoder(
            task.target_dictionary,
            embed_dim=args.decoder_embed_dim,
            convolutions=eval(args.decoder_layers),
            out_embed_dim=args.decoder_out_embed_dim,
            attention=eval(args.decoder_attention),
            dropout=args.dropout,
            max_positions=args.max_target_positions,
            selfattention=eval(args.self_attention),
            attention_nheads=args.multihead_attention_nheads,
            selfattention_nheads=args.multihead_self_attention_nheads,
            project_input=eval(args.project_input),
            gated_attention=eval(args.gated_attention),
            downsample=eval(args.downsample),
            pretrained=pretrained,
            trained_decoder=trained_decoder
        )
        model = FConvModelSelfAtt(encoder, decoder, trained_encoder)
        return model

    @property
    def pretrained(self):
        # True when a frozen pretrained encoder was supplied (fusion mode).
        return self.pretrained_encoder is not None
class FConvEncoder(FairseqEncoder):
    """Convolutional encoder: stacked GLU conv layers with residuals and
    optional per-layer self-attention.

    Args:
        dictionary: source vocabulary (provides pad index and size)
        embed_dim (int): token/position embedding dimension
        max_positions (int): max source length supported by position embeddings
        convolutions: sequence of (out_channels, kernel_size) per layer
        dropout (float): dropout probability applied throughout
        attention (bool or list[bool]): per-layer self-attention switches
        attention_nheads (int): heads for the self-attention layers
        left_pad (bool): whether source batches are left-padded
    """

    def __init__(
        self, dictionary, embed_dim=512, max_positions=1024,
        convolutions=((512, 3),) * 20, dropout=0.1, attention=False,
        attention_nheads=1, left_pad=True,
    ):
        super().__init__(dictionary)
        self.dropout = dropout
        # Used in forward() to scale gradients; must be assigned a number
        # before forward() runs (division by None would fail) -- presumably
        # set by the owning model after counting decoder attention layers.
        self.num_attention_layers = None
        self.left_pad = left_pad

        num_embeddings = len(dictionary)
        self.padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
        self.embed_positions = PositionalEmbedding(
            max_positions,
            embed_dim,
            self.padding_idx,
            left_pad=self.left_pad,
        )

        def expand_bool_array(val):
            if isinstance(val, bool):
                # expand True into [True, True, ...] and do the same with False
                return [val] * len(convolutions)
            return val

        attention = expand_bool_array(attention)

        in_channels = convolutions[0][0]
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.attproj = nn.ModuleList()
        for i, (out_channels, kernel_size) in enumerate(convolutions):
            # 1x1 projection only when the residual branch needs a channel change
            self.projections.append(
                Linear(in_channels, out_channels) if in_channels != out_channels else None
            )
            # 2*out_channels so the GLU in forward() halves it back to out_channels
            self.convolutions.append(
                ConvTBC(in_channels, out_channels * 2, kernel_size, dropout=dropout)
            )
            self.attention.append(
                SelfAttention(out_channels, embed_dim, attention_nheads) if attention[i] else None
            )
            in_channels = out_channels

        self.fc2 = Linear(in_channels, embed_dim)

    def forward(self, src_tokens, src_lengths):
        """Encode; returns dict with 'encoder_out': (x, y) where x is the
        conv-stack output and y = (x + input embedding) * sqrt(0.5), both
        B x T x C."""
        # embed tokens and positions
        x = self.embed_tokens(src_tokens) + self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)
        # keep the raw embeddings (as T x B x C) for the residual into y below
        input_embedding = x.transpose(0, 1)

        # project to size of convolution
        x = self.fc1(x)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        # temporal convolutions
        for proj, conv, attention in zip(self.projections, self.convolutions, self.attention):
            residual = x if proj is None else proj(x)

            x = F.dropout(x, p=self.dropout, training=self.training)
            # asymmetric padding keeps output length equal to input length
            # for both odd and even kernel sizes
            padding_l = (conv.kernel_size[0] - 1) // 2
            padding_r = conv.kernel_size[0] // 2
            x = F.pad(x, (0, 0, 0, 0, padding_l, padding_r))
            x = conv(x)
            x = F.glu(x, dim=2)
            if attention is not None:
                x = attention(x)
            # sqrt(0.5) keeps the variance of the residual sum roughly constant
            x = (x + residual) * math.sqrt(0.5)

        # T x B x C -> B x T x C
        x = x.transpose(1, 0)

        # project back to size of embedding
        x = self.fc2(x)

        # scale gradients (this only affects backward, not forward)
        x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))

        # add output to input embedding for attention
        y = (x + input_embedding.transpose(0, 1)) * math.sqrt(0.5)

        return {
            'encoder_out': (x, y),
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder batch dimension of encoder outputs (used in beam search);
        also reorders the nested pretrained-model outputs when present."""
        encoder_out['encoder_out'] = tuple(
            eo.index_select(0, new_order) for eo in encoder_out['encoder_out']
        )

        if 'pretrained' in encoder_out:
            encoder_out['pretrained']['encoder_out'] = tuple(
                eo.index_select(0, new_order)
                for eo in encoder_out['pretrained']['encoder_out']
            )

        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return self.embed_positions.max_positions()
class FConvDecoder(FairseqDecoder):
    """Convolutional decoder with encoder attention, optional self-attention,
    and optional gated fusion with a frozen pretrained decoder.

    When ``pretrained`` is True, a forward hook captures the pretrained
    decoder's fc2 (hidden-state) output and forward() fuses it with this
    decoder's hidden state via two learned sigmoid gates before projecting
    to the vocabulary.
    """

    def __init__(
        self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024,
        convolutions=((512, 3),) * 8, attention=True, dropout=0.1,
        selfattention=False, attention_nheads=1, selfattention_nheads=1,
        project_input=False, gated_attention=False, downsample=False,
        pretrained=False, trained_decoder=None, left_pad=False,
    ):
        super().__init__(dictionary)
        # version buffer travels with the state dict for checkpoint upgrades
        self.register_buffer('version', torch.Tensor([2]))
        self.pretrained = pretrained
        self.pretrained_decoder = trained_decoder
        self.dropout = dropout
        self.left_pad = left_pad
        self.need_attn = True
        in_channels = convolutions[0][0]

        def expand_bool_array(val):
            if isinstance(val, bool):
                # expand True into [True, True, ...] and do the same with False
                return [val] * len(convolutions)
            return val

        attention = expand_bool_array(attention)
        selfattention = expand_bool_array(selfattention)

        if not isinstance(attention, list) or len(attention) != len(convolutions):
            raise ValueError('Attention is expected to be a list of booleans of '
                             'length equal to the number of layers.')

        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)

        self.embed_positions = PositionalEmbedding(
            max_positions,
            embed_dim,
            padding_idx,
            left_pad=self.left_pad,
        )

        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.selfattention = nn.ModuleList()
        self.attproj = nn.ModuleList()
        for i, (out_channels, kernel_size) in enumerate(convolutions):
            # residual projection only when channel counts differ
            self.projections.append(
                Linear(in_channels, out_channels) if in_channels != out_channels else None
            )
            # causal conv: pad kernel_size-1 on the left (LinearizedConv1d)
            self.convolutions.append(
                LinearizedConv1d(
                    in_channels, out_channels * 2, kernel_size,
                    padding=(kernel_size - 1), dropout=dropout,
                )
            )
            # encoder-side attention (never gated/downsampled here)
            self.attention.append(
                DownsampledMultiHeadAttention(
                    out_channels, embed_dim, attention_nheads,
                    project_input=project_input, gated=False, downsample=False,
                ) if attention[i] else None
            )
            # projects conv output to embed_dim before adding target embedding
            self.attproj.append(
                Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None
            )
            self.selfattention.append(
                SelfAttention(
                    out_channels, embed_dim, selfattention_nheads,
                    project_input=project_input, gated=gated_attention,
                    downsample=downsample,
                ) if selfattention[i] else None
            )
            in_channels = out_channels

        self.fc2 = Linear(in_channels, out_embed_dim)
        self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)

        # model fusion
        if self.pretrained:
            # independent gates are learned from the concatenated input
            self.gate1 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid())
            self.gate2 = nn.Sequential(Linear(out_embed_dim*2, out_embed_dim), nn.Sigmoid())
            # pretrained and trained models are joined
            self.joining = nn.Sequential(
                Linear(out_embed_dim*2, out_embed_dim*2),
                nn.LayerNorm(out_embed_dim*2),
                nn.GLU(),
                Linear(out_embed_dim, out_embed_dim*2),
                nn.LayerNorm(out_embed_dim*2),
                nn.GLU(),
                Linear(out_embed_dim, out_embed_dim),
                nn.LayerNorm(out_embed_dim)
            )
            # pretrained model contains an output layer that is nhid -> vocab size
            # but the models are combined in their hidden state
            # the hook stores the output of the pretrained model forward
            self.pretrained_outputs = {}

            def save_output():
                def hook(a, b, output):
                    self.pretrained_outputs["out"] = output
                return hook

            # capture the pretrained decoder's pre-softmax hidden state each
            # time it runs; read back in forward() after calling it
            self.pretrained_decoder.fc2.register_forward_hook(save_output())

    def forward(self, prev_output_tokens, encoder_out_dict):
        """Decode one batch; returns (logits B x T x V, avg attention scores
        or None)."""
        encoder_out = encoder_out_dict['encoder']['encoder_out']
        trained_encoder_out = encoder_out_dict['pretrained'] if self.pretrained else None

        encoder_a, encoder_b = self._split_encoder_out(encoder_out)

        # embed positions
        positions = self.embed_positions(prev_output_tokens)

        # embed tokens and positions
        x = self.embed_tokens(prev_output_tokens) + positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # target embeddings (T x B x C) are added to attention queries below
        target_embedding = x.transpose(0, 1)

        # project to size of convolution
        x = self.fc1(x)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        # temporal convolutions
        avg_attn_scores = None
        for proj, conv, attention, selfattention, attproj in zip(
            self.projections, self.convolutions, self.attention, self.selfattention, self.attproj
        ):
            residual = x if proj is None else proj(x)

            x = F.dropout(x, p=self.dropout, training=self.training)
            x = conv(x)
            x = F.glu(x, dim=2)

            # attention
            if attention is not None:
                r = x
                x, attn_scores = attention(attproj(x) + target_embedding, encoder_a, encoder_b)
                x = x + r
                # accumulate attention across layers for inference-time
                # alignment extraction (only when requested)
                if not self.training and self.need_attn:
                    if avg_attn_scores is None:
                        avg_attn_scores = attn_scores
                    else:
                        avg_attn_scores.add_(attn_scores)

            if selfattention is not None:
                x = selfattention(x)

            x = (x + residual) * math.sqrt(0.5)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        # project back to size of vocabulary
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        if not self.pretrained:
            x = self.fc3(x)

        # fusion gating
        if self.pretrained:
            # running the frozen decoder fills self.pretrained_outputs["out"]
            # via the fc2 forward hook registered in __init__
            trained_x, _ = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
            y = torch.cat([x, self.pretrained_outputs["out"]], dim=-1)
            gate1 = self.gate1(y)
            gate2 = self.gate2(y)
            gated_x1 = gate1 * x
            gated_x2 = gate2 * self.pretrained_outputs["out"]
            fusion = torch.cat([gated_x1, gated_x2], dim=-1)
            fusion = self.joining(fusion)
            fusion_output = self.fc3(fusion)
            return fusion_output, avg_attn_scores
        else:
            return x, avg_attn_scores

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        return self.embed_positions.max_positions()

    def make_generation_fast_(self, need_attn=False, **kwargs):
        # allow generation to skip attention accumulation unless requested
        self.need_attn = need_attn

    def _split_encoder_out(self, encoder_out):
        """Split and transpose encoder outputs."""
        # transpose only once to speed up attention layers
        encoder_a, encoder_b = encoder_out
        encoder_a = encoder_a.transpose(0, 1).contiguous()
        encoder_b = encoder_b.transpose(0, 1).contiguous()
        result = (encoder_a, encoder_b)
        return result
class SelfAttention(nn.Module):
    """Causal multi-head self-attention block with residual + LayerNorm.

    The same input is projected three times (query/key/value) and fed to a
    DownsampledMultiHeadAttention; the result is added back to the input and
    layer-normalized.
    """

    def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False):
        super().__init__()
        self.attention = DownsampledMultiHeadAttention(
            out_channels, embed_dim, num_heads, dropout=0, bias=True,
            project_input=project_input, gated=gated, downsample=downsample,
        )
        # separate learned projections of the same input into q/k/v
        self.in_proj_q = Linear(out_channels, embed_dim)
        self.in_proj_k = Linear(out_channels, embed_dim)
        self.in_proj_v = Linear(out_channels, embed_dim)
        self.ln = nn.LayerNorm(out_channels)

    def forward(self, x):
        residual = x
        query = self.in_proj_q(x)
        key = self.in_proj_k(x)
        value = self.in_proj_v(x)
        # mask_future_timesteps=True enforces left-to-right (causal) attention
        x, _ = self.attention(query, key, value, mask_future_timesteps=True, use_scalar_bias=True)
        return self.ln(x + residual)
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Token embedding with weights drawn from N(0, 0.1)."""
    layer = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    nn.init.normal_(layer.weight, mean=0, std=0.1)
    return layer
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad):
    """Learned positional embedding with weights drawn from N(0, 0.1)."""
    layer = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad)
    nn.init.normal_(layer.weight, mean=0, std=0.1)
    return layer
def Linear(in_features, out_features, dropout=0.):
    """Linear layer for (N x T x C) inputs: variance-scaled weight init and
    zero bias.

    Note: no weight normalization is applied, despite the naming convention
    shared with other fairseq conv models.
    """
    layer = nn.Linear(in_features, out_features)
    std = math.sqrt((1 - dropout) / in_features)
    nn.init.normal_(layer.weight, mean=0, std=std)
    nn.init.zeros_(layer.bias)
    return layer
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0., **kwargs):
    """LinearizedConvolution (Conv1d optimized for incremental decoding) with
    variance-scaled weight init and zero bias."""
    layer = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
    fan_in = layer.kernel_size[0] * in_channels
    nn.init.normal_(layer.weight, mean=0, std=math.sqrt(4 * (1.0 - dropout) / fan_in))
    nn.init.zeros_(layer.bias)
    return layer
def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
    """Time-Batch-Channel Conv1d with variance-scaled weight init and zero
    bias.

    The local import deliberately shadows this factory's name with the
    fairseq ConvTBC module class inside the function body.
    """
    from fairseq.modules import ConvTBC
    layer = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
    fan_in = layer.kernel_size[0] * in_channels
    nn.init.normal_(layer.weight, mean=0, std=math.sqrt(4 * (1.0 - dropout) / fan_in))
    nn.init.zeros_(layer.bias)
    return layer
@register_model_architecture('fconv_self_att', 'fconv_self_att')
def base_architecture(args):
    """Fill in any unset hyperparameters with the fconv_self_att defaults."""
    defaults = [
        ('dropout', 0.1),
        ('encoder_embed_dim', 512),
        ('encoder_layers', '[(512, 3)] * 3'),
        ('decoder_embed_dim', 512),
        ('decoder_layers', '[(512, 3)] * 8'),
        ('decoder_out_embed_dim', 256),
        ('decoder_attention', 'True'),
        ('self_attention', 'False'),
        ('encoder_attention', 'False'),
        ('multihead_attention_nheads', 1),
        ('multihead_self_attention_nheads', 1),
        ('encoder_attention_nheads', 1),
        ('project_input', 'False'),
        ('gated_attention', 'False'),
        ('downsample', 'False'),
        ('pretrained_checkpoint', ''),
        ('pretrained', 'False'),
    ]
    # keep any value the user already set; otherwise install the default
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
@register_model_architecture('fconv_self_att', 'fconv_self_att_wp')
def fconv_self_att_wp(args):
    """Writing-prompts variant: smaller embeddings, gated downsampled
    self-attention; unset fields then fall back to base_architecture."""
    overrides = [
        ('encoder_embed_dim', 256),
        ('encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1'),
        ('decoder_embed_dim', 256),
        ('decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1'),
        ('decoder_out_embed_dim', 256),
        ('self_attention', 'True'),
        ('multihead_self_attention_nheads', 4),
        ('project_input', 'True'),
        ('gated_attention', 'True'),
        ('downsample', 'True'),
    ]
    for name, value in overrides:
        setattr(args, name, getattr(args, name, value))
    base_architecture(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/fairseq_encoder.py | translation/fairseq/models/fairseq_encoder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.nn as nn
class FairseqEncoder(nn.Module):
    """Abstract base class for fairseq encoders.

    Concrete encoders implement :func:`forward` and
    :func:`reorder_encoder_out`.
    """

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary

    def forward(self, src_tokens, src_lengths):
        """Encode a batch of source sentences.

        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`
        """
        raise NotImplementedError

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder encoder output according to `new_order`.

        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order

        Returns:
            `encoder_out` rearranged according to `new_order`
        """
        raise NotImplementedError

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        # arbitrary large number; subclasses override with a real limit
        return 1e6

    def upgrade_state_dict(self, state_dict):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        return state_dict
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/fairseq_incremental_decoder.py | translation/fairseq/models/fairseq_incremental_decoder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqDecoder
class FairseqIncrementalDecoder(FairseqDecoder):
    """Base class for incremental decoders.

    Incremental decoding is a special mode at inference time where the Model
    only receives a single timestep of input corresponding to the immediately
    previous output token (for input feeding) and must produce the next output
    *incrementally*. Thus the model must cache any long-term state that is
    needed about the sequence, e.g., hidden states, convolutional states, etc.

    Compared to the standard :class:`FairseqDecoder` interface, the incremental
    decoder interface allows :func:`forward` functions to take an extra keyword
    argument (*incremental_state*) that can be used to cache state across
    time-steps.

    The :class:`FairseqIncrementalDecoder` interface also defines the
    :func:`reorder_incremental_state` method, which is used during beam search
    to select and reorder the incremental state based on the selection of beams.
    """

    def __init__(self, dictionary):
        super().__init__(dictionary)

    def forward(self, prev_output_tokens, encoder_out, incremental_state=None):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`

        Returns:
            tuple:
                - the last decoder layer's output of shape `(batch, tgt_len,
                  vocab)`
                - the last decoder layer's attention weights of shape `(batch,
                  tgt_len, src_len)`
        """
        raise NotImplementedError

    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder incremental state.

        This should be called when the order of the input has changed from the
        previous time step. A typical use case is beam search, where the input
        order changes between time steps based on the selection of beams.
        """
        def apply_reorder_incremental_state(module):
            # skip self to avoid recursing back into this method; delegate to
            # any submodule that implements reorder_incremental_state
            if module != self and hasattr(module, 'reorder_incremental_state'):
                module.reorder_incremental_state(
                    incremental_state,
                    new_order,
                )
        # nn.Module.apply visits self and every submodule
        self.apply(apply_reorder_incremental_state)

    def set_beam_size(self, beam_size):
        """Sets the beam size in the decoder and all children."""
        # cache the last value to avoid re-traversing the module tree when
        # the beam size has not changed
        if getattr(self, '_beam_size', -1) != beam_size:
            def apply_set_beam_size(module):
                if module != self and hasattr(module, 'set_beam_size'):
                    module.set_beam_size(beam_size)
            self.apply(apply_set_beam_size)
            self._beam_size = beam_size
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/lstm.py | translation/fairseq/models/lstm.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.modules import AdaptiveSoftmax
from . import (
FairseqEncoder, FairseqIncrementalDecoder, FairseqModel, register_model,
register_model_architecture,
)
@register_model('lstm')
class LSTMModel(FairseqModel):
    """LSTM encoder-decoder model with (optional) attention."""

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-hidden-size', type=int, metavar='N',
                            help='encoder hidden size')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='number of encoder layers')
        parser.add_argument('--encoder-bidirectional', action='store_true',
                            help='make all layers of encoder bidirectional')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-hidden-size', type=int, metavar='N',
                            help='decoder hidden size')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='number of decoder layers')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
                            help='decoder output embedding dimension')
        parser.add_argument('--decoder-attention', type=str, metavar='BOOL',
                            help='decoder attention')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')

        # Granular dropout settings (if not specified these default to --dropout)
        parser.add_argument('--encoder-dropout-in', type=float, metavar='D',
                            help='dropout probability for encoder input embedding')
        parser.add_argument('--encoder-dropout-out', type=float, metavar='D',
                            help='dropout probability for encoder output')
        parser.add_argument('--decoder-dropout-in', type=float, metavar='D',
                            help='dropout probability for decoder input embedding')
        parser.add_argument('--decoder-dropout-out', type=float, metavar='D',
                            help='dropout probability for decoder output')
        parser.add_argument('--share-decoder-input-output-embed', default=False,
                            action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', default=False, action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure that all args are properly defaulted (in case there are any new ones)
        base_architecture(args)

        def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
            # Build an Embedding layer and overwrite rows found in the
            # pretrained embedding text file.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
            embed_dict = utils.parse_embedding(embed_path)
            utils.print_embed_overlap(embed_dict, dictionary)
            return utils.load_embedding(embed_dict, dictionary, embed_tokens)

        if args.encoder_embed_path:
            pretrained_encoder_embed = load_pretrained_embedding_from_file(
                args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim)
        else:
            num_embeddings = len(task.source_dictionary)
            pretrained_encoder_embed = Embedding(
                num_embeddings, args.encoder_embed_dim, task.source_dictionary.pad()
            )

        if args.share_all_embeddings:
            # double check all parameters combinations are valid
            if task.source_dictionary != task.target_dictionary:
                raise RuntimeError('--share-all-embeddings requires a joint dictionary')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                # FIX: message previously named a non-existent
                # '--share-all-embed' flag
                raise RuntimeError(
                    '--share-all-embeddings not compatible with --decoder-embed-path'
                )
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise RuntimeError(
                    '--share-all-embeddings requires --encoder-embed-dim to '
                    'match --decoder-embed-dim'
                )
            pretrained_decoder_embed = pretrained_encoder_embed
            args.share_decoder_input_output_embed = True
        else:
            # separate decoder input embeddings
            pretrained_decoder_embed = None
            if args.decoder_embed_path:
                pretrained_decoder_embed = load_pretrained_embedding_from_file(
                    args.decoder_embed_path,
                    task.target_dictionary,
                    args.decoder_embed_dim
                )
        # one last double check of parameter combinations
        if args.share_decoder_input_output_embed and (
                args.decoder_embed_dim != args.decoder_out_embed_dim):
            # FIX: message previously named a non-existent
            # '--share-decoder-input-output-embeddings' flag
            raise RuntimeError(
                '--share-decoder-input-output-embed requires '
                '--decoder-embed-dim to match --decoder-out-embed-dim'
            )

        encoder = LSTMEncoder(
            dictionary=task.source_dictionary,
            embed_dim=args.encoder_embed_dim,
            hidden_size=args.encoder_hidden_size,
            num_layers=args.encoder_layers,
            dropout_in=args.encoder_dropout_in,
            dropout_out=args.encoder_dropout_out,
            bidirectional=args.encoder_bidirectional,
            pretrained_embed=pretrained_encoder_embed,
        )
        decoder = LSTMDecoder(
            dictionary=task.target_dictionary,
            embed_dim=args.decoder_embed_dim,
            hidden_size=args.decoder_hidden_size,
            out_embed_dim=args.decoder_out_embed_dim,
            num_layers=args.decoder_layers,
            dropout_in=args.decoder_dropout_in,
            dropout_out=args.decoder_dropout_out,
            attention=options.eval_bool(args.decoder_attention),
            encoder_embed_dim=args.encoder_embed_dim,
            encoder_output_units=encoder.output_units,
            pretrained_embed=pretrained_decoder_embed,
            share_input_output_embed=args.share_decoder_input_output_embed,
            adaptive_softmax_cutoff=(
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int)
                if args.criterion == 'adaptive_loss' else None
            ),
        )
        return cls(encoder, decoder)
class LSTMEncoder(FairseqEncoder):
    """LSTM encoder: embeds tokens, runs a (possibly bidirectional) LSTM over
    packed sequences, and returns per-step outputs plus final states."""

    def __init__(
        self, dictionary, embed_dim=512, hidden_size=512, num_layers=1,
        dropout_in=0.1, dropout_out=0.1, bidirectional=False,
        left_pad=True, pretrained_embed=None, padding_value=0.,
    ):
        super().__init__(dictionary)
        self.num_layers = num_layers
        self.dropout_in = dropout_in    # dropout on input embeddings
        self.dropout_out = dropout_out  # dropout on LSTM outputs
        self.bidirectional = bidirectional
        self.hidden_size = hidden_size

        num_embeddings = len(dictionary)
        self.padding_idx = dictionary.pad()
        if pretrained_embed is None:
            self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx)
        else:
            self.embed_tokens = pretrained_embed

        self.lstm = LSTM(
            input_size=embed_dim,
            hidden_size=hidden_size,
            # inter-layer dropout only applies with more than one layer
            dropout=self.dropout_out if num_layers > 1 else 0.,
            num_layers=num_layers,
            bidirectional=bidirectional,
        ) if False else LSTM(
            input_size=embed_dim,
            hidden_size=hidden_size,
            num_layers=num_layers,
            dropout=self.dropout_out if num_layers > 1 else 0.,
            bidirectional=bidirectional,
        )
        self.left_pad = left_pad
        self.padding_value = padding_value

        # forward and backward directions are concatenated
        self.output_units = hidden_size
        if bidirectional:
            self.output_units *= 2

    def forward(self, src_tokens, src_lengths):
        """Returns dict with 'encoder_out': (outputs T x B x C, final hiddens,
        final cells) and 'encoder_padding_mask' (T x B) or None."""
        if self.left_pad:
            # convert left-padding to right-padding
            src_tokens = utils.convert_padding_direction(
                src_tokens,
                self.padding_idx,
                left_to_right=True,
            )

        bsz, seqlen = src_tokens.size()

        # embed tokens
        x = self.embed_tokens(src_tokens)
        x = F.dropout(x, p=self.dropout_in, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        # pack embedded source tokens into a PackedSequence
        # (pack_padded_sequence here requires batches sorted by decreasing
        # length -- presumably guaranteed by the data loader; confirm)
        packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist())

        # apply LSTM
        if self.bidirectional:
            state_size = 2 * self.num_layers, bsz, self.hidden_size
        else:
            state_size = self.num_layers, bsz, self.hidden_size
        h0 = x.data.new(*state_size).zero_()
        c0 = x.data.new(*state_size).zero_()
        packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))

        # unpack outputs and apply dropout
        x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_value)
        x = F.dropout(x, p=self.dropout_out, training=self.training)
        assert list(x.size()) == [seqlen, bsz, self.output_units]

        if self.bidirectional:
            def combine_bidir(outs):
                # (2*layers, B, H) -> (layers, B, 2*H): concatenate the two
                # directions' final states per layer
                return outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous().view(self.num_layers, bsz, -1)

            final_hiddens = combine_bidir(final_hiddens)
            final_cells = combine_bidir(final_cells)

        # T x B mask of padding positions
        encoder_padding_mask = src_tokens.eq(self.padding_idx).t()

        return {
            'encoder_out': (x, final_hiddens, final_cells),
            'encoder_padding_mask': encoder_padding_mask if encoder_padding_mask.any() else None
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        # all encoder outputs are batch-second (dim 1)
        encoder_out['encoder_out'] = tuple(
            eo.index_select(1, new_order)
            for eo in encoder_out['encoder_out']
        )
        if encoder_out['encoder_padding_mask'] is not None:
            encoder_out['encoder_padding_mask'] = \
                encoder_out['encoder_padding_mask'].index_select(1, new_order)
        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return int(1e5)  # an arbitrary large number
class AttentionLayer(nn.Module):
def __init__(self, input_embed_dim, output_embed_dim):
super().__init__()
self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
self.output_proj = Linear(input_embed_dim + output_embed_dim, output_embed_dim, bias=False)
def forward(self, input, source_hids, encoder_padding_mask):
# input: bsz x input_embed_dim
# source_hids: srclen x bsz x output_embed_dim
# x: bsz x output_embed_dim
x = self.input_proj(input)
# compute attention
attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
# don't attend over padding
if encoder_padding_mask is not None:
attn_scores = attn_scores.float().masked_fill_(
encoder_padding_mask,
float('-inf')
).type_as(attn_scores) # FP16 support: cast to float and back
attn_scores = F.softmax(attn_scores, dim=0) # srclen x bsz
# sum weighted sources
x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
x = F.tanh(self.output_proj(torch.cat((x, input), dim=1)))
return x, attn_scores
class LSTMDecoder(FairseqIncrementalDecoder):
"""LSTM decoder."""
    def __init__(
        self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512,
        num_layers=1, dropout_in=0.1, dropout_out=0.1, attention=True,
        encoder_embed_dim=512, encoder_output_units=512, pretrained_embed=None,
        share_input_output_embed=False, adaptive_softmax_cutoff=None,
    ):
        """Build the LSTM decoder stack, optional attention, and output
        projection (softmax, adaptive softmax, or shared input embedding)."""
        super().__init__(dictionary)
        self.dropout_in = dropout_in    # dropout on input embeddings
        self.dropout_out = dropout_out  # dropout between layers / on outputs
        self.hidden_size = hidden_size
        self.share_input_output_embed = share_input_output_embed
        self.need_attn = True

        self.adaptive_softmax = None
        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        if pretrained_embed is None:
            self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
        else:
            self.embed_tokens = pretrained_embed

        self.encoder_output_units = encoder_output_units
        # encoder final states initialize the decoder states directly, so the
        # sizes must match exactly
        assert encoder_output_units == hidden_size, \
            'encoder_output_units ({}) != hidden_size ({})'.format(encoder_output_units, hidden_size)
        # TODO another Linear layer if not equal

        # layer 0 consumes [context; token embedding] (input feeding);
        # subsequent layers consume the previous layer's hidden state
        self.layers = nn.ModuleList([
            LSTMCell(
                input_size=encoder_output_units + embed_dim if layer == 0 else hidden_size,
                hidden_size=hidden_size,
            )
            for layer in range(num_layers)
        ])
        self.attention = AttentionLayer(encoder_output_units, hidden_size) if attention else None
        if hidden_size != out_embed_dim:
            self.additional_fc = Linear(hidden_size, out_embed_dim)
        if adaptive_softmax_cutoff is not None:
            # setting adaptive_softmax dropout to dropout_out for now but can be redefined
            # NOTE(review): AdaptiveSoftmax is sized with embed_dim here while
            # the decoder's pre-projection output is hidden_size/out_embed_dim
            # -- confirm the intended input dimension
            self.adaptive_softmax = AdaptiveSoftmax(num_embeddings, embed_dim, adaptive_softmax_cutoff,
                                                    dropout=dropout_out)
        elif not self.share_input_output_embed:
            self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
def forward(self, prev_output_tokens, encoder_out_dict, incremental_state=None):
encoder_out = encoder_out_dict['encoder_out']
encoder_padding_mask = encoder_out_dict['encoder_padding_mask']
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bsz, seqlen = prev_output_tokens.size()
# get outputs from encoder
encoder_outs, _, _ = encoder_out[:3]
srclen = encoder_outs.size(0)
# embed tokens
x = self.embed_tokens(prev_output_tokens)
x = F.dropout(x, p=self.dropout_in, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# initialize previous states (or get from cache during incremental generation)
cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')
if cached_state is not None:
prev_hiddens, prev_cells, input_feed = cached_state
else:
_, encoder_hiddens, encoder_cells = encoder_out[:3]
num_layers = len(self.layers)
prev_hiddens = [encoder_hiddens[i] for i in range(num_layers)]
prev_cells = [encoder_cells[i] for i in range(num_layers)]
input_feed = x.data.new(bsz, self.encoder_output_units).zero_()
attn_scores = x.data.new(srclen, seqlen, bsz).zero_()
outs = []
for j in range(seqlen):
# input feeding: concatenate context vector from previous time step
input = torch.cat((x[j, :, :], input_feed), dim=1)
for i, rnn in enumerate(self.layers):
# recurrent cell
hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
# hidden state becomes the input to the next layer
input = F.dropout(hidden, p=self.dropout_out, training=self.training)
# save state for next time step
prev_hiddens[i] = hidden
prev_cells[i] = cell
# apply attention using the last layer's hidden state
if self.attention is not None:
out, attn_scores[:, j, :] = self.attention(hidden, encoder_outs, encoder_padding_mask)
else:
out = hidden
out = F.dropout(out, p=self.dropout_out, training=self.training)
# input feeding
input_feed = out
# save final output
outs.append(out)
# cache previous states (no-op except during incremental generation)
utils.set_incremental_state(
self, incremental_state, 'cached_state', (prev_hiddens, prev_cells, input_feed))
# collect outputs across time steps
x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size)
# T x B x C -> B x T x C
x = x.transpose(1, 0)
# srclen x tgtlen x bsz -> bsz x tgtlen x srclen
if not self.training and self.need_attn:
attn_scores = attn_scores.transpose(0, 2)
else:
attn_scores = None
# project back to size of vocabulary
if self.adaptive_softmax is None:
if hasattr(self, 'additional_fc'):
x = self.additional_fc(x)
x = F.dropout(x, p=self.dropout_out, training=self.training)
if self.share_input_output_embed:
x = F.linear(x, self.embed_tokens.weight)
else:
x = self.fc_out(x)
return x, attn_scores
def reorder_incremental_state(self, incremental_state, new_order):
super().reorder_incremental_state(incremental_state, new_order)
cached_state = utils.get_incremental_state(self, incremental_state, 'cached_state')
if cached_state is None:
return
def reorder_state(state):
if isinstance(state, list):
return [reorder_state(state_i) for state_i in state]
return state.index_select(0, new_order)
new_state = tuple(map(reorder_state, cached_state))
utils.set_incremental_state(self, incremental_state, 'cached_state', new_state)
def max_positions(self):
"""Maximum output length supported by the decoder."""
return int(1e5) # an arbitrary large number
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Token embedding initialized uniform(-0.1, 0.1) with a zeroed padding row."""
    table = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    table.weight.data.uniform_(-0.1, 0.1)
    table.weight.data[padding_idx].zero_()
    return table
def LSTM(input_size, hidden_size, **kwargs):
    """nn.LSTM whose weight/bias parameters are re-initialized uniform(-0.1, 0.1)."""
    rnn = nn.LSTM(input_size, hidden_size, **kwargs)
    for param_name, param in rnn.named_parameters():
        if any(tag in param_name for tag in ('weight', 'bias')):
            param.data.uniform_(-0.1, 0.1)
    return rnn
def LSTMCell(input_size, hidden_size, **kwargs):
    """nn.LSTMCell whose weight/bias parameters are re-initialized uniform(-0.1, 0.1)."""
    cell = nn.LSTMCell(input_size, hidden_size, **kwargs)
    for param_name, param in cell.named_parameters():
        if any(tag in param_name for tag in ('weight', 'bias')):
            param.data.uniform_(-0.1, 0.1)
    return cell
def Linear(in_features, out_features, bias=True, dropout=0):
    """Linear layer (input: N x T x C) initialized uniform(-0.1, 0.1).

    NOTE: *dropout* is accepted for call-site compatibility but is not used
    inside this helper.
    """
    layer = nn.Linear(in_features, out_features, bias=bias)
    nn.init.uniform_(layer.weight, -0.1, 0.1)
    if bias:
        nn.init.uniform_(layer.bias, -0.1, 0.1)
    return layer
@register_model_architecture('lstm', 'lstm')
def base_architecture(args):
    """Fill in any unset LSTM hyperparameters with their default values."""
    def _default(name, value):
        # keep a value already set on args; otherwise install the default
        setattr(args, name, getattr(args, name, value))

    _default('dropout', 0.1)
    _default('encoder_embed_dim', 512)
    _default('encoder_embed_path', None)
    _default('encoder_hidden_size', args.encoder_embed_dim)
    _default('encoder_layers', 1)
    _default('encoder_bidirectional', False)
    _default('encoder_dropout_in', args.dropout)
    _default('encoder_dropout_out', args.dropout)
    _default('decoder_embed_dim', 512)
    _default('decoder_embed_path', None)
    _default('decoder_hidden_size', args.decoder_embed_dim)
    _default('decoder_layers', 1)
    _default('decoder_out_embed_dim', 512)
    _default('decoder_attention', '1')
    _default('decoder_dropout_in', args.dropout)
    _default('decoder_dropout_out', args.dropout)
    _default('share_decoder_input_output_embed', False)
    _default('share_all_embeddings', False)
    _default('adaptive_softmax_cutoff', '10000,50000,200000')
@register_model_architecture('lstm', 'lstm_wiseman_iwslt_de_en')
def lstm_wiseman_iwslt_de_en(args):
    """Named architecture ``lstm_wiseman_iwslt_de_en``: 256-dim model with
    most dropout disabled; everything else falls through to the base LSTM."""
    def _default(name, value):
        setattr(args, name, getattr(args, name, value))

    _default('dropout', 0.1)
    _default('encoder_embed_dim', 256)
    _default('encoder_dropout_in', 0)
    _default('encoder_dropout_out', 0)
    _default('decoder_embed_dim', 256)
    _default('decoder_out_embed_dim', 256)
    _default('decoder_dropout_in', 0)
    _default('decoder_dropout_out', args.dropout)
    # fill in everything not overridden above
    base_architecture(args)
@register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
def lstm_luong_wmt_en_de(args):
    """Named architecture ``lstm_luong_wmt_en_de``: 4-layer, 1000-dim model;
    everything else falls through to the base LSTM defaults."""
    def _default(name, value):
        setattr(args, name, getattr(args, name, value))

    _default('encoder_embed_dim', 1000)
    _default('encoder_layers', 4)
    _default('encoder_dropout_out', 0)
    _default('decoder_embed_dim', 1000)
    _default('decoder_layers', 4)
    _default('decoder_out_embed_dim', 1000)
    _default('decoder_dropout_out', 0)
    # fill in everything not overridden above
    base_architecture(args)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/__init__.py | translation/fairseq/models/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import importlib
import os
from .fairseq_decoder import FairseqDecoder # noqa: F401
from .fairseq_encoder import FairseqEncoder # noqa: F401
from .fairseq_incremental_decoder import FairseqIncrementalDecoder # noqa: F401
from .fairseq_model import BaseFairseqModel, FairseqModel, FairseqLanguageModel # noqa: F401
from .composite_encoder import CompositeEncoder # noqa: F401
from .distributed_fairseq_model import DistributedFairseqModel # noqa: F401
# model name -> model class (filled in by @register_model)
MODEL_REGISTRY = {}
# architecture name -> model class (filled in by @register_model_architecture)
ARCH_MODEL_REGISTRY = {}
# model name -> list of architecture names registered for it
ARCH_MODEL_INV_REGISTRY = {}
# architecture name -> function that installs default hyperparameters on args
ARCH_CONFIG_REGISTRY = {}
def build_model(args, task):
    """Instantiate the model class registered for ``args.arch``."""
    model_cls = ARCH_MODEL_REGISTRY[args.arch]
    return model_cls.build_model(args, task)
def register_model(name):
    """
    Class decorator that adds a new model type to fairseq under *name*.

    For example::

        @register_model('lstm')
        class LSTM(FairseqModel):
            (...)

    .. note:: All models must implement the :class:`BaseFairseqModel`
        interface. Typically you will extend :class:`FairseqModel` for
        sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
        language modeling tasks.

    Args:
        name (str): the name of the model
    """
    def _register_model_cls(cls):
        # reject duplicate registrations and classes outside the hierarchy
        if name in MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model ({})'.format(name))
        if not issubclass(cls, BaseFairseqModel):
            raise ValueError('Model ({}: {}) must extend BaseFairseqModel'.format(name, cls.__name__))
        MODEL_REGISTRY[name] = cls
        return cls

    return _register_model_cls
def register_model_architecture(model_name, arch_name):
    """
    Function decorator that registers a named architecture for an existing
    model type; the architecture becomes selectable via ``--arch``.

    For example::

        @register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
        def lstm_luong_wmt_en_de(args):
            args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000)
            (...)

    The decorated function takes a single :class:`argparse.Namespace` *args*
    and mutates it in place to install the architecture's defaults.

    Args:
        model_name (str): the name of the Model (Model must already be
            registered)
        arch_name (str): the name of the model architecture (``--arch``)
    """
    def _register_arch_fn(fn):
        # validate before touching any registry
        if model_name not in MODEL_REGISTRY:
            raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name))
        if arch_name in ARCH_MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name))
        if not callable(fn):
            raise ValueError('Model architecture must be callable ({})'.format(arch_name))
        ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
        ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
        ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn

    return _register_arch_fn
# automatically import any Python files in the models/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        model_name = file[:file.find('.py')]
        # importing the module runs its @register_model /
        # @register_model_architecture decorators, filling the registries
        module = importlib.import_module('fairseq.models.' + model_name)

        # extra `model_parser` for sphinx
        if model_name in MODEL_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            group_archs = parser.add_argument_group('Named architectures')
            group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
            group_args = parser.add_argument_group('Additional command-line arguments')
            MODEL_REGISTRY[model_name].add_args(group_args)
            # expose e.g. ``lstm_parser`` at module level for the docs build
            globals()[model_name + '_parser'] = parser
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/fairseq_model.py | translation/fairseq/models/fairseq_model.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import FairseqDecoder, FairseqEncoder
class BaseFairseqModel(nn.Module):
    """Base class for fairseq models."""

    def __init__(self):
        super().__init__()
        self._is_generation_fast = False

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        pass

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        raise NotImplementedError

    def get_targets(self, sample, net_output):
        """Get targets from either the sample or the net's output."""
        return sample['target']

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        """Get normalized probabilities (or log probs) from a net's output."""
        if hasattr(self, 'decoder'):
            # delegate to the decoder when the model has one
            return self.decoder.get_normalized_probs(net_output, log_probs, sample)
        elif torch.is_tensor(net_output):
            # cast to float32 so softmax is numerically stable under FP16
            logits = net_output.float()
            if log_probs:
                return F.log_softmax(logits, dim=-1)
            else:
                return F.softmax(logits, dim=-1)
        raise NotImplementedError

    def max_positions(self):
        """Maximum length supported by the model (None means unconstrained)."""
        return None

    def max_decoder_positions(self):
        """Maximum length supported by the decoder."""
        return self.decoder.max_positions()

    def load_state_dict(self, state_dict, strict=True):
        """Copies parameters and buffers from *state_dict* into this module and
        its descendants.

        Overrides the method in :class:`nn.Module`. Compared with that method
        this additionally "upgrades" *state_dicts* from old checkpoints.
        """
        self.upgrade_state_dict(state_dict)
        super().load_state_dict(state_dict, strict)

    def upgrade_state_dict(self, state_dict):
        """Upgrade old state dicts to work with newer code."""
        self.upgrade_state_dict_named(state_dict, '')

    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade old state dicts, recursing into children under prefix *name*."""
        assert state_dict is not None

        def do_upgrade(m, prefix):
            if len(prefix) > 0:
                prefix += '.'
            for n, c in m.named_children():
                name = prefix + n
                if hasattr(c, 'upgrade_state_dict_named'):
                    c.upgrade_state_dict_named(state_dict, name)
                elif hasattr(c, 'upgrade_state_dict'):
                    c.upgrade_state_dict(state_dict)
                do_upgrade(c, name)

        do_upgrade(self, name)

    def make_generation_fast_(self, **kwargs):
        """Optimize model for faster generation."""
        if self._is_generation_fast:
            return  # only apply once
        self._is_generation_fast = True

        # remove weight norm from all modules in the network
        def apply_remove_weight_norm(module):
            try:
                nn.utils.remove_weight_norm(module)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(apply_remove_weight_norm)

        def apply_make_generation_fast_(module):
            if module != self and hasattr(module, 'make_generation_fast_'):
                module.make_generation_fast_(**kwargs)

        self.apply(apply_make_generation_fast_)

        def train(mode=True):
            # BUG FIX: *mode* previously had no default value, so the
            # conventional no-argument ``model.train()`` call raised a
            # TypeError instead of the intended RuntimeError. Mirror
            # nn.Module.train's ``mode=True`` signature.
            if mode:
                raise RuntimeError('cannot train after make_generation_fast')

        # this model should no longer be used for training
        self.eval()
        self.train = train

    def prepare_for_onnx_export_(self, **kwargs):
        """Make model exportable via ONNX trace."""
        def apply_prepare_for_onnx_export_(module):
            if module != self and hasattr(module, 'prepare_for_onnx_export_'):
                module.prepare_for_onnx_export_(**kwargs)

        self.apply(apply_prepare_for_onnx_export_)
class FairseqModel(BaseFairseqModel):
    """Base class for encoder-decoder models.

    Args:
        encoder (FairseqEncoder): the encoder
        decoder (FairseqDecoder): the decoder
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        assert isinstance(self.encoder, FairseqEncoder)
        assert isinstance(self.decoder, FairseqDecoder)

    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        """Encode the source batch, then decode with teacher forcing.

        Args:
            src_tokens (LongTensor): source-language tokens of shape
                `(batch, src_len)`
            src_lengths (LongTensor): source sentence lengths of shape `(batch)`
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing

        Returns:
            the decoder's output, typically of shape `(batch, tgt_len, vocab)`
        """
        return self.decoder(
            prev_output_tokens,
            self.encoder(src_tokens, src_lengths),
        )

    def max_positions(self):
        """Maximum lengths supported, as an (encoder, decoder) pair."""
        encoder_max = self.encoder.max_positions()
        decoder_max = self.decoder.max_positions()
        return (encoder_max, decoder_max)
class FairseqLanguageModel(BaseFairseqModel):
    """Base class for decoder-only models.

    Args:
        decoder (FairseqDecoder): the decoder
    """

    def __init__(self, decoder):
        super().__init__()
        self.decoder = decoder
        assert isinstance(self.decoder, FairseqDecoder)

    def forward(self, src_tokens, src_lengths):
        """Predict the next token at every position of *src_tokens*.

        Args:
            src_tokens (LongTensor): conditioning tokens of shape
                `(batch, tgt_len)`
            src_lengths (LongTensor): sentence lengths of shape `(batch)`;
                not used by the decoder call itself

        Returns:
            the decoder's output, typically of shape `(batch, seq_len, vocab)`
        """
        return self.decoder(src_tokens)

    def max_positions(self):
        """Maximum length supported by the model."""
        return self.decoder.max_positions()

    @property
    def supported_targets(self):
        # this family of models only predicts the next ("future") token
        return {'future'}
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/composite_encoder.py | translation/fairseq/models/composite_encoder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqEncoder
class CompositeEncoder(FairseqEncoder):
    """
    A wrapper around a dictionary of :class:`FairseqEncoder` objects.

    Runs forward on every wrapped encoder and returns a dictionary of their
    outputs. The first encoder's dictionary is used for initialization.

    Args:
        encoders (dict): a dictionary of :class:`FairseqEncoder` objects.
    """

    def __init__(self, encoders):
        # all encoders share a vocabulary; borrow it from the first entry
        first_encoder = next(iter(encoders.values()))
        super().__init__(first_encoder.dictionary)
        self.encoders = encoders
        # register each encoder so nn.Module tracks its parameters
        for key, encoder in self.encoders.items():
            self.add_module(key, encoder)

    def forward(self, src_tokens, src_lengths):
        """
        Args:
            src_tokens (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            src_lengths (LongTensor): lengths of each source sentence of shape
                `(batch)`

        Returns:
            dict: the outputs from each Encoder, keyed by encoder name
        """
        return {
            key: encoder(src_tokens, src_lengths)
            for key, encoder in self.encoders.items()
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder every encoder's output according to *new_order* (in place)."""
        for key, encoder in self.encoders.items():
            encoder_out[key] = encoder.reorder_encoder_out(encoder_out[key], new_order)
        return encoder_out

    def max_positions(self):
        # only lengths every sub-encoder can handle are supported
        return min(encoder.max_positions() for encoder in self.encoders.values())

    def upgrade_state_dict(self, state_dict):
        for encoder in self.encoders.values():
            encoder.upgrade_state_dict(state_dict)
        return state_dict
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/transformer.py | translation/fairseq/models/transformer.py | # Modified by Zhuohan Li in May 2019 for macaron-net
#
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options
from fairseq import utils
from fairseq.modules import (
AdaptiveSoftmax, CharacterTokenEmbedder, LearnedPositionalEmbedding, MultiheadAttention,
SinusoidalPositionalEmbedding
)
from . import (
FairseqIncrementalDecoder, FairseqEncoder, FairseqLanguageModel, FairseqModel, register_model,
register_model_architecture,
)
@register_model('transformer')
class TransformerModel(FairseqModel):
    """
    Transformer model from `"Attention Is All You Need" (Vaswani, et al, 2017)
    <https://arxiv.org/abs/1706.03762>`_.

    Args:
        encoder (TransformerEncoder): the encoder
        decoder (TransformerDecoder): the decoder

    The Transformer model provides the following named architectures and
    command-line arguments:

    .. argparse::
        :ref: fairseq.models.transformer_parser
        :prog:
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', type=float, metavar='D',
                            help='dropout probability after ReLU in FFN')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        # FIX: a stray trailing comma previously turned this statement into a
        # one-element tuple expression; harmless at runtime but a latent wart.
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--macaron', action='store_true',
                            help='use the macaron network')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_architecture(args)

        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = 1024
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = 1024

        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary

        def build_embedding(dictionary, embed_dim, path=None):
            # build an embedding table, optionally loading pre-trained vectors
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            emb = Embedding(num_embeddings, embed_dim, padding_idx)
            # if provided, load from preloaded dictionaries
            if path:
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            return emb

        if args.share_all_embeddings:
            if src_dict != tgt_dict:
                raise RuntimeError('--share-all-embeddings requires a joined dictionary')
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise RuntimeError(
                    '--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
            if args.decoder_embed_path and (
                    args.decoder_embed_path != args.encoder_embed_path):
                raise RuntimeError('--share-all-embeddings not compatible with --decoder-embed-path')
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = encoder_embed_tokens
            # tying all embeddings implies tying decoder input/output embeddings
            args.share_decoder_input_output_embed = True
        else:
            encoder_embed_tokens = build_embedding(
                src_dict, args.encoder_embed_dim, args.encoder_embed_path
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
            )

        encoder = TransformerEncoder(args, src_dict, encoder_embed_tokens)
        decoder = TransformerDecoder(args, tgt_dict, decoder_embed_tokens)
        # use cls(...) rather than hard-coding TransformerModel so that
        # subclasses registered on top of this model build correctly
        return cls(encoder, decoder)
@register_model('transformer_lm')
class TransformerLanguageModel(FairseqLanguageModel):
    """Decoder-only Transformer language model (no encoder attention)."""

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', default=0.1, type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', default=0., type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', default=0., type=float, metavar='D',
                            help='dropout probability after ReLU in FFN')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-output-dim', type=int, metavar='N',
                            help='decoder output dimension')
        parser.add_argument('--decoder-input-dim', type=int, metavar='N',
                            help='decoder input dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', default=False, action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                            'Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--character-embeddings', default=False, action='store_true',
                            help='if set, uses character embedding convolutions to produce token embeddings')
        parser.add_argument('--character-filters', type=str, metavar='LIST',
                            default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
                            help='size of character embeddings')
        parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4,
                            help='size of character embeddings')
        parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2,
                            help='number of highway layers for character token embeddder')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_lm_architecture(args)
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = args.tokens_per_sample
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = args.tokens_per_sample
        if args.character_embeddings:
            # NOTE(review): eval() of the --character-filters CLI string —
            # only safe for trusted command lines/configs.
            embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters),
                                                  args.character_embedding_dim,
                                                  args.decoder_embed_dim,
                                                  args.char_embedder_highway_layers,
                                                  )
        else:
            embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, task.dictionary.pad())
        # decoder-only LM: no encoder attention, and no final layer norm
        decoder = TransformerDecoder(args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False)
        return TransformerLanguageModel(decoder)
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
left_pad (bool, optional): whether the input is left-padded. Default:
``True``
"""
def __init__(self, args, dictionary, embed_tokens, left_pad=True):
super().__init__(dictionary)
self.dropout = args.dropout
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
left_pad=left_pad,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
self.register_buffer('version', torch.Tensor([2]))
self.normalize = args.encoder_normalize_before
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
def forward(self, src_tokens, src_lengths):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
encoder_padding_mask = None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if self.normalize:
x = self.layer_norm(x)
return {
'encoder_out': x, # T x B x C
'encoder_padding_mask': encoder_padding_mask, # B x T
}
def reorder_encoder_out(self, encoder_out, new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if encoder_out['encoder_out'] is not None:
encoder_out['encoder_out'] = \
encoder_out['encoder_out'].index_select(1, new_order)
if encoder_out['encoder_padding_mask'] is not None:
encoder_out['encoder_padding_mask'] = \
encoder_out['encoder_padding_mask'].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
def upgrade_state_dict(self, state_dict):
    """Upgrade a (possibly old) state dict for new versions of fairseq."""
    if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
        # Sinusoidal embeddings are recomputed on the fly; drop any stored
        # weights and keep only a dtype/device placeholder tensor.
        state_dict.pop('encoder.embed_positions.weights', None)
        state_dict['encoder.embed_positions._float_tensor'] = torch.FloatTensor(1)
    version = state_dict.get('encoder.version', torch.Tensor([1]))
    if utils.item(version[0]) < 2:
        # earlier checkpoints did not normalize after the stack of layers
        self.layer_norm = None
        self.normalize = False
        state_dict['encoder.version'] = torch.Tensor([1])
    return state_dict
class TransformerDecoder(FairseqIncrementalDecoder):
    """
    Transformer decoder consisting of *args.decoder_layers* layers. Each layer
    is a :class:`TransformerDecoderLayer`.
    Args:
        args (argparse.Namespace): parsed command-line arguments
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_tokens (torch.nn.Embedding): output embedding
        no_encoder_attn (bool, optional): whether to attend to encoder outputs.
            Default: ``False``
        left_pad (bool, optional): whether the input is left-padded. Default:
            ``False``
        final_norm (bool, optional): apply a LayerNorm after the last layer
            when ``args.decoder_normalize_before`` is set. Default: ``True``
    """

    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False, left_pad=False, final_norm=True):
        super().__init__(dictionary)
        self.dropout = args.dropout
        self.share_input_output_embed = args.share_decoder_input_output_embed
        input_embed_dim = embed_tokens.embedding_dim
        embed_dim = args.decoder_embed_dim
        output_embed_dim = args.decoder_output_dim
        padding_idx = embed_tokens.padding_idx
        self.max_target_positions = args.max_target_positions
        self.embed_tokens = embed_tokens
        self.embed_scale = math.sqrt(embed_dim)  # todo: try with input_embed_dim
        # Project input embeddings to the decoder width if they differ.
        self.project_in_dim = Linear(input_embed_dim, embed_dim, bias=False,
                                     uniform=False) if embed_dim != input_embed_dim else None
        self.embed_positions = PositionalEmbedding(
            args.max_target_positions, embed_dim, padding_idx,
            left_pad=left_pad,
            learned=args.decoder_learned_pos,
        ) if not args.no_token_positional_embeddings else None
        self.layers = nn.ModuleList([])
        self.layers.extend([
            TransformerDecoderLayer(args, no_encoder_attn)
            for _ in range(args.decoder_layers)
        ])
        self.adaptive_softmax = None
        # Project hidden states to the output-embedding width if they differ.
        self.project_out_dim = Linear(embed_dim, output_embed_dim,
                                      bias=False, uniform=False) if embed_dim != output_embed_dim else None
        if args.adaptive_softmax_cutoff is not None:
            self.adaptive_softmax = AdaptiveSoftmax(
                len(dictionary), output_embed_dim,
                options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
                dropout=args.adaptive_softmax_dropout,
            )
        elif not self.share_input_output_embed:
            # Separate output projection when input/output embeddings are not tied.
            self.embed_out = nn.Parameter(torch.Tensor(len(dictionary), output_embed_dim))
            nn.init.normal_(self.embed_out, mean=0, std=output_embed_dim ** -0.5)
        # 'version' tags checkpoints; consumed by upgrade_state_dict below.
        self.register_buffer('version', torch.Tensor([2]))
        self.normalize = args.decoder_normalize_before and final_norm
        if self.normalize:
            self.layer_norm = LayerNorm(embed_dim)

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`
        Returns:
            tuple:
                - the last decoder layer's output of shape `(batch, tgt_len,
                  vocab)`
                - the last decoder layer's attention weights of shape `(batch,
                  tgt_len, src_len)`
        """
        # embed positions
        positions = self.embed_positions(
            prev_output_tokens,
            incremental_state=incremental_state,
        ) if self.embed_positions is not None else None
        if incremental_state is not None:
            # Incremental decoding: only the newest timestep is fed through;
            # previous steps are cached inside the layers.
            prev_output_tokens = prev_output_tokens[:, -1:]
            if positions is not None:
                positions = positions[:, -1:]
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(prev_output_tokens)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        attn = None
        # Collect each layer's output (useful for probing/analysis).
        inner_states = [x]
        # decoder layers
        for layer in self.layers:
            x, attn = layer(
                x,
                encoder_out['encoder_out'] if encoder_out is not None else None,
                encoder_out['encoder_padding_mask'] if encoder_out is not None else None,
                incremental_state,
                # The causal mask is only needed for full-sequence decoding;
                # incremental steps may attend to all cached positions.
                self_attn_mask=self.buffered_future_mask(x) if incremental_state is None else None,
            )
            inner_states.append(x)
        if self.normalize:
            # Final LayerNorm (pre-norm configurations only).
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        if self.adaptive_softmax is None:
            # project back to size of vocabulary
            if self.share_input_output_embed:
                x = F.linear(x, self.embed_tokens.weight)
            else:
                x = F.linear(x, self.embed_out)
        return x, {'attn': attn, 'inner_states': inner_states}

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        if self.embed_positions is None:
            return self.max_target_positions
        # The positional embedding may impose a tighter limit.
        return min(self.max_target_positions, self.embed_positions.max_positions())

    def buffered_future_mask(self, tensor):
        # Lazily build (and cache) an upper-triangular -inf mask that blocks
        # attention to future positions; rebuilt when the device changes and
        # grown in place when a longer sequence arrives.
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim]

    def upgrade_state_dict(self, state_dict):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
            # Sinusoidal embeddings are recomputed on the fly; drop stored
            # weights and keep only a dtype/device placeholder tensor.
            if 'decoder.embed_positions.weights' in state_dict:
                del state_dict['decoder.embed_positions.weights']
            state_dict['decoder.embed_positions._float_tensor'] = torch.FloatTensor(1)
        for i in range(len(self.layers)):
            # update layer norms
            # Old checkpoints stored the three per-layer norms in an indexed
            # ModuleList; map them onto the current named attributes.
            layer_norm_map = {
                '0': 'self_attn_layer_norm',
                '1': 'encoder_attn_layer_norm',
                '2': 'final_layer_norm'
            }
            for old, new in layer_norm_map.items():
                for m in ('weight', 'bias'):
                    k = 'decoder.layers.{}.layer_norms.{}.{}'.format(i, old, m)
                    if k in state_dict:
                        state_dict['decoder.layers.{}.{}.{}'.format(i, new, m)] = state_dict[k]
                        del state_dict[k]
        if utils.item(state_dict.get('decoder.version', torch.Tensor([1]))[0]) < 2:
            # earlier checkpoints did not normalize after the stack of layers
            self.layer_norm = None
            self.normalize = False
            state_dict['decoder.version'] = torch.Tensor([1])
        return state_dict
class TransformerEncoderLayer(nn.Module):
    """Encoder layer block.
    In the original paper each operation (multi-head attention or FFN) is
    postprocessed with: `dropout -> add residual -> layernorm`. In the
    tensor2tensor code they suggest that learning is more robust when
    preprocessing each layer with layernorm and postprocessing with:
    `dropout -> add residual`. We default to the approach in the paper, but the
    tensor2tensor approach can be enabled by setting
    *args.encoder_normalize_before* to ``True``.
    When ``args.macaron`` is set, an extra half-step FFN is applied before
    self-attention and both FFN residual branches are scaled by 0.5
    (Macaron-Net).
    Args:
        args (argparse.Namespace): parsed command-line arguments
    """

    def __init__(self, args):
        super().__init__()
        self.embed_dim = args.encoder_embed_dim
        self.self_attn = MultiheadAttention(
            self.embed_dim, args.encoder_attention_heads,
            dropout=args.attention_dropout,
        )
        self.dropout = args.dropout
        self.relu_dropout = args.relu_dropout
        self.normalize_before = args.encoder_normalize_before
        self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        # Layer-norm indices: 0 -> self-attention, 1 -> FFN, 2 -> macaron FFN.
        n_layernorm = 2
        self.fc_factor = 1.0
        self.macaron = getattr(args, "macaron", False)
        if self.macaron:
            # Extra pre-attention FFN; its residual (and the regular FFN's)
            # is halved via fc_factor.
            self.macaron_fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
            self.macaron_fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
            self.fc_factor = 0.5
            n_layernorm += 1
        self.layer_norms = nn.ModuleList([LayerNorm(self.embed_dim) for i in range(n_layernorm)])

    def forward(self, x, encoder_padding_mask):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
        Returns:
            encoded output of shape `(batch, src_len, embed_dim)`
        """
        if self.macaron:
            # Macaron half-step FFN: x <- x + 0.5 * FFN(x).
            residual = x
            x = self.maybe_layer_norm(2, x, before=True)
            x = F.relu(self.macaron_fc1(x))
            x = F.dropout(x, p=self.relu_dropout, training=self.training)
            x = self.macaron_fc2(x)
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = residual + self.fc_factor * x
            x = self.maybe_layer_norm(2, x, after=True)
        # Self-attention sub-layer.
        residual = x
        x = self.maybe_layer_norm(0, x, before=True)
        x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.maybe_layer_norm(0, x, after=True)
        # FFN sub-layer (residual scaled by fc_factor: 0.5 in macaron mode).
        residual = x
        x = self.maybe_layer_norm(1, x, before=True)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=self.relu_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + self.fc_factor * x
        x = self.maybe_layer_norm(1, x, after=True)
        return x

    def maybe_layer_norm(self, i, x, before=False, after=False):
        # Apply layer norm *i* either before (pre-norm) or after (post-norm)
        # its sub-layer depending on self.normalize_before; exactly one of
        # `before`/`after` must be set.
        assert before ^ after
        if after ^ self.normalize_before:
            return self.layer_norms[i](x)
        else:
            return x
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs.
Default: ``False``
"""
def __init__(self, args, no_encoder_attn=False):
    """Build one decoder layer: self-attention, optional encoder attention,
    an FFN, and (when ``args.macaron`` is set) an extra macaron FFN applied
    before self-attention.
    """
    super().__init__()
    self.embed_dim = args.decoder_embed_dim
    self.self_attn = MultiheadAttention(
        self.embed_dim, args.decoder_attention_heads,
        dropout=args.attention_dropout,
    )
    self.dropout = args.dropout
    self.relu_dropout = args.relu_dropout
    self.normalize_before = args.decoder_normalize_before
    self.self_attn_layer_norm = LayerNorm(self.embed_dim)
    if no_encoder_attn:
        # Decoder-only configurations (no cross-attention).
        self.encoder_attn = None
        self.encoder_attn_layer_norm = None
    else:
        self.encoder_attn = MultiheadAttention(
            self.embed_dim, args.decoder_attention_heads,
            dropout=args.attention_dropout,
        )
        self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
    self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
    self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
    self.fc_factor = 1.0
    self.macaron = getattr(args, "macaron", False)
    if self.macaron:
        # NOTE(review): the macaron FFN below is sized with
        # args.encoder_ffn_embed_dim even though this is the *decoder*
        # layer — presumably a copy/paste from TransformerEncoderLayer.
        # Confirm whether decoder_ffn_embed_dim was intended (changing it
        # now would invalidate existing checkpoints).
        self.macaron_fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
        self.macaron_fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
        self.macaron_layer_norm = LayerNorm(self.embed_dim)
        # Both FFN residual branches are halved in macaron mode.
        self.fc_factor = 0.5
    self.final_layer_norm = LayerNorm(self.embed_dim)
    self.need_attn = True
    self.onnx_trace = False
def prepare_for_onnx_export_(self):
    # Switch the layer to ONNX-traceable code paths (flag read in forward).
    self.onnx_trace = True
def forward(self, x, encoder_out, encoder_padding_mask, incremental_state,
prev_self_attn_state=None, prev_attn_state=None, self_attn_mask=None,
self_attn_padding_mask=None):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
Returns:
encoded output of shape `(batch, src_len, embed_dim)`
"""
if self.macaron:
residual = x
x = self.maybe_layer_norm(self.macaron_layer_norm, x, before=True)
x = F.relu(self.macaron_fc1(x))
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.macaron_fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + self.fc_factor * x
x = self.maybe_layer_norm(self.macaron_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)
if prev_self_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_self_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.self_attn._set_input_buffer(incremental_state, saved_state)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)
attn = None
if self.encoder_attn is not None:
residual = x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)
if prev_attn_state is not None:
if incremental_state is None:
incremental_state = {}
prev_key, prev_value = prev_attn_state
saved_state = {"prev_key": prev_key, "prev_value": prev_value}
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=(not self.training and self.need_attn),
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)
residual = x
x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + self.fc_factor * x
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | true |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/models/fairseq_decoder.py | translation/fairseq/models/fairseq_decoder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.nn as nn
import torch.nn.functional as F
class FairseqDecoder(nn.Module):
    """Base class for decoders."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary

    def forward(self, prev_output_tokens, encoder_out):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
        Returns:
            tuple:
                - the last decoder layer's output of shape
                  `(batch, tgt_len, vocab)`
                - the last decoder layer's attention weights of shape
                  `(batch, tgt_len, src_len)`
        """
        raise NotImplementedError

    def get_normalized_probs(self, net_output, log_probs, sample):
        """Get normalized probabilities (or log probs) from a net's output."""
        adaptive_softmax = getattr(self, 'adaptive_softmax', None)
        if adaptive_softmax is not None:
            # Adaptive softmax computes log-probs for the sampled targets only.
            assert sample is not None and 'target' in sample
            out = adaptive_softmax.get_log_prob(net_output[0], sample['target'])
            return out if log_probs else out.exp_()
        logits = net_output[0].float()
        return F.log_softmax(logits, dim=-1) if log_probs else F.softmax(logits, dim=-1)

    def max_positions(self):
        """Maximum input length supported by the decoder."""
        return 1e6  # an arbitrary large number

    def upgrade_state_dict(self, state_dict):
        """Upgrade a (possibly old) state dict for new versions of fairseq."""
        return state_dict
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/fairseq_optimizer.py | translation/fairseq/optim/fairseq_optimizer.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
class FairseqOptimizer(object):
    """Thin fairseq-facing wrapper around a :class:`torch.optim.Optimizer`.
    Subclasses create the wrapped optimizer and expose it via ``_optimizer``.
    """

    def __init__(self, args, params):
        super().__init__()
        self.args = args
        self.params = list(params)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        pass

    @property
    def optimizer(self):
        """Return a torch.optim.optimizer.Optimizer instance."""
        if not hasattr(self, '_optimizer'):
            raise NotImplementedError
        if not isinstance(self._optimizer, torch.optim.Optimizer):
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        return self._optimizer

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        raise NotImplementedError

    def get_lr(self):
        """Return the current learning rate."""
        first_group = self.optimizer.param_groups[0]
        return first_group['lr']

    def set_lr(self, lr):
        """Set the learning rate."""
        for group in self.optimizer.param_groups:
            group['lr'] = lr

    def state_dict(self):
        """Return the optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.
        In general we should prefer the configuration of the existing optimizer
        instance (e.g., learning rate) over that found in the state_dict. This
        allows us to resume training from a checkpoint using a new set of
        optimizer args.
        """
        self.optimizer.load_state_dict(state_dict)
        if optimizer_overrides:
            # Override learning rate, momentum, etc. with the latest values.
            for group in self.optimizer.param_groups:
                group.update(optimizer_overrides)

    def backward(self, loss):
        loss.backward()

    def multiply_grads(self, c):
        """Multiplies grads by a constant ``c``."""
        for param in self.params:
            param.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm."""
        if max_norm <= 0:
            # No clipping requested: just report the total gradient norm.
            return math.sqrt(sum(p.grad.data.norm() ** 2 for p in self.params))
        return torch.nn.utils.clip_grad_norm_(self.params, max_norm)

    def step(self, closure=None):
        """Performs a single optimization step."""
        self.optimizer.step(closure)

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self.optimizer.zero_grad()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/adam.py | translation/fairseq/optim/adam.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.optim
from . import FairseqOptimizer, register_optimizer
@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
    """Fairseq wrapper around the local :class:`Adam` implementation."""

    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = Adam(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B',
                            help='betas for Adam optimizer')
        parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D',
                            help='epsilon for Adam optimizer')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        import ast  # local import: avoid touching module-level deps
        return {
            'lr': self.args.lr[0],
            # literal_eval instead of eval: parses the "(0.9, 0.999)" tuple
            # literal without executing arbitrary command-line text.
            'betas': ast.literal_eval(self.args.adam_betas),
            'eps': self.args.adam_eps,
            'weight_decay': self.args.weight_decay,
        }
class Adam(torch.optim.Optimizer):
    """Implements Adam algorithm.
    This implementation is modified from torch.optim.Adam based on:
    `Fixed Weight Decay Regularization in Adam`
    (see https://arxiv.org/abs/1711.05101)
    It has been proposed in `Adam: A Method for Stochastic Optimization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Decay the first and second moment running average coefficient.
                # Keyword alpha/value forms replace the deprecated positional
                # (Number, Tensor) overloads removed in newer PyTorch releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                if group['weight_decay'] != 0:
                    # Decoupled (AdamW-style) weight decay: p <- p * (1 - wd * lr),
                    # applied before the Adam update.
                    p.data.mul_(1 - group['weight_decay'] * group['lr'])
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/nag.py | translation/fairseq/optim/nag.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from torch.optim.optimizer import Optimizer, required
from . import FairseqOptimizer, register_optimizer
@register_optimizer('nag')
class FairseqNAG(FairseqOptimizer):
    """Fairseq wrapper around the local :class:`NAG` optimizer."""

    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = NAG(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """
        Kwargs used to (re)build the optimizer. These override any optimizer
        settings stored in checkpoints, so training can resume with a
        different configuration, e.g., a new learning rate.
        """
        return dict(
            lr=self.args.lr[0],
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay,
        )
class NAG(Optimizer):
    """Nesterov accelerated gradient with LR-change correction and decoupled
    weight decay.
    Arguments:
        params (iterable): parameters to optimize or dicts of param groups
        lr (float): learning rate (required)
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay factor (default: 0)
    """

    def __init__(self, params, lr=required, momentum=0, weight_decay=0):
        defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
        super(NAG, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            lr = group['lr']
            # Rescale the momentum buffer when the LR changed since last step.
            lr_old = group.get('lr_old', lr)
            lr_correct = lr / lr_old
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    param_state['momentum_buffer'] = d_p.clone().zero_()
                buf = param_state['momentum_buffer']
                if weight_decay != 0:
                    # Decoupled weight decay applied directly to the weights.
                    p.data.mul_(1 - lr * weight_decay)
                # Keyword alpha form replaces the deprecated positional
                # (Number, Tensor) add_ overload removed in newer PyTorch.
                p.data.add_(buf, alpha=momentum * momentum * lr_correct)
                p.data.add_(d_p, alpha=-(1 + momentum) * lr)
                buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr)
            group['lr_old'] = lr
        return loss
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/adagrad.py | translation/fairseq/optim/adagrad.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim
from . import FairseqOptimizer, register_optimizer
@register_optimizer('adagrad')
class Adagrad(FairseqOptimizer):
    """Fairseq wrapper around :class:`torch.optim.Adagrad`."""

    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """
        Kwargs used to (re)build the optimizer. These override any optimizer
        settings stored in checkpoints, so training can resume with a
        different configuration, e.g., a new learning rate.
        """
        return dict(
            lr=self.args.lr[0],
            weight_decay=self.args.weight_decay,
        )
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/fp16_optimizer.py | translation/fairseq/optim/fp16_optimizer.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import optim, utils
class DynamicLossScaler:
    """Dynamically adjusts the FP16 loss scale based on overflow history:
    halved (by ``scale_factor``) on overflow, grown again after
    ``scale_window`` consecutive clean iterations.
    """

    def __init__(self, init_scale=2.**15, scale_factor=2., scale_window=2000):
        self.loss_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self._iter = 0
        self._last_overflow_iter = -1

    def update_scale(self, overflow):
        """Shrink the scale on overflow; grow it after a clean window."""
        if overflow:
            self.loss_scale /= self.scale_factor
            self._last_overflow_iter = self._iter
        else:
            clean_iters = self._iter - self._last_overflow_iter
            if clean_iters % self.scale_window == 0:
                self.loss_scale *= self.scale_factor
        self._iter += 1

    @staticmethod
    def has_overflow(grad_norm):
        """Return True if *grad_norm* is +inf or NaN."""
        return grad_norm == float('inf') or grad_norm != grad_norm
class FP16Optimizer(optim.FairseqOptimizer):
    """Wrap an FP32 optimizer to train FP16 models with dynamic loss scaling.
    A single flat FP32 "master" copy of all parameters is kept; FP16 grads
    are scaled, synced into the master copy, unscaled, stepped in FP32, and
    the updated values are copied back into the FP16 model parameters.
    """

    def __init__(self, args, params, fp32_optimizer, fp32_params):
        super().__init__(args, params)
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params  # flat FP32 master parameter tensor
        self.scaler = DynamicLossScaler(
            init_scale=args.fp16_init_scale,
            # More workers -> more updates per unit time, so shrink the window
            # after which the loss scale is allowed to grow.
            scale_window=(2**14 / args.distributed_world_size),
        )

    @staticmethod
    def build_optimizer(args, params):
        # create FP32 copy of parameters and grads
        total_param_size = sum(p.data.numel() for p in params)
        fp32_params = params[0].new(0).float().new(total_param_size)
        offset = 0
        for p in params:
            numel = p.data.numel()
            fp32_params[offset:offset+numel].copy_(p.data.view(-1))
            offset += numel
        fp32_params = torch.nn.Parameter(fp32_params)
        fp32_params.grad = fp32_params.data.new(total_param_size)
        # The wrapped optimizer only ever sees the single flat FP32 parameter.
        fp32_optimizer = optim.build_optimizer(args, [fp32_params])
        return FP16Optimizer(args, params, fp32_optimizer, fp32_params)

    @property
    def optimizer(self):
        # Delegate to the wrapped FP32 optimizer.
        return self.fp32_optimizer.optimizer

    @property
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)

    def state_dict(self):
        """Return the optimizer's state dict."""
        state_dict = self.fp32_optimizer.state_dict()
        # Persist the loss scale so resumed runs keep scaling continuity.
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.
        In general we should prefer the configuration of the existing optimizer
        instance (e.g., learning rate) over that found in the state_dict. This
        allows us to resume training from a checkpoint using a new set of
        optimizer args.
        """
        if 'loss_scale' in state_dict:
            self.scaler.loss_scale = state_dict['loss_scale']
        self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)

    def backward(self, loss):
        # Scale the loss up before backprop so small FP16 grads don't
        # underflow; the scale is divided back out in _sync_fp16_grads_to_fp32.
        loss = loss * self.scaler.loss_scale
        loss.backward()
        self._needs_sync = True

    def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
        # Copy (and unscale) FP16 grads into the flat FP32 grad buffer;
        # no-op if already synced since the last backward().
        if self._needs_sync:
            # copy FP16 grads to FP32
            offset = 0
            for p in self.params:
                if not p.requires_grad:
                    continue
                grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
                numel = grad_data.numel()
                self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1))
                offset += numel
            # correct for dynamic loss scaler
            self.fp32_params.grad.data.mul_(multiply_grads / self.scaler.loss_scale)
            self._needs_sync = False

    def multiply_grads(self, c):
        """Multiplies grads by a constant ``c``."""
        if self._needs_sync:
            # Fold the multiplication into the pending grad sync.
            self._sync_fp16_grads_to_fp32(c)
        else:
            self.fp32_params.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm and updates dynamic loss scaler."""
        self._sync_fp16_grads_to_fp32()
        grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)
        # detect overflow and adjust loss scale
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        self.scaler.update_scale(overflow)
        if overflow:
            if self.scaler.loss_scale <= self.args.min_loss_scale:
                raise Exception((
                    'Minimum loss scale reached ({}). Your loss is probably exploding. '
                    'Try lowering the learning rate, using gradient clipping or '
                    'increasing the batch size.'
                ).format(self.args.min_loss_scale))
            # Signals the trainer to skip this update and retry the batch.
            raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
        return grad_norm

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._sync_fp16_grads_to_fp32()
        self.fp32_optimizer.step(closure)
        # copy FP32 params back into FP16 model
        offset = 0
        for p in self.params:
            if not p.requires_grad:
                continue
            numel = p.data.numel()
            p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
            offset += numel

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self.fp32_optimizer.zero_grad()
        for p in self.params:
            if p.grad is not None:
                p.grad.detach_()
                p.grad.zero_()
        self._needs_sync = False
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/__init__.py | translation/fairseq/optim/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_optimizer import FairseqOptimizer
from .fp16_optimizer import FP16Optimizer
OPTIMIZER_REGISTRY = {}
OPTIMIZER_CLASS_NAMES = set()


def build_optimizer(args, params):
    """Instantiate the optimizer selected by ``args.optimizer``.

    Parameters that do not require gradients are filtered out first.
    """
    trainable = [p for p in params if p.requires_grad]
    return OPTIMIZER_REGISTRY[args.optimizer](args, trainable)
def register_optimizer(name):
    """Decorator that registers an optimizer class under ``name``.

    The class must extend ``FairseqOptimizer``; both the registry name and
    the class name must be unique.
    """
    def register_optimizer_cls(cls):
        class_name = cls.__name__
        if name in OPTIMIZER_REGISTRY:
            raise ValueError('Cannot register duplicate optimizer ({})'.format(name))
        if not issubclass(cls, FairseqOptimizer):
            raise ValueError('Optimizer ({}: {}) must extend FairseqOptimizer'.format(name, class_name))
        if class_name in OPTIMIZER_CLASS_NAMES:
            # Optimizer class names identify optimizers inside checkpoints,
            # so they must be globally unique as well.
            raise ValueError('Cannot register optimizer with duplicate class name ({})'.format(class_name))
        OPTIMIZER_REGISTRY[name] = cls
        OPTIMIZER_CLASS_NAMES.add(class_name)
        return cls
    return register_optimizer_cls
# automatically import any Python files in the optim/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        # strip only the trailing '.py'; the previous
        # `file[:file.find('.py')]` cut at the *first* occurrence of
        # '.py', corrupting module names that contain '.py' mid-name
        # (e.g. 'foo.pyramid.py' -> 'foo').
        module = file[:-len('.py')]
        importlib.import_module('fairseq.optim.' + module)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/sgd.py | translation/fairseq/optim/sgd.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim
from . import FairseqOptimizer, register_optimizer
@register_optimizer('sgd')
class SGD(FairseqOptimizer):
    """Plain stochastic gradient descent, wrapping ``torch.optim.SGD``."""

    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = torch.optim.SGD(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        config = {}
        config['lr'] = self.args.lr[0]
        config['momentum'] = self.args.momentum
        config['weight_decay'] = self.args.weight_decay
        return config
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/lr_scheduler/fixed_schedule.py | translation/fairseq/optim/lr_scheduler/fixed_schedule.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
    """Decay the LR on a fixed per-epoch schedule, with optional linear warmup."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        # default warmup_updates to 0 when the attribute is absent or falsy
        args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0
        self.lr = args.lr[0]
        self.warmup_factor = 1. / args.warmup_updates if args.warmup_updates > 0 else 1

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
                            help='force annealing at specified epoch')
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')

    def get_next_lr(self, epoch):
        """Return the LR for ``epoch``: a value from the fixed list, or the
        last value shrunk geometrically once forced annealing kicks in."""
        schedule = self.args.lr
        anneal_epoch = self.args.force_anneal
        if anneal_epoch is not None and epoch >= anneal_epoch:
            # anneal based on lr_shrink
            return schedule[-1] * self.args.lr_shrink ** (epoch + 1 - anneal_epoch)
        # use fixed LR schedule
        return schedule[min(epoch, len(schedule) - 1)]

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        self.lr = self.get_next_lr(epoch)
        self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update (linear warmup phase)."""
        warmup = self.args.warmup_updates
        if warmup > 0 and num_updates <= warmup:
            self.warmup_factor = num_updates / float(warmup)
            self.optimizer.set_lr(self.warmup_factor * self.lr)
        return self.optimizer.get_lr()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py | translation/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('cosine')
class CosineSchedule(FairseqLRScheduler):
    """Assign LR based on a cyclical schedule that follows the cosine function.

    See https://arxiv.org/pdf/1608.03983.pdf (SGDR) for details.

    We also support a warmup phase where we linearly increase the learning rate
    from some initial learning rate (`--warmup-init-lr`) until the configured
    max learning rate (`--max-lr`).

    During warmup:

        lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
        lr = lrs[update_num]

    After warmup:

        lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(pi * t_curr / t_i))

    where
        t_curr is the number of updates into the current period, and
        t_i is the length of the current period, which is scaled by t_mult
        after every completed period
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with cosine.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        warmup_end_lr = args.max_lr
        # warmup_init_lr defaults to the cycle floor (args.lr[0])
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = args.lr[0]

        # args.lr[0] is the floor of each cosine cycle; args.max_lr the peak
        self.min_lr = args.lr[0]
        self.max_lr = args.max_lr
        assert self.max_lr > self.min_lr, 'max_lr must be more than lr'

        self.t_mult = args.t_mult
        self.period = args.lr_period_updates

        if args.warmup_updates > 0:
            # linearly warmup for the first args.warmup_updates
            self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
        else:
            # unused when there is no warmup phase
            self.lr_step = 1

        self.warmup_updates = args.warmup_updates
        self.lr_shrink = args.lr_shrink

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
                            help='max learning rate, must be more than args.lr')
        parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
                            help='factor to grow the length of each period')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
                            help='initial number of updates per period')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            # linear warmup phase
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
        else:
            curr_updates = num_updates - self.args.warmup_updates
            if self.t_mult != 1:
                # closed form for the index i of the current period when
                # period lengths grow geometrically by t_mult
                i = math.floor(math.log(1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult))
                t_i = self.t_mult ** i * self.period
                t_curr = curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period
            else:
                # constant-length periods
                i = math.floor(curr_updates / self.period)
                t_i = self.period
                t_curr = curr_updates - (self.period * i)

            # shrink both cosine endpoints after every completed period
            lr_shrink = self.lr_shrink ** i
            min_lr = self.min_lr * lr_shrink
            max_lr = self.max_lr * lr_shrink

            self.lr = min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / t_i))

        self.optimizer.set_lr(self.lr)
        return self.lr
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py | translation/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
    """Decay the LR based on the inverse square root of the update number.

    We also support a warmup phase where we linearly increase the learning rate
    from some initial learning rate (`--warmup-init-lr`) until the configured
    learning rate (`--lr`). Thereafter we decay proportional to the number of
    updates, with a decay factor set to align with the configured learning rate.

    During warmup:
        lr = warmup_init_lr + num_updates * (lr - warmup_init_lr) / warmup_updates
    After warmup:
        lr = decay_factor / sqrt(num_updates)
    where
        decay_factor = args.lr * sqrt(args.warmup_updates)
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with inverse_sqrt.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        peak_lr = args.lr[0]
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = peak_lr

        # per-update LR increment during the linear warmup phase
        self.lr_step = (peak_lr - args.warmup_init_lr) / args.warmup_updates
        # chosen so the two phases meet exactly at num_updates == warmup_updates
        self.decay_factor = peak_lr * args.warmup_updates ** 0.5

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # the LR only changes per update, never at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            self.lr = self.args.warmup_init_lr + self.lr_step * num_updates
        else:
            self.lr = self.decay_factor * num_updates ** -0.5
        self.optimizer.set_lr(self.lr)
        return self.lr
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py | translation/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('triangular')
class TriangularSchedule(FairseqLRScheduler):
    """Assign LR based on a triangular cyclical schedule.

    See https://arxiv.org/pdf/1506.01186.pdf for details
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with triangular.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        base_lr = args.lr[0]
        assert args.max_lr > base_lr, 'max_lr must be more than lr'
        self.min_lr = base_lr
        self.max_lr = args.max_lr
        # half a cycle: LR rises for `stepsize` updates, then falls back
        self.stepsize = args.lr_period_updates // 2
        self.lr_shrink = args.lr_shrink
        self.shrink_min = args.shrink_min

        # start at the bottom of the first cycle
        self.lr = self.min_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
                            help='max learning rate, must be more than args.lr')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
                            help='initial number of updates per period (cycle length)')
        parser.add_argument('--shrink-min', action='store_true',
                            help='if set, also shrinks min lr')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        cycle = math.floor(num_updates / (2 * self.stepsize))

        # shrink the cycle amplitude geometrically after every full cycle
        shrink = self.lr_shrink ** cycle
        peak = self.max_lr * shrink
        floor = self.min_lr * shrink if self.shrink_min else self.min_lr

        # x sweeps 1 -> 0 -> 1 over each cycle, so (1 - x) is triangular
        x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1)
        self.lr = floor + (peak - floor) * max(0, (1 - x))

        self.optimizer.set_lr(self.lr)
        return self.lr
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py | translation/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .. import FairseqOptimizer
class FairseqLRScheduler(object):
    """Base class for learning rate schedulers.

    Concrete schedulers override ``step`` (per-epoch) and/or ``step_update``
    (per-update) and register themselves via ``register_lr_scheduler``.
    """

    def __init__(self, args, optimizer):
        super().__init__()
        if not isinstance(optimizer, FairseqOptimizer):
            raise ValueError('optimizer must be an instance of FairseqOptimizer')
        self.args = args
        self.optimizer = optimizer
        # best validation loss seen so far
        self.best = None

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        pass

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {'best': self.best}

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.best = state_dict['best']

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        if val_loss is None:
            return
        self.best = val_loss if self.best is None else min(self.best, val_loss)

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        return self.optimizer.get_lr()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/lr_scheduler/__init__.py | translation/fairseq/optim/lr_scheduler/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_lr_scheduler import FairseqLRScheduler
LR_SCHEDULER_REGISTRY = {}


def build_lr_scheduler(args, optimizer):
    """Look up the scheduler named by ``args.lr_scheduler`` and build it."""
    scheduler_cls = LR_SCHEDULER_REGISTRY[args.lr_scheduler]
    return scheduler_cls(args, optimizer)
def register_lr_scheduler(name):
    """Decorator that registers an LR scheduler class under ``name``.

    The class must extend ``FairseqLRScheduler`` and the name must be unique.
    """
    def register_lr_scheduler_cls(cls):
        if name in LR_SCHEDULER_REGISTRY:
            raise ValueError('Cannot register duplicate LR scheduler ({})'.format(name))
        if not issubclass(cls, FairseqLRScheduler):
            raise ValueError('LR Scheduler ({}: {}) must extend FairseqLRScheduler'.format(name, cls.__name__))
        LR_SCHEDULER_REGISTRY[name] = cls
        return cls
    return register_lr_scheduler_cls
# automatically import any Python files in the optim/lr_scheduler/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        # strip only the trailing '.py'; the previous
        # `file[:file.find('.py')]` cut at the *first* occurrence of
        # '.py', corrupting module names that contain '.py' mid-name.
        module = file[:-len('.py')]
        importlib.import_module('fairseq.optim.lr_scheduler.' + module)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py | translation/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim.lr_scheduler
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('reduce_lr_on_plateau')
class ReduceLROnPlateau(FairseqLRScheduler):
    """Decay the LR by a factor every time the validation loss plateaus.

    Thin wrapper around ``torch.optim.lr_scheduler.ReduceLROnPlateau``.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        # patience=0: shrink the LR as soon as the val loss fails to improve
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer.optimizer, patience=0, factor=args.lr_shrink)

    def state_dict(self):
        """Return the LR scheduler state dict."""
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        """Load an LR scheduler state dict."""
        self.lr_scheduler.best = state_dict['best']
        # 'last_epoch' may be absent in checkpoints written by older code
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        if val_loss is not None:
            self.lr_scheduler.step(val_loss, epoch)
        else:
            # no validation this epoch; just keep the epoch counter in sync
            self.lr_scheduler.last_epoch = epoch
        return self.optimizer.get_lr()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/criterions/label_smoothed_cross_entropy.py | translation/fairseq/criterions/label_smoothed_cross_entropy.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Cross entropy with uniform label smoothing over the output vocabulary."""

    def __init__(self, args, task):
        super().__init__(args, task)
        # probability mass redistributed uniformly across the vocabulary
        self.eps = args.label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
                            help='epsilon for label smoothing, 0 means no label smoothing')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Return ``(smoothed_loss, nll_loss)``; both summed when ``reduce``."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)
        # mask out positions whose target is padding
        non_pad_mask = target.ne(self.padding_idx)
        # negative log prob of the gold token at each position
        nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
        # sum of -log p over the whole vocab -> uniform-smoothing term
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
        if reduce:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()
        # spread eps uniformly across the vocabulary entries
        eps_i = self.eps / lprobs.size(-1)
        loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
        return loss, nll_loss

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # losses are reported in base 2 (bits per token / per sample)
        return {
            'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
            'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/criterions/cross_entropy.py | translation/fairseq/criterions/cross_entropy.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch.nn.functional as F
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
    """Standard token-level negative log-likelihood loss."""

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        loss = F.nll_loss(lprobs, target, size_average=False, ignore_index=self.padding_idx,
                          reduce=reduce)
        if self.args.sentence_avg:
            sample_size = sample['target'].size(0)
        else:
            sample_size = sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # loss is reported in base 2 (bits)
        agg_output = {
            'loss': loss_sum / sample_size / math.log(2),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        if sample_size != ntokens:
            # also report a true per-token NLL when normalizing by sentences
            agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
        return agg_output
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/criterions/fairseq_criterion.py | translation/fairseq/criterions/fairseq_criterion.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from torch.nn.modules.loss import _Loss
class FairseqCriterion(_Loss):
    """Base class for all fairseq training criterions."""

    def __init__(self, args, task):
        super().__init__()
        self.args = args
        # index of the padding symbol, which is excluded from the loss
        self.padding_idx = task.target_dictionary.pad()

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        pass

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        raise NotImplementedError

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        raise NotImplementedError

    @staticmethod
    def grad_denom(sample_sizes):
        """Compute the gradient denominator for a set of sample sizes."""
        return sum(sample_sizes)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/criterions/__init__.py | translation/fairseq/criterions/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_criterion import FairseqCriterion
CRITERION_REGISTRY = {}
CRITERION_CLASS_NAMES = set()


def build_criterion(args, task):
    """Instantiate the criterion selected by ``args.criterion``."""
    criterion_cls = CRITERION_REGISTRY[args.criterion]
    return criterion_cls(args, task)
def register_criterion(name):
    """Decorator that registers a criterion class under ``name``.

    The class must extend ``FairseqCriterion``; both the registry name and
    the class name must be unique.
    """
    def register_criterion_cls(cls):
        class_name = cls.__name__
        if name in CRITERION_REGISTRY:
            raise ValueError('Cannot register duplicate criterion ({})'.format(name))
        if not issubclass(cls, FairseqCriterion):
            raise ValueError('Criterion ({}: {}) must extend FairseqCriterion'.format(name, class_name))
        if class_name in CRITERION_CLASS_NAMES:
            # Criterion class names identify criterions inside checkpoints,
            # so they must be globally unique as well.
            raise ValueError('Cannot register criterion with duplicate class name ({})'.format(class_name))
        CRITERION_REGISTRY[name] = cls
        CRITERION_CLASS_NAMES.add(class_name)
        return cls
    return register_criterion_cls
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        # strip only the trailing '.py'; the previous
        # `file[:file.find('.py')]` cut at the *first* occurrence of
        # '.py', corrupting module names that contain '.py' mid-name.
        module = file[:-len('.py')]
        importlib.import_module('fairseq.criterions.' + module)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/criterions/adaptive_loss.py | translation/fairseq/criterions/adaptive_loss.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch.nn.functional as F
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('adaptive_loss')
class AdaptiveLoss(FairseqCriterion):
    """This is an implementation of the loss function accompanying the adaptive softmax approximation for
    graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs"
    (http://arxiv.org/abs/1609.04309)."""

    def __init__(self, args, task):
        super().__init__(args, task)

        # c10d DDP is explicitly unsupported (see error message below)
        if args.ddp_backend == 'c10d':
            raise Exception(
                'AdaptiveLoss is not compatible with the c10d '
                'version of DistributedDataParallel. Please use '
                '`--ddp-backend=no_c10d` instead.'
            )

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        assert hasattr(model.decoder, 'adaptive_softmax') and model.decoder.adaptive_softmax is not None
        adaptive_softmax = model.decoder.adaptive_softmax

        net_output = model(**sample['net_input'])
        orig_target = model.get_targets(sample, net_output)

        nsentences = orig_target.size(0)
        orig_target = orig_target.view(-1)

        bsz = orig_target.size(0)

        # adaptive softmax returns one (logits, target) pair per vocab cluster
        logits, target = adaptive_softmax(net_output[0], orig_target)
        assert len(target) == len(logits)

        # accumulate cluster losses into a single scalar (or per-position
        # vector when reduce is False)
        loss = net_output[0].new(1 if reduce else bsz).zero_()

        for i in range(len(target)):
            if target[i] is not None:
                # NOTE(review): valid class indices are 0..size(1)-1, so the
                # upper bound here looks like it should be '<' rather than
                # '<=' -- confirm before tightening
                assert (target[i].min() >= 0 and target[i].max() <= logits[i].size(1))
                loss += F.cross_entropy(logits[i], target[i], size_average=False, ignore_index=self.padding_idx,
                                        reduce=reduce)

        # count only non-pad target tokens
        orig = utils.strip_pad(orig_target, self.padding_idx)
        ntokens = orig.numel()
        sample_size = sample['target'].size(0) if self.args.sentence_avg else ntokens
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # losses are reported in base 2 (bits)
        agg_output = {
            'loss': loss_sum / sample_size / math.log(2),
            'nll_loss': loss_sum / sample_size / math.log(2),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        if sample_size != ntokens:
            # prefer a per-token normalization for nll_loss when it differs
            agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
        return agg_output
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/linearized_convolution.py | translation/fairseq/modules/linearized_convolution.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn.functional as F
from fairseq import utils
from .conv_tbc import ConvTBC
class LinearizedConvolution(ConvTBC):
    """An optimized version of nn.Conv1d.
    At training time, this module uses ConvTBC, which is an optimized version
    of Conv1d. At inference time, it optimizes incremental generation (i.e.,
    one time step at a time) by replacing the convolutions with linear layers.
    Note that the input order changes from training to inference.
    """
    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        # Cached (out_channels, kw * in_channels) view of the conv weight used
        # by the incremental path; cleared on every backward pass because the
        # underlying weight may change between steps.
        self._linearized_weight = None
        self.register_backward_hook(self._clear_linearized_weight)
    def forward(self, input, incremental_state=None):
        """
        Args:
            incremental_state: Used to buffer signal; if not None, then input is
                expected to contain a single frame. If the input order changes
                between time steps, call reorder_incremental_state.
        Input:
            Time x Batch x Channel during training
            Batch x Time x Channel during inference
        """
        if incremental_state is None:
            # Training / full-sequence path: plain ConvTBC convolution.
            output = super().forward(input)
            if self.kernel_size[0] > 1 and self.padding[0] > 0:
                # remove future timesteps added by padding
                output = output[:-self.padding[0], :, :]
            return output
        # Incremental path: express the convolution over the last `kw` frames
        # as a single linear layer applied to a rolling history buffer.
        # reshape weight
        weight = self._get_linearized_weight()
        kw = self.kernel_size[0]
        bsz = input.size(0)  # input: bsz x len x dim
        if kw > 1:
            input = input.data
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                # First step: start from an all-zero history window.
                input_buffer = input.new(bsz, kw, input.size(2)).zero_()
                self._set_input_buffer(incremental_state, input_buffer)
            else:
                # shift buffer
                input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
            # append next input
            input_buffer[:, -1, :] = input[:, -1, :]
            input = input_buffer
        with torch.no_grad():
            output = F.linear(input.view(bsz, -1), weight, self.bias)
        return output.view(bsz, 1, -1)
    def reorder_incremental_state(self, incremental_state, new_order):
        # Permute the buffered history along the batch dimension (e.g. when
        # beam search reorders hypotheses).
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(0, new_order)
            self._set_input_buffer(incremental_state, input_buffer)
    def _get_input_buffer(self, incremental_state):
        # Fetch this module's per-generation history buffer (may be None).
        return utils.get_incremental_state(self, incremental_state, 'input_buffer')
    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
    def _get_linearized_weight(self):
        # Rearrange the ConvTBC weight (kw x in x out) into a dense
        # (out, kw * in) matrix so one F.linear call covers the whole window.
        if self._linearized_weight is None:
            kw = self.kernel_size[0]
            weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
            assert weight.size() == (self.out_channels, kw, self.in_channels)
            self._linearized_weight = weight.view(self.out_channels, -1)
        return self._linearized_weight
    def _clear_linearized_weight(self, *args):
        # Backward hook: the weight is about to be updated, drop the cache.
        self._linearized_weight = None
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/beamable_mm.py | translation/fairseq/modules/beamable_mm.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn as nn
class BeamableMM(nn.Module):
    """Optimized batched matrix multiply for beam decoding with attention.

    During beam search the source-side input is replicated ``beam`` times
    while the target side has width one, so the product over inputs
    {(bsz x 1 x nhu), (bsz x sz2 x nhu)} can be computed on the smaller
    inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
    """
    def __init__(self, beam_size=None):
        super(BeamableMM, self).__init__()
        self.beam_size = beam_size

    def forward(self, input1, input2):
        # The fast path only applies at inference time, with a configured
        # beam size, on batched 3-D inputs with a single target time step.
        fast_path = (
            not self.training
            and self.beam_size is not None
            and input1.dim() == 3
            and input1.size(1) == 1
        )
        if not fast_path:
            return input1.bmm(input2)
        bsz = input1.size(0)
        beam = self.beam_size
        # bsz x 1 x nhu  -->  bsz/beam x beam x nhu
        lhs = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
        # bsz x sz2 x nhu  -->  bsz/beam x sz2 x nhu (drop replicated copies)
        rhs = input2.unfold(0, beam, beam)[:, :, :, 0]
        if lhs.size(0) == 1:
            # Everything collapsed into one batch: plain mm is cheaper.
            out = torch.mm(lhs[0, :, :], rhs[0, :, :])
        else:
            out = lhs.bmm(rhs)
        return out.view(bsz, 1, -1)

    def set_beam_size(self, beam_size):
        """Update the beam size used by the fast inference path."""
        self.beam_size = beam_size
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/grad_multiply.py | translation/fairseq/modules/grad_multiply.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
class GradMultiply(torch.autograd.Function):
    """Identity in the forward pass; scales the incoming gradient by a
    constant factor in the backward pass."""

    @staticmethod
    def forward(ctx, x, scale):
        # Stash the multiplier for use in backward.
        ctx.scale = scale
        # Return a fresh copy of x (legacy `new(tensor)` constructor).
        return x.new(x)

    @staticmethod
    def backward(ctx, grad):
        # No gradient flows to `scale` (a plain Python number).
        return ctx.scale * grad, None
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/conv_tbc.py | translation/fairseq/modules/conv_tbc.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from torch.nn.modules.utils import _single
class ConvTBC(torch.nn.Module):
    """1D convolution over an input of shape (time x batch x channel).

    Backed by ``torch.conv_tbc``, a gemm-based implementation that is faster
    than cuDNN for small kernel sizes.
    """
    def __init__(self, in_channels, out_channels, kernel_size, padding=0):
        super(ConvTBC, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalize scalar args to 1-tuples, mirroring nn.Conv1d.
        self.kernel_size = _single(kernel_size)
        self.padding = _single(padding)
        # Parameters are left uninitialized here; callers are expected to
        # initialize them explicitly.
        self.weight = torch.nn.Parameter(
            torch.Tensor(self.kernel_size[0], in_channels, out_channels))
        self.bias = torch.nn.Parameter(torch.Tensor(out_channels))

    def forward(self, input):
        contiguous_input = input.contiguous()
        return torch.conv_tbc(contiguous_input, self.weight, self.bias, self.padding[0])

    def __repr__(self):
        desc = '{}({}, {}, kernel_size={}, padding={}'.format(
            self.__class__.__name__, self.in_channels, self.out_channels,
            self.kernel_size, self.padding)
        # NOTE: `bias` is always a Parameter in this implementation, so this
        # branch never fires; kept for parity with the nn.Conv1d repr shape.
        if self.bias is None:
            desc += ', bias=False'
        return desc + ')'
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/highway.py | translation/fairseq/modules/highway.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn.functional as F
from torch import nn
class Highway(torch.nn.Module):
    """
    A `Highway layer <https://arxiv.org/abs/1505.00387>`_.
    Adopted from the AllenNLP implementation.

    Each layer computes ``g * x + (1 - g) * relu(A(x))`` where ``A`` is a
    learned linear map and ``g = sigmoid(B(x))`` is the carry gate.
    """
    def __init__(
        self,
        input_dim: int,
        num_layers: int = 1
    ):
        super(Highway, self).__init__()
        self.input_dim = input_dim
        # Each layer projects to 2 * input_dim: first half is the nonlinear
        # candidate A(x), second half is the gate pre-activation B(x).
        self.layers = nn.ModuleList([nn.Linear(input_dim, input_dim * 2)
                                     for _ in range(num_layers)])
        self.activation = nn.ReLU()
        self.reset_parameters()

    def reset_parameters(self):
        for layer in self.layers:
            # As per comment in AllenNLP:
            # We should bias the highway layer to just carry its input forward. We do that by
            # setting the bias on `B(x)` to be positive, because that means `g` will be biased to
            # be high, so we will carry the input forward. The bias on `B(x)` is the second half
            # of the bias vector in each Linear layer.
            nn.init.constant_(layer.bias[self.input_dim:], 1)
            nn.init.constant_(layer.bias[:self.input_dim], 0)
            nn.init.xavier_normal_(layer.weight)

    def forward(
        self,
        x: torch.Tensor
    ) -> torch.Tensor:
        for layer in self.layers:
            projection = layer(x)
            proj_x, gate = projection.chunk(2, dim=-1)
            proj_x = self.activation(proj_x)
            # torch.sigmoid replaces the deprecated F.sigmoid (identical math).
            gate = torch.sigmoid(gate)
            x = gate * x + (1 - gate) * proj_x
        return x
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/downsampled_multihead_attention.py | translation/fairseq/modules/downsampled_multihead_attention.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules.scalar_bias import scalar_bias
class SingleHeadAttention(nn.Module):
    """
    Single-head attention that supports Gating and Downsampling
    """
    def __init__(
        self, out_channels, embed_dim, head_dim, head_index, dropout=0.,
        bias=True, project_input=True, gated=False, downsample=False,
        num_heads=1,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.dropout = dropout
        self.head_index = head_index
        self.head_dim = head_dim
        self.project_input = project_input
        self.gated = gated
        self.downsample = downsample
        self.num_heads = num_heads
        self.projection = None
        k_layers = []
        v_layers = []
        if self.downsample:
            # Downsampling: keep every (head_index + 1)-th key/value timestep,
            # and give each head its own head_dim-sized projection.
            k_layers.append(Downsample(self.head_index))
            v_layers.append(Downsample(self.head_index))
            out_proj_size = self.head_dim
        else:
            # Without downsampling, one wide projection covers all heads.
            out_proj_size = self.head_dim * self.num_heads
        if self.gated:
            # GLU-interspersed projections instead of plain linear layers.
            k_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
            self.in_proj_q = GatedLinear(self.embed_dim, out_proj_size, bias=bias)
            v_layers.append(GatedLinear(self.embed_dim, out_proj_size, bias=bias))
        else:
            k_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
            self.in_proj_q = Linear(self.embed_dim, out_proj_size, bias=bias)
            v_layers.append(Linear(self.embed_dim, out_proj_size, bias=bias))
        self.in_proj_k = nn.Sequential(*k_layers)
        self.in_proj_v = nn.Sequential(*v_layers)
        if self.downsample:
            self.out_proj = Linear(out_proj_size, self.head_dim, bias=bias)
        else:
            self.out_proj = Linear(out_proj_size, out_channels, bias=bias)
        # 1/sqrt(head_dim) query scaling, as in "Attention Is All You Need".
        self.scaling = self.head_dim**-0.5
    def forward(
        self, query, key, value, mask_future_timesteps=False,
        key_padding_mask=None, use_scalar_bias=False,
    ):
        """Input shape: Time x Batch x Channel
        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Future timesteps can be masked with the
        `mask_future_timesteps` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.

        Returns:
            (attn, attn_weights) pair.
        """
        src_len, bsz, out_channels = key.size()
        tgt_len = query.size(0)
        assert list(query.size()) == [tgt_len, bsz, out_channels]
        assert key.size() == value.size()
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        # Effective batch for the bmm: downsampled heads run one at a time.
        if self.downsample:
            size = bsz
        else:
            size = bsz * self.num_heads
        k = key
        v = value
        q = query
        if self.project_input:
            q = self.in_proj_q(q)
            k = self.in_proj_k(k)
            v = self.in_proj_v(v)
            # Downsample may have shortened the key sequence.
            src_len = k.size()[0]
        q *= self.scaling
        if not self.downsample:
            # Fold heads into the batch dimension.
            q = q.view(tgt_len, size, self.head_dim)
            k = k.view(src_len, size, self.head_dim)
            v = v.view(src_len, size, self.head_dim)
        q = q.transpose(0, 1)
        k = k.transpose(0, 1)
        v = v.transpose(0, 1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        if mask_future_timesteps:
            assert query.size() == key.size(), \
                'mask_future_timesteps only applies to self-attention'
            # Zero out strictly-future positions, then add -inf on/above the
            # diagonal; columns are strided to match downsampled keys.
            attn_weights *= torch.tril(
                attn_weights.data.new([1]).expand(tgt_len, tgt_len).clone(),
                diagonal=-1,
            )[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0)
            attn_weights += torch.triu(
                attn_weights.data.new([-math.inf]).expand(tgt_len, tgt_len).clone(),
                diagonal=0
            )[:, ::self.head_index + 1 if self.downsample else 1].unsqueeze(0)
        tgt_size = tgt_len
        if use_scalar_bias:
            # Prepend a learnable-free scalar slot the model may attend to.
            attn_weights = scalar_bias(attn_weights, 2)
            v = scalar_bias(v, 1)
            tgt_size += 1
        if key_padding_mask is not None:
            # don't attend to padding symbols
            if key_padding_mask.max() > 0:
                if self.downsample:
                    attn_weights = attn_weights.view(bsz, 1, tgt_len, src_len)
                else:
                    attn_weights = attn_weights.view(size, self.num_heads, tgt_len, src_len)
                attn_weights = attn_weights.masked_fill(
                    key_padding_mask.unsqueeze(1).unsqueeze(2),
                    -math.inf,
                )
                attn_weights = attn_weights.view(size, tgt_len, src_len)
        attn_weights = F.softmax(attn_weights, dim=-1)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn = torch.bmm(attn_weights, v)
        if self.downsample:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.head_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, self.embed_dim)
        attn = self.out_proj(attn)
        return attn, attn_weights
class DownsampledMultiHeadAttention(nn.ModuleList):
    """
    Multi-headed attention with Gating and Downsampling
    """
    def __init__(
        self, out_channels, embed_dim, num_heads, dropout=0., bias=True,
        project_input=True, gated=False, downsample=False,
    ):
        # NOTE(review): plain attributes are assigned before super().__init__()
        # is called below; this works because none of them are Parameters or
        # Modules — confirm if upgrading PyTorch.
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.downsample = downsample
        self.gated = gated
        self.project_input = project_input
        assert self.head_dim * num_heads == embed_dim
        if self.downsample:
            # One SingleHeadAttention per head, each with its own downsampling
            # stride (head index), stored as the ModuleList contents.
            attention_heads = []
            for index in range(self.num_heads):
                attention_heads.append(
                    SingleHeadAttention(
                        out_channels, self.embed_dim, self.head_dim, index,
                        self.dropout, bias, self.project_input, self.gated,
                        self.downsample, self.num_heads,
                    )
                )
            super().__init__(modules=attention_heads)
            self.out_proj = Linear(embed_dim, out_channels, bias=bias)
        else:
            # either we have a list of attention heads, or just one attention head
            # if not being downsampled, we can do the heads with one linear layer instead of separate ones
            super().__init__()
            self.attention_module = SingleHeadAttention(
                out_channels, self.embed_dim, self.head_dim, 1, self.dropout,
                bias, self.project_input, self.gated, self.downsample, self.num_heads,
            )
    def forward(
        self, query, key, value, mask_future_timesteps=False,
        key_padding_mask=None, use_scalar_bias=False,
    ):
        # Input shape: Time x Batch x Channel; returns (attn, attn_weights).
        src_len, bsz, embed_dim = key.size()
        tgt_len = query.size(0)
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()
        tgt_size = tgt_len
        if use_scalar_bias:
            # scalar_bias prepends one extra attended slot (see SingleHeadAttention).
            tgt_size += 1
        attn = []
        attn_weights = []
        if self.downsample:
            for attention_head_number in range(self.num_heads):
                # call the forward of each attention head
                _attn, _attn_weight = self[attention_head_number](
                    query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias,
                )
                attn.append(_attn)
                attn_weights.append(_attn_weight)
            full_attn = torch.cat(attn, dim=2)
            full_attn = self.out_proj(full_attn)
            # Only the first head's weights are returned in this mode.
            return full_attn, attn_weights[0].clone()
        else:
            _attn, _attn_weight = self.attention_module(
                query, key, value, mask_future_timesteps, key_padding_mask, use_scalar_bias,
            )
            attn.append(_attn)
            attn_weights.append(_attn_weight)
            full_attn = torch.cat(attn, dim=2)
            full_attn_weights = torch.cat(attn_weights)
            full_attn_weights = full_attn_weights.view(bsz, self.num_heads, tgt_size, src_len)
            # Average the per-head attention weights for reporting.
            full_attn_weights = full_attn_weights.sum(dim=1) / self.num_heads
            return full_attn, full_attn_weights
class Downsample(nn.Module):
    """
    Selects every nth element, where n is the index
    """
    def __init__(self, index):
        super().__init__()
        self.index = index

    def forward(self, x):
        # Keep every (index + 1)-th element along the first dimension.
        stride = self.index + 1
        return x[::stride]
def Linear(in_features, out_features, dropout=0., bias=True):
    """Weight-normalized Linear layer (input: B x T x C)"""
    layer = nn.Linear(in_features, out_features, bias=bias)
    # Init scaled down by retention probability, as in the ConvS2S paper.
    std = math.sqrt((1 - dropout) / in_features)
    layer.weight.data.normal_(mean=0, std=std)
    layer.bias.data.zero_()
    return nn.utils.weight_norm(layer)
def GatedLinear(in_features, out_features, dropout=0., bias=True):
    """Weight-normalized Linear layer (input: B x T x C) with interspersed GLU units"""
    # Each GLU halves the feature dimension, so widths shrink 4x -> 2x -> 1x.
    stack = [
        Linear(in_features, out_features * 4, dropout, bias),
        nn.GLU(),
        Linear(out_features * 2, out_features * 2, dropout, bias),
        nn.GLU(),
        Linear(out_features, out_features, dropout, bias),
    ]
    return nn.Sequential(*stack)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/multihead_attention.py | translation/fairseq/modules/multihead_attention.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
from fairseq import utils
class MultiheadAttention(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.
    """
    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5
        # Packed q/k/v projection: rows [0:E) -> q, [E:2E) -> k, [2E:3E) -> v.
        self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            # Learnable extra key/value slot appended to every sequence.
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        self.onnx_trace = False
    def prepare_for_onnx_export_(self):
        # Flag for ONNX-export-compatible behavior in this module.
        self.onnx_trace = True
    def reset_parameters(self):
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
    def forward(self, query, key, value, key_padding_mask=None, incremental_state=None,
                need_weights=True, static_kv=False, attn_mask=None):
        """Input shape: Time x Batch x Channel
        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Timesteps can be masked by supplying a T x T mask in the
        `attn_mask` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.
        """
        # Detect self-attention / encoder-decoder attention by tensor
        # identity so the packed projection can be sliced appropriately.
        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert kv_same and not qkv_same
                    key = value = None
        else:
            saved_state = None
        if qkv_same:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif kv_same:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                # Static k/v were cached above; reuse them from saved_state.
                assert value is None
                k = v = None
            else:
                k, v = self.in_proj_kv(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling
        if saved_state is not None:
            # Extend the cached key/value with this step (or reuse as-is for
            # static encoder outputs), then write the cache back.
            if 'prev_key' in saved_state:
                if static_kv:
                    k = saved_state['prev_key']
                else:
                    k = torch.cat((saved_state['prev_key'], k), dim=0)
            if 'prev_value' in saved_state:
                if static_kv:
                    v = saved_state['prev_value']
                else:
                    v = torch.cat((saved_state['prev_value'], v), dim=0)
            saved_state['prev_key'] = k
            saved_state['prev_value'] = v
            self._set_input_buffer(incremental_state, saved_state)
        if self.bias_k is not None:
            # Append the learned bias slot; masks grow by one column to match.
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
        src_len = k.size(0)
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        # Fold heads into the batch dimension: (bsz*heads, len, head_dim).
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        k = k.contiguous().view(src_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        v = v.contiguous().view(src_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        if self.add_zero_attn:
            # Append an all-zero key/value slot the model may attend to.
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            attn_weights += attn_mask.unsqueeze(0)
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.float().masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            ).type_as(attn_weights)  # FP16 support: cast to float and back
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        if need_weights:
            # average attention weights over heads
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.sum(dim=1) / self.num_heads
        else:
            attn_weights = None
        return attn, attn_weights
    def in_proj_qkv(self, query):
        # One matmul, then split into the q/k/v thirds.
        return self._in_proj(query).chunk(3, dim=-1)
    def in_proj_kv(self, key):
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
    def in_proj_q(self, query):
        return self._in_proj(query, end=self.embed_dim)
    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
    def in_proj_v(self, value):
        return self._in_proj(value, start=2 * self.embed_dim)
    def _in_proj(self, input, start=0, end=None):
        # Apply a row-slice [start:end) of the packed projection.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)
    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                # Cached tensors are (len, bsz, dim): reorder the batch dim.
                input_buffer[k] = input_buffer[k].index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)
    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}
    def _set_input_buffer(self, incremental_state, buffer):
        utils.set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/scalar_bias.py | translation/fairseq/modules/scalar_bias.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import torch
class ScalarBias(torch.autograd.Function):
    """
    Adds a vector of scalars, used in self-attention mechanism to allow
    the model to optionally attend to this vector instead of the past
    """
    @staticmethod
    def forward(ctx, input, dim, bias_init):
        # Grow `dim` by one slot and place the bias scalar at position 0.
        out_size = list(input.size())
        out_size[dim] += 1
        output = input.new(*out_size).fill_(bias_init)
        output.narrow(dim, 1, out_size[dim] - 1).copy_(input)
        ctx.dim = dim
        return output

    @staticmethod
    def backward(ctx, grad):
        # Drop the bias slot's gradient; `dim`/`bias_init` get no gradient.
        d = ctx.dim
        return grad.narrow(d, 1, grad.size(d) - 1), None, None
def scalar_bias(input, dim, bias_init=0):
    """Functional wrapper around :class:`ScalarBias` (bias defaults to 0)."""
    return ScalarBias.apply(input, dim, bias_init)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/__init__.py | translation/fairseq/modules/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .adaptive_softmax import AdaptiveSoftmax
from .beamable_mm import BeamableMM
from .character_token_embedder import CharacterTokenEmbedder
from .conv_tbc import ConvTBC
from .downsampled_multihead_attention import DownsampledMultiHeadAttention
from .grad_multiply import GradMultiply
from .highway import Highway
from .learned_positional_embedding import LearnedPositionalEmbedding
from .linearized_convolution import LinearizedConvolution
from .multihead_attention import MultiheadAttention
from .scalar_bias import ScalarBias
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
__all__ = [
'AdaptiveSoftmax',
'BeamableMM',
'CharacterTokenEmbedder',
'ConvTBC',
'DownsampledMultiHeadAttention',
'GradMultiply',
'Highway',
'LearnedPositionalEmbedding',
'LinearizedConvolution',
'MultiheadAttention',
'ScalarBias',
'SinusoidalPositionalEmbedding',
]
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/adaptive_softmax.py | translation/fairseq/modules/adaptive_softmax.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.nn.functional as F
from torch import nn
class AdaptiveSoftmax(nn.Module):
    """
    This is an implementation of the efficient softmax approximation for
    graphical processing units (GPU), described in the paper "Efficient softmax
    approximation for GPUs" (http://arxiv.org/abs/1609.04309).
    """
    def __init__(self, vocab_size, input_dim, cutoff, dropout):
        super().__init__()
        # Ensure the cutoff list ends exactly at vocab_size.
        if vocab_size > cutoff[-1]:
            cutoff = cutoff + [vocab_size]
        else:
            assert vocab_size == cutoff[
                -1], 'cannot specify cutoff larger than vocab size'
        # Head predicts the cutoff[0] frequent words plus one slot per tail
        # cluster.
        output_dim = cutoff[0] + len(cutoff) - 1
        self.vocab_size = vocab_size
        self.cutoff = cutoff
        self.dropout = dropout
        self.input_dim = input_dim
        self.lsm = nn.LogSoftmax(dim=1)
        self.head = nn.Linear(input_dim, output_dim, bias=False)
        self._make_tail(True)
        def init_weights(m):
            if hasattr(m, 'weight'):
                nn.init.xavier_uniform_(m.weight)
        self.apply(init_weights)
        self.register_buffer('version', torch.LongTensor([1]))
        # versions prior to 1 had a bug that offset indices on the head by 1
        self.buggy_offset = 0
    def _make_tail(self, fix_exponent):
        # Each tail cluster is a bottlenecked two-layer projection; the
        # bottleneck shrinks 4x per cluster (plus one extra 4x when the
        # exponent fix is active).
        extra_denom = 1 if fix_exponent else 0
        self.tail = nn.ModuleList()
        for i in range(len(self.cutoff) - 1):
            self.tail.append(
                nn.Sequential(
                    nn.Linear(self.input_dim, self.input_dim // 4 ** (i + extra_denom), bias=False),
                    nn.Dropout(self.dropout),
                    nn.Linear(self.input_dim // 4 ** (i + extra_denom), self.cutoff[i + 1] - self.cutoff[i], bias=False)
                )
            )
    def upgrade_state_dict_named(self, state_dict, name):
        # Backwards compatibility: checkpoints without a version buffer were
        # trained with the off-by-one head indexing and the old tail shapes.
        version_name = name + '.version'
        if version_name not in state_dict:
            self.buggy_offset = 1
            self._make_tail(False)
            state_dict[version_name] = torch.LongTensor([1])
    def adapt_target(self, target):
        """
        In order to be efficient, the AdaptiveSoftMax does not compute the
        scores for all the word of the vocabulary for all the examples. It is
        thus necessary to call the method adapt_target of the AdaptiveSoftMax
        layer inside each forward pass.
        """
        target = target.view(-1)
        # new_target[0]: head targets with tail words remapped to their
        # cluster slot; new_target[1:]: per-cluster relative targets.
        new_target = [target.clone()]
        target_idxs = []
        for i in range(len(self.cutoff) - 1):
            mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
            new_target[0][mask] = self.cutoff[0] + i - self.buggy_offset
            if mask.any():
                target_idxs.append(mask.nonzero().squeeze(1))
                new_target.append(target[mask].add(-self.cutoff[i]))
            else:
                target_idxs.append(None)
                new_target.append(None)
        return new_target, target_idxs
    def forward(self, input, target):
        """
        Args:
            input: (b x t x d)
            target: (b x t)
        Returns:
            2 lists: output for each cutoff section and new targets by cut off
        """
        input = input.contiguous().view(-1, input.size(-1))
        input = F.dropout(input, p=self.dropout, training=self.training)
        new_target, target_idxs = self.adapt_target(target)
        output = [self.head(input)]
        # Tail clusters are only evaluated on the rows that need them.
        for i in range(len(target_idxs)):
            if target_idxs[i] is not None:
                output.append(self.tail[i](input.index_select(0, target_idxs[i])))
            else:
                output.append(None)
        return output, new_target
    def get_log_prob(self, input, target):
        """
        Computes the log probabilities for all the words of the vocabulary,
        given a 2D tensor of hidden vectors.
        """
        bsz, length, dim = input.size()
        input = input.contiguous().view(-1, dim)
        if target is not None:
            _, target_idxs = self.adapt_target(target)
        else:
            target_idxs = None
        head_y = self.head(input)
        log_probs = head_y.new_zeros(input.size(0), self.vocab_size)
        head_sz = self.cutoff[0] + len(self.tail)
        log_probs[:, :head_sz] = self.lsm(head_y)
        # Log-prob of each tail cluster, to be added to its words' scores.
        tail_priors = log_probs[:, self.cutoff[0] - self.buggy_offset: head_sz - self.buggy_offset].clone()
        for i in range(len(self.tail)):
            start = self.cutoff[i]
            end = self.cutoff[i + 1]
            if target_idxs is None:
                # No targets: materialize every cluster for every row.
                tail_out = log_probs[:, start:end]
                tail_out.copy_(self.tail[i](input))
                log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])
            elif target_idxs[i] is not None:
                # Only rows whose target falls in this cluster are scored.
                idxs = target_idxs[i]
                tail_out = log_probs[idxs, start:end]
                tail_out.copy_(self.tail[i](input[idxs]))
                log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])
        log_probs = log_probs.view(bsz, length, -1)
        return log_probs
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/sinusoidal_positional_embedding.py | translation/fairseq/modules/sinusoidal_positional_embedding.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn as nn
import torch.onnx.operators
from fairseq import utils
class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.

    Padding symbols are ignored, but it is necessary to specify whether padding
    is added on the left side (left_pad=True) or right side (left_pad=False).
    """

    def __init__(self, embedding_dim, padding_idx, left_pad, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.left_pad = left_pad
        # precomputed table of embeddings; forward() regrows it on demand
        # when a longer sequence is seen
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size,
            embedding_dim,
            padding_idx,
        )
        self.onnx_trace = False
        # dummy buffer used only to track the module's device/dtype so the
        # (non-buffer) weights table can be cast to match in forward()
        self.register_buffer('_float_tensor', torch.FloatTensor(1))

    def prepare_for_onnx_export_(self):
        # switch forward() to the ONNX-traceable code paths
        self.onnx_trace = True

    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        # outer product of positions and inverse frequencies
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad the odd dimension
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            # padding positions get an all-zero embedding
            emb[padding_idx, :] = 0
        return emb

    def forward(self, input, incremental_state=None, timestep=None):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = torch.onnx.operators.shape_as_tensor(input)
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # recompute/expand embeddings if needed
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos,
                self.embedding_dim,
                self.padding_idx,
            )
        # cast the table to the module's current device/dtype
        self.weights = self.weights.type_as(self._float_tensor)

        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            pos = (timestep.int() + 1).long() if timestep is not None else seq_len
            if self.onnx_trace:
                return self.weights[self.padding_idx + pos, :].unsqueeze(1).repeat(bsz, 1, 1)
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)

        positions = utils.make_positions(input, self.padding_idx, self.left_pad, self.onnx_trace)
        if self.onnx_trace:
            # reshape via onnx operators so the traced graph stays
            # shape-polymorphic instead of baking in constants
            flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
            embedding_shape = torch.cat((bsz.view(1), seq_len.view(1), torch.LongTensor([-1])))
            embeddings = torch.onnx.operators.reshape_from_tensor_shape(flat_embeddings, embedding_shape)
            return embeddings
        return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()

    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/character_token_embedder.py | translation/fairseq/modules/character_token_embedder.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from typing import List, Tuple
from .highway import Highway
from fairseq.data import Dictionary
class CharacterTokenEmbedder(torch.nn.Module):
    """Compose word embeddings from character (byte) embeddings.

    Each word is mapped to its byte ids, embedded, run through a bank of 1d
    convolutions with max-pooling over time, a highway network, and a final
    linear projection down to ``word_embed_dim``.
    """

    def __init__(
        self,
        vocab: Dictionary,
        filters: List[Tuple[int, int]],
        char_embed_dim: int,
        word_embed_dim: int,
        highway_layers: int,
        max_char_len: int = 50,
    ):
        super(CharacterTokenEmbedder, self).__init__()

        self.embedding_dim = word_embed_dim
        # 256 byte values shifted by +1, with index 0 reserved for padding
        self.char_embeddings = nn.Embedding(257, char_embed_dim, padding_idx=0)
        # dedicated learned embeddings for eos and unk (not built from chars)
        self.symbol_embeddings = nn.Parameter(torch.FloatTensor(2, word_embed_dim))
        self.eos_idx, self.unk_idx = 0, 1

        # one Conv1d per (kernel width, out channels) filter spec
        self.convolutions = nn.ModuleList()
        for width, out_c in filters:
            self.convolutions.append(
                nn.Conv1d(char_embed_dim, out_c, kernel_size=width)
            )

        final_dim = sum(f[1] for f in filters)

        self.highway = Highway(final_dim, highway_layers)
        self.projection = nn.Linear(final_dim, word_embed_dim)

        self.set_vocab(vocab, max_char_len)
        self.reset_parameters()

    def set_vocab(self, vocab, max_char_len):
        # precompute a (len(vocab), max_char_len) lookup table of character
        # ids for every word; 0 is the padding character
        word_to_char = torch.LongTensor(len(vocab), max_char_len)

        truncated = 0
        for i in range(len(vocab)):
            if i < vocab.nspecial:
                # special symbols get no character decomposition
                char_idxs = [0] * max_char_len
            else:
                chars = vocab[i].encode()
                # +1 for padding
                char_idxs = [c + 1 for c in chars] + [0] * (max_char_len - len(chars))
            if len(char_idxs) > max_char_len:
                # over-long words are clipped (counted for the warning below)
                truncated += 1
                char_idxs = char_idxs[:max_char_len]
            word_to_char[i] = torch.LongTensor(char_idxs)

        if truncated > 0:
            print('Truncated {} words longer than {} characters'.format(truncated, max_char_len))

        self.vocab = vocab
        self.word_to_char = word_to_char

    @property
    def padding_idx(self):
        # word-level padding index (distinct from the character padding 0)
        return self.vocab.pad()

    def reset_parameters(self):
        nn.init.xavier_normal_(self.char_embeddings.weight)
        nn.init.xavier_normal_(self.symbol_embeddings)
        nn.init.xavier_normal_(self.projection.weight)
        # keep the padding character embedding at zero after init
        nn.init.constant_(self.char_embeddings.weight[self.char_embeddings.padding_idx], 0.)
        nn.init.constant_(self.projection.bias, 0.)

    def forward(
        self,
        words: torch.Tensor,
    ):
        # keep the lookup table on the same device as the input ids
        self.word_to_char = self.word_to_char.type_as(words)

        flat_words = words.view(-1)
        word_embs = self._convolve(self.word_to_char[flat_words])

        # overwrite embeddings of symbols that are not character-composed
        pads = flat_words.eq(self.vocab.pad())
        if pads.any():
            word_embs[pads] = 0

        eos = flat_words.eq(self.vocab.eos())
        if eos.any():
            word_embs[eos] = self.symbol_embeddings[self.eos_idx]

        unk = flat_words.eq(self.vocab.unk())
        if unk.any():
            word_embs[unk] = self.symbol_embeddings[self.unk_idx]

        return word_embs.view(words.size() + (-1,))

    def _convolve(
        self,
        char_idxs: torch.Tensor,
    ):
        char_embs = self.char_embeddings(char_idxs)
        char_embs = char_embs.transpose(1, 2)  # BTC -> BCT

        conv_result = []

        for i, conv in enumerate(self.convolutions):
            x = conv(char_embs)
            # max-pool over the character (time) dimension
            x, _ = torch.max(x, -1)
            x = F.relu(x)
            conv_result.append(x)

        conv_result = torch.cat(conv_result, dim=-1)
        conv_result = self.highway(conv_result)

        return self.projection(conv_result)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/modules/learned_positional_embedding.py | translation/fairseq/modules/learned_positional_embedding.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.nn as nn
from fairseq import utils
class LearnedPositionalEmbedding(nn.Embedding):
    """Learned positional embeddings up to a fixed maximum length.

    Padding symbols are ignored, but it is necessary to specify whether
    padding is added on the left side (left_pad=True) or right side
    (left_pad=False).
    """

    def __init__(self, num_embeddings, embedding_dim, padding_idx, left_pad):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.left_pad = left_pad

    def forward(self, input, incremental_state=None):
        """Input is expected to be of size [bsz x seqlen]."""
        if incremental_state is None:
            # full sequence: compute a position for every (non-pad) token
            positions = utils.make_positions(input.data, self.padding_idx, self.left_pad)
        else:
            # single decoding step: every sentence sits at the same position
            positions = input.data.new(1, 1).fill_(self.padding_idx + input.size(1))
        return super().forward(positions)

    def max_positions(self):
        """Maximum number of supported positions."""
        return self.num_embeddings - self.padding_idx - 1
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/data_utils.py | translation/fairseq/data/data_utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import contextlib
import os
import numpy as np
def infer_language_pair(path):
    """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
    for filename in os.listdir(path):
        pieces = filename.split('.')
        if len(pieces) >= 3:
            langs = pieces[1].split('-')
            if len(langs) == 2:
                return langs
    # no file matched the <split>.<src>-<tgt>.* pattern
    return None, None
def collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning=False):
    """Convert a list of 1d tensors into a padded 2d tensor."""
    max_len = max(v.size(0) for v in values)
    res = values[0].new(len(values), max_len).fill_(pad_idx)

    def copy_tensor(src, dst):
        assert dst.numel() == src.numel()
        if not move_eos_to_beginning:
            dst.copy_(src)
        else:
            # rotate right by one: eos moves from the end to the front
            assert src[-1] == eos_idx
            dst[0] = eos_idx
            dst[1:] = src[:-1]

    for i, v in enumerate(values):
        if left_pad:
            copy_tensor(v, res[i][max_len - len(v):])
        else:
            copy_tensor(v, res[i][:len(v)])
    return res
@contextlib.contextmanager
def numpy_seed(seed):
    """Seed the NumPy PRNG inside the block, restoring prior state on exit.

    A ``None`` seed leaves the PRNG untouched.
    """
    if seed is None:
        yield
        return
    saved_state = np.random.get_state()
    try:
        np.random.seed(seed)
        yield
    finally:
        # restore whatever state the caller had, even on exception
        np.random.set_state(saved_state)
def collect_filtered(function, iterable, filtered):
    """
    Similar to :func:`filter` but collects filtered elements in ``filtered``.

    Args:
        function (callable): function that returns ``False`` for elements that
            should be filtered
        iterable (iterable): iterable to filter
        filtered (list): list to store filtered elements
    """
    for item in iterable:
        if not function(item):
            filtered.append(item)
        else:
            yield item
def filter_by_size(indices, size_fn, max_positions, raise_exception=False):
    """
    Filter indices based on their size.

    Args:
        indices (List[int]): ordered list of dataset indices
        size_fn (callable): function that returns the size of a given index
        max_positions (tuple): filter elements larger than this size.
            Comparisons are done component-wise.
        raise_exception (bool, optional): if ``True``, raise an exception
            if any elements are filtered. Default: ``False``
    """
    def within_bounds(idx):
        if isinstance(max_positions, (float, int)):
            return size_fn(idx) <= max_positions
        # component-wise comparison; None on either side means "no limit"
        return all(
            a is None or b is None or a <= b
            for a, b in zip(size_fn(idx), max_positions)
        )

    ignored = []
    for idx in indices:
        if not within_bounds(idx):
            ignored.append(idx)
            continue
        if len(ignored) > 0 and raise_exception:
            raise Exception((
                'Size of sample #{} is invalid (={}) since max_positions={}, '
                'skip this example with --skip-invalid-size-inputs-valid-test'
            ).format(idx, size_fn(idx), max_positions))
        yield idx

    if len(ignored) > 0:
        print((
            '| WARNING: {} samples have invalid sizes and will be skipped, '
            'max_positions={}, first few sample ids={}'
        ).format(len(ignored), max_positions, ignored[:10]))
def batch_by_size(
    indices, num_tokens_fn, max_tokens=None, max_sentences=None,
    required_batch_size_multiple=1,
):
    """
    Yield mini-batches of indices bucketed by size. Batches may contain
    sequences of different lengths.

    Args:
        indices (List[int]): ordered list of dataset indices
        num_tokens_fn (callable): function that returns the number of tokens at
            a given index
        max_tokens (int, optional): max number of tokens in each batch.
            Default: ``None``
        max_sentences (int, optional): max number of sentences in each
            batch. Default: ``None``
        required_batch_size_multiple (int, optional): require batch size to
            be a multiple of N. Default: ``1``
    """
    max_tokens = max_tokens if max_tokens is not None else float('Inf')
    max_sentences = max_sentences if max_sentences is not None else float('Inf')
    bsz_mult = required_batch_size_multiple

    batch = []

    def is_batch_full(num_tokens):
        # a batch is "full" once adding the next sample would exceed either
        # the sentence budget or the (padded) token budget
        if len(batch) == 0:
            return False
        if len(batch) == max_sentences:
            return True
        if num_tokens > max_tokens:
            return True
        return False

    sample_len = 0      # length of the longest sample in the current batch
    sample_lens = []    # per-sample lengths of the current batch
    ignored = []        # NOTE(review): never used below — looks like dead code
    for idx in indices:
        sample_lens.append(num_tokens_fn(idx))
        sample_len = max(sample_len, sample_lens[-1])
        # token cost is estimated as (batch size) * (longest sample so far),
        # i.e. as if every sample were padded to the longest one
        num_tokens = (len(batch) + 1) * sample_len
        if is_batch_full(num_tokens):
            # emit the largest prefix whose size is a multiple of bsz_mult
            # (falling back to the remainder when the batch is smaller than
            # bsz_mult); leftover samples seed the next batch
            mod_len = max(
                bsz_mult * (len(batch) // bsz_mult),
                len(batch) % bsz_mult,
            )
            yield batch[:mod_len]
            batch = batch[mod_len:]
            sample_lens = sample_lens[mod_len:]
            sample_len = max(sample_lens) if len(sample_lens) > 0 else 0

        batch.append(idx)

    if len(batch) > 0:
        yield batch
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/monolingual_dataset.py | translation/fairseq/data/monolingual_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import numpy as np
import torch
from . import data_utils, FairseqDataset
from typing import List
def collate(samples, pad_idx, eos_idx):
    """Merge a list of monolingual samples into a mini-batch dict.

    Args:
        samples (List[dict]): samples with keys ``'id'``, ``'source'`` (1d
            tensor) and ``'target'`` (1d tensor, or list of 1d tensors when
            training with multiple targets)
        pad_idx (int): padding token index
        eos_idx (int): end-of-sentence token index

    Returns:
        dict with keys ``'id'``, ``'ntokens'``, ``'net_input'``, ``'target'``
        and ``'nsentences'`` (empty dict for an empty sample list).
    """
    if len(samples) == 0:
        return {}

    def merge(key, is_list=False):
        # pad and stack one field across the batch; a list-valued target is
        # merged position-wise into a list of padded tensors
        if is_list:
            res = []
            for i in range(len(samples[0][key])):
                res.append(data_utils.collate_tokens(
                    [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,
                ))
            return res
        else:
            return data_utils.collate_tokens(
                [s[key] for s in samples], pad_idx, eos_idx, left_pad=False,
            )

    is_target_list = isinstance(samples[0]['target'], list)

    return {
        'id': torch.LongTensor([s['id'] for s in samples]),
        'ntokens': sum(len(s['source']) for s in samples),
        'net_input': {
            'src_tokens': merge('source'),
            'src_lengths': torch.LongTensor([
                s['source'].numel() for s in samples
            ]),
        },
        'target': merge('target', is_target_list),
        # bug fix: nsentences is the number of sentences in the batch; the
        # previous value, samples[0]['source'].size(0), was the token count
        # of the first sentence
        'nsentences': len(samples),
    }
class MonolingualDataset(FairseqDataset):
    """
    A wrapper around torch.utils.data.Dataset for monolingual data.

    Args:
        dataset (torch.utils.data.Dataset): dataset to wrap; each item is a
            ``(source, future_target, past_target)`` triple of 1d tensors
        sizes (List[int]): sentence lengths
        src_vocab (~fairseq.data.Dictionary): source vocabulary
        tgt_vocab (~fairseq.data.Dictionary): target vocabulary
        add_eos_for_other_targets (bool): append eos to the source (and fix up
            the other targets) when 'self'/'past' targets are requested and
            the source does not already end in eos
        shuffle (bool, optional): shuffle the elements before batching.
            Default: ``True``
        targets (List[str], optional): which targets to return; a subset of
            {'self', 'future', 'past'}
    """

    def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle,
                 targets=None):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle

        assert targets is None or all(
            t in {'self', 'future', 'past'} for t in targets), "targets must be none or one of 'self', 'future', 'past'"
        if targets is not None and len(targets) == 0:
            targets = None
        self.targets = targets

    def __getitem__(self, index):
        source, future_target, past_target = self.dataset[index]
        source, target = self._make_source_target(source, future_target, past_target)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def _make_source_target(self, source, future_target, past_target):
        # Build the requested target(s); may extend source and targets by one
        # token so that everything stays aligned after appending eos.
        if self.targets is not None:
            target = []

            if self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) \
                    and source[-1] != self.vocab.eos():
                # append eos at the end of source
                source = torch.cat([source, source.new([self.vocab.eos()])])

                if 'future' in self.targets:
                    # the appended eos has no next token; pad its target slot
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if 'past' in self.targets:
                    # first token is before the start of sentence which is only used in "none" break mode when
                    # add_eos_for_other_targets is False
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None]])

            for t in self.targets:
                if t == 'self':
                    target.append(source)
                elif t == 'future':
                    target.append(future_target)
                elif t == 'past':
                    target.append(past_target)
                else:
                    raise Exception('invalid target ' + t)

            if len(target) == 1:
                # single target: unwrap the list for the common case
                target = target[0]
        else:
            target = future_target

        return source, self._filter_vocab(target)

    def _filter_vocab(self, target):
        # map target ids outside the (smaller) target vocab to unk
        if len(self.tgt_vocab) != len(self.vocab):
            def _filter(target):
                mask = target.ge(len(self.tgt_vocab))
                if mask.any():
                    target[mask] = self.tgt_vocab.unk()
                return target

            if isinstance(target, list):
                return [_filter(t) for t in target]
            return _filter(target)
        return target

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[dict]): samples to collate

        Returns:
            dict: a mini-batch with the following keys:

                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:

                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the right.

                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will appear
                  on the right.
        """
        return collate(samples, self.vocab.pad(), self.vocab.eos())

    def get_dummy_batch(self, num_tokens, max_positions, tgt_len=128):
        """Return a dummy batch with a given number of tokens."""
        if isinstance(max_positions, float) or isinstance(max_positions, int):
            tgt_len = min(tgt_len, max_positions)
        bsz = num_tokens // tgt_len
        target = self.vocab.dummy_sentence(tgt_len + 2)
        # NOTE(review): the local names are swapped (target[2:] is really the
        # future target), but the positional call below passes the values in
        # the order _make_source_target expects, so behavior is correct
        source, past_target, future_target = target[1:-1], target[2:], target[:-2]
        source, target = self._make_source_target(source, past_target, future_target)
        return self.collater([
            {'id': i, 'source': source, 'target': target}
            for i in range(bsz)
        ])

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return self.sizes[index]

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return self.sizes[index]

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        # NOTE(review): lexsort's primary key here is np.flip(self.sizes, 0),
        # which pairs index i with the size of element len-1-i; upstream
        # fairseq appends self.sizes directly — confirm the flip is intended
        order.append(np.flip(self.sizes, 0))
        return np.lexsort(order)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/iterators.py | translation/fairseq/data/iterators.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import itertools
import math
import numpy as np
import torch
from . import data_utils
class CountingIterator(object):
    """Wrapper around an iterable that maintains the iteration count.

    Args:
        iterable (iterable): iterable to wrap

    Attributes:
        count (int): number of elements consumed from this iterator
    """

    def __init__(self, iterable):
        self.iterable = iterable
        self.count = 0
        # all consumption goes through the counting generator in __iter__
        self.itr = iter(self)

    def __len__(self):
        return len(self.iterable)

    def __iter__(self):
        for item in self.iterable:
            self.count += 1
            yield item

    def __next__(self):
        return next(self.itr)

    def has_next(self):
        """Whether the iterator has been exhausted."""
        return self.count < len(self)

    def skip(self, num_to_skip):
        """Fast-forward the iterator by skipping *num_to_skip* elements."""
        for _ in itertools.islice(self.itr, num_to_skip):
            pass  # consuming the slice advances the underlying iterator
        return self
class EpochBatchIterator(object):
    """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.

    Compared to :class:`torch.utils.data.DataLoader`, this iterator:

    - can be reused across multiple epochs with the :func:`next_epoch_itr`
      method (optionally shuffled between epochs)
    - can be serialized/deserialized with the :func:`state_dict` and
      :func:`load_state_dict` methods
    - supports sharding with the *num_shards* and *shard_id* arguments

    Args:
        dataset (~torch.utils.data.Dataset): dataset from which to load the data
        collate_fn (callable): merges a list of samples to form a mini-batch
        batch_sampler (~torch.utils.data.Sampler): an iterator over batches of
            indices
        seed (int, optional): seed for random number generator for
            reproducibility. Default: ``1``
        num_shards (int, optional): shard the data iterator into N
            shards. Default: ``1``
        shard_id (int, optional): which shard of the data iterator to
            return. Default: ``0``
    """

    def __init__(self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0):
        assert isinstance(dataset, torch.utils.data.Dataset)
        self.dataset = dataset
        self.collate_fn = collate_fn
        # freeze the batch list so every epoch reshuffles the same batches
        self.frozen_batches = tuple(batch_sampler)
        self.seed = seed
        self.num_shards = num_shards
        self.shard_id = shard_id

        self.epoch = 0
        self._cur_epoch_itr = None
        # pre-built (fast-forwarded) iterator prepared by load_state_dict()
        self._next_epoch_itr = None

    def __len__(self):
        return len(self.frozen_batches)

    def next_epoch_itr(self, shuffle=True):
        """Return a new iterator over the dataset.

        Args:
            shuffle (bool, optional): shuffle batches before returning the
                iterator. Default: ``True``
        """
        if self._next_epoch_itr is not None:
            # consume the iterator prepared when restoring from a checkpoint
            self._cur_epoch_itr = self._next_epoch_itr
            self._next_epoch_itr = None
        else:
            self.epoch += 1
            self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle)
        return self._cur_epoch_itr

    def end_of_epoch(self):
        """Returns whether the most recent epoch iterator has been exhausted"""
        return not self._cur_epoch_itr.has_next()

    @property
    def iterations_in_epoch(self):
        """The number of consumed batches in the current epoch."""
        if self._cur_epoch_itr is not None:
            return self._cur_epoch_itr.count
        elif self._next_epoch_itr is not None:
            return self._next_epoch_itr.count
        return 0

    def state_dict(self):
        """Returns a dictionary containing a whole state of the iterator."""
        return {
            'epoch': self.epoch,
            'iterations_in_epoch': self.iterations_in_epoch,
        }

    def load_state_dict(self, state_dict):
        """Copies the state of the iterator from the given *state_dict*."""
        self.epoch = state_dict['epoch']
        itr_pos = state_dict.get('iterations_in_epoch', 0)
        if itr_pos > 0:
            # fast-forward epoch iterator
            itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))
            if itr_pos < len(itr):
                self._next_epoch_itr = itr.skip(itr_pos)
            # NOTE(review): when itr_pos >= len(itr) the saved position is
            # silently dropped and a fresh epoch starts — confirm intended

    def _get_iterator_for_epoch(self, epoch, shuffle):
        if shuffle:
            # set seed based on the seed and epoch number so that we get
            # reproducible results when resuming from checkpoints
            with data_utils.numpy_seed(self.seed + epoch):
                batches = list(self.frozen_batches)  # copy
                np.random.shuffle(batches)
        else:
            batches = self.frozen_batches
        return CountingIterator(torch.utils.data.DataLoader(
            self.dataset,
            collate_fn=self.collate_fn,
            # each worker only sees its own shard; short shards are padded
            # with empty batches so all shards have equal length
            batch_sampler=ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]),
        ))
class GroupedIterator(object):
    """Wrapper around an iterable that yields fixed-size chunks of items.

    The final chunk may be shorter than *chunk_size*.

    Args:
        iterable (iterable): iterable to wrap
        chunk_size (int): size of each chunk
    """

    def __init__(self, iterable, chunk_size):
        self._len = int(math.ceil(len(iterable) / float(chunk_size)))
        self.itr = iter(iterable)
        self.chunk_size = chunk_size

    def __len__(self):
        return self._len

    def __iter__(self):
        return self

    def __next__(self):
        # pull up to chunk_size items; an empty pull means exhaustion
        group = list(itertools.islice(self.itr, self.chunk_size))
        if not group:
            raise StopIteration
        return group
class ShardedIterator(object):
    """A sharded wrapper around an iterable, padded to length.

    Args:
        iterable (iterable): iterable to wrap
        num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterator over
        fill_value (Any, optional): padding value when the iterable doesn't
            evenly divide *num_shards*. Default: ``None``
    """

    def __init__(self, iterable, num_shards, shard_id, fill_value=None):
        if not (0 <= shard_id < num_shards):
            raise ValueError('shard_id must be between 0 and num_shards')

        # ceil(len / num_shards): every shard reports the same length
        n = len(iterable)
        self._sharded_len = n // num_shards
        if n % num_shards:
            self._sharded_len += 1

        # zip the strided slice against a range of the padded length, so
        # short shards are topped up with fill_value
        self.itr = itertools.zip_longest(
            range(self._sharded_len),
            itertools.islice(iterable, shard_id, n, num_shards),
            fillvalue=fill_value,
        )

    def __len__(self):
        return self._sharded_len

    def __iter__(self):
        return self

    def __next__(self):
        _, value = next(self.itr)
        return value
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/token_block_dataset.py | translation/fairseq/data/token_block_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import numpy as np
import torch
class TokenBlockDataset(torch.utils.data.Dataset):
    """Break a 1d tensor of tokens into blocks.

    The blocks are fetched from the original tensor so no additional memory is allocated.

    Args:
        tokens: 1d tensor of tokens to break into blocks
        sizes: sentence lengths (required for 'complete' and 'eos')
        block_size: maximum block size (ignored in 'eos' break mode)
        pad: padding token id (used when building the past target)
        eos: end-of-sentence token id (used when building rotated targets)
        break_mode: Mode used for breaking tokens. Values can be one of:
            - 'none': break tokens into equally sized blocks (up to block_size)
            - 'complete': break tokens into blocks (up to block_size) such that
              blocks contains complete sentences, although block_size may be
              exceeded if some sentences exceed block_size
            - 'eos': each block contains one sentence (block_size is ignored)
        include_targets: return next tokens as targets
    """

    def __init__(self, tokens, sizes, block_size, pad, eos, break_mode=None, include_targets=False):
        super().__init__()

        self.tokens = tokens
        self.total_size = len(tokens)
        self.pad = pad
        self.eos = eos
        self.include_targets = include_targets
        # list of (start, end) half-open slices into `tokens`, one per block
        self.slice_indices = []

        if break_mode is None or break_mode == 'none':
            # fixed-size blocks; the last one may be short
            length = math.ceil(len(tokens) / block_size)

            def block_at(i):
                start = i * block_size
                end = min(start + block_size, len(tokens))
                return (start, end)

            self.slice_indices = [block_at(i) for i in range(length)]
        elif break_mode == 'complete':
            assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
            tok_idx = 0
            sz_idx = 0
            curr_size = 0
            # greedily pack whole sentences; a single over-long sentence is
            # still admitted (curr_size == 0 case), exceeding block_size
            while sz_idx < len(sizes):
                if curr_size + sizes[sz_idx] <= block_size or curr_size == 0:
                    curr_size += sizes[sz_idx]
                    sz_idx += 1
                else:
                    self.slice_indices.append((tok_idx, tok_idx + curr_size))
                    tok_idx += curr_size
                    curr_size = 0
            if curr_size > 0:
                self.slice_indices.append((tok_idx, tok_idx + curr_size))
        elif break_mode == 'eos':
            assert sizes is not None and sum(sizes) == len(tokens), '{} != {}'.format(sum(sizes), len(tokens))
            curr = 0
            for sz in sizes:
                # skip samples with just 1 example (which would be just the eos token)
                if sz > 1:
                    self.slice_indices.append((curr, curr + sz))
                curr += sz
        else:
            raise ValueError('Invalid break_mode: ' + break_mode)

        self.sizes = np.array([e - s for s, e in self.slice_indices])

    def __getitem__(self, index):
        s, e = self.slice_indices[index]

        item = torch.LongTensor(self.tokens[s:e])

        if self.include_targets:
            # target is the sentence, for source, rotate item one token to the left (would start with eos)
            # past target is rotated to the left by 2 (padded if its first)
            if s == 0:
                source = np.concatenate([[self.eos], self.tokens[0:e - 1]])
                past_target = np.concatenate([[self.pad, self.eos], self.tokens[0:e - 2]])
            else:
                source = self.tokens[s - 1:e - 1]
                if s == 1:
                    past_target = np.concatenate([[self.eos], self.tokens[0:e - 2]])
                else:
                    past_target = self.tokens[s - 2:e - 2]

            # (source, target, past_target): each aligned with `item`
            return torch.LongTensor(source), item, torch.LongTensor(past_target)
        return item

    def __len__(self):
        return len(self.slice_indices)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/fairseq_dataset.py | translation/fairseq/data/fairseq_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.utils.data
from fairseq.data import data_utils
class FairseqDataset(torch.utils.data.Dataset):
    """A dataset that provides helpers for batching.

    Subclasses implement indexing and sizing so that generic batching
    utilities (e.g. ``--max-tokens`` bucketing) work with any dataset.
    """

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.

        Args:
            samples (List[int]): sample indices to collate

        Returns:
            dict: a mini-batch suitable for forwarding with a Model
        """
        raise NotImplementedError

    def get_dummy_batch(self, num_tokens, max_positions):
        """Return a dummy batch with a given number of tokens."""
        raise NotImplementedError

    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        raise NotImplementedError

    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        raise NotImplementedError

    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        raise NotImplementedError
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/backtranslation_dataset.py | translation/fairseq/data/backtranslation_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq import sequence_generator
from . import FairseqDataset, language_pair_dataset
class BacktranslationDataset(FairseqDataset):
    def __init__(self, args, tgt_dataset, tgt_dict, backtranslation_model):
        """
        Sets up a backtranslation dataset which takes a tgt batch, generates
        a src using a tgt-src backtranslation_model, and returns the
        corresponding {generated src, input tgt} batch.

        Args:
            args: generation args for the backtranslation SequenceGenerator.
                Note that there is no equivalent argparse code for these args
                anywhere in our top level train scripts yet. Integration is
                still in progress. You can still, however, test out this dataset
                functionality with the appropriate args as in the corresponding
                unittest: test_backtranslation_dataset.
            tgt_dataset: dataset which will be used to build self.tgt_dataset --
                a LanguagePairDataset with tgt dataset as the source dataset and
                None as the target dataset.
                We use language_pair_dataset here to encapsulate the tgt_dataset
                so we can re-use the LanguagePairDataset collater to format the
                batches in the structure that SequenceGenerator expects.
            tgt_dict: tgt dictionary (typically a joint src/tgt BPE dictionary)
            backtranslation_model: tgt-src model to use in the SequenceGenerator
                to generate backtranslations from tgt batches
        """
        # wrap the monolingual tgt data as a source-only LanguagePairDataset
        self.tgt_dataset = language_pair_dataset.LanguagePairDataset(
            src=tgt_dataset,
            src_sizes=None,
            src_dict=tgt_dict,
            tgt=None,
            tgt_sizes=None,
            tgt_dict=None,
        )
        self.backtranslation_generator = sequence_generator.SequenceGenerator(
            [backtranslation_model],
            tgt_dict,
            unk_penalty=args.backtranslation_unkpen,
            sampling=args.backtranslation_sampling,
            beam_size=args.backtranslation_beam,
        )
        self.backtranslation_max_len_a = args.backtranslation_max_len_a
        self.backtranslation_max_len_b = args.backtranslation_max_len_b
        self.backtranslation_beam = args.backtranslation_beam
def __getitem__(self, index):
"""
Returns a single sample. Multiple samples are fed to the collater to
create a backtranslation batch. Note you should always use collate_fn
BacktranslationDataset.collater() below if given the option to
specify which collate_fn to use (e.g. in a dataloader which uses this
BacktranslationDataset -- see corresponding unittest for an example).
"""
return self.tgt_dataset[index]
def __len__(self):
"""
The length of the backtranslation dataset is the length of tgt.
"""
return len(self.tgt_dataset)
def collater(self, samples):
"""
Using the samples from the tgt dataset, load a collated tgt sample to
feed to the backtranslation model. Then take the generated translation
with best score as the source and the orignal net input as the target.
"""
collated_tgt_only_sample = self.tgt_dataset.collater(samples)
backtranslation_hypos = self._generate_hypotheses(collated_tgt_only_sample)
# Go through each tgt sentence in batch and its corresponding best
# generated hypothesis and create a backtranslation data pair
# {id: id, source: generated backtranslation, target: original tgt}
generated_samples = []
for input_sample, hypos in zip(samples, backtranslation_hypos):
generated_samples.append(
{
"id": input_sample["id"],
"source": hypos[0]["tokens"], # first hypo is best hypo
"target": input_sample["source"],
}
)
return language_pair_dataset.collate(
samples=generated_samples,
pad_idx=self.tgt_dataset.src_dict.pad(),
eos_idx=self.tgt_dataset.src_dict.eos(),
)
def get_dummy_batch(self, num_tokens, max_positions):
""" Just use the tgt dataset get_dummy_batch """
self.tgt_dataset.get_dummy_batch(num_tokens, max_positions)
def num_tokens(self, index):
""" Just use the tgt dataset num_tokens """
self.tgt_dataset.num_tokens(index)
def ordered_indices(self):
""" Just use the tgt dataset ordered_indices """
self.tgt_dataset.ordered_indices
def valid_size(self, index, max_positions):
""" Just use the tgt dataset size """
self.tgt_dataset.valid_size(index, max_positions)
def _generate_hypotheses(self, sample):
"""
Generates hypotheses from a LanguagePairDataset collated / batched
sample. Note in this case, sample["target"] is None, and
sample["net_input"]["src_tokens"] is really in tgt language.
"""
self.backtranslation_generator.cuda()
input = sample["net_input"]
srclen = input["src_tokens"].size(1)
hypos = self.backtranslation_generator.generate(
input,
maxlen=int(
self.backtranslation_max_len_a * srclen + self.backtranslation_max_len_b
),
)
return hypos
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/language_pair_dataset.py | translation/fairseq/data/language_pair_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import numpy as np
import torch
from fairseq import utils
from . import data_utils, FairseqDataset
def collate(
    samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False,
    input_feeding=True,
):
    """Merge a list of samples into a padded mini-batch.

    Args:
        samples: list of dicts with 'id', 'source' and (optionally) 'target'
            1D token tensors.
        pad_idx: padding symbol index used to fill shorter sentences.
        eos_idx: end-of-sentence symbol index (needed when shifting targets).
        left_pad_source / left_pad_target: pad on the left instead of right.
        input_feeding: also build `prev_output_tokens` (targets shifted right
            by one) for teacher forcing.

    Returns:
        dict with keys 'id', 'ntokens', 'net_input', 'target', 'nsentences'
        (empty dict for empty input). The batch is sorted by descending
        source length.
    """
    if len(samples) == 0:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        # Pad the per-sample 1D tensors for `key` into one 2D batch tensor.
        return data_utils.collate_tokens(
            [s[key] for s in samples],
            pad_idx, eos_idx, left_pad, move_eos_to_beginning,
        )

    id = torch.LongTensor([s['id'] for s in samples])
    src_tokens = merge('source', left_pad=left_pad_source)
    # sort by descending source length
    src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
    src_lengths, sort_order = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)
    prev_output_tokens = None
    target = None
    if samples[0].get('target', None) is not None:
        target = merge('target', left_pad=left_pad_target)
        target = target.index_select(0, sort_order)
        ntokens = sum(len(s['target']) for s in samples)
        if input_feeding:
            # we create a shifted version of targets for feeding the
            # previous output token(s) into the next decoder step
            prev_output_tokens = merge(
                'target',
                left_pad=left_pad_target,
                move_eos_to_beginning=True,
            )
            prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
    else:
        ntokens = sum(len(s['source']) for s in samples)
    batch = {
        'id': id,
        'ntokens': ntokens,
        'net_input': {
            'src_tokens': src_tokens,
            'src_lengths': src_lengths,
        },
        'target': target,
        # BUG FIX: was samples[0]['source'].size(0), i.e. the token length of
        # the FIRST source sentence, not the number of sentences in the batch.
        'nsentences': len(samples),
    }
    if prev_output_tokens is not None:
        batch['net_input']['prev_output_tokens'] = prev_output_tokens
    return batch
class LanguagePairDataset(FairseqDataset):
    """
    A pair of torch.utils.data.Datasets.
    Args:
        src (torch.utils.data.Dataset): source dataset to wrap
        src_sizes (List[int]): source sentence lengths
        src_dict (~fairseq.data.Dictionary): source vocabulary
        tgt (torch.utils.data.Dataset, optional): target dataset to wrap
        tgt_sizes (List[int], optional): target sentence lengths
        tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
        left_pad_source (bool, optional): pad source tensors on the left side.
            Default: ``True``
        left_pad_target (bool, optional): pad target tensors on the left side.
            Default: ``False``
        max_source_positions (int, optional): max number of tokens in the source
            sentence. Default: ``1024``
        max_target_positions (int, optional): max number of tokens in the target
            sentence. Default: ``1024``
        shuffle (bool, optional): shuffle dataset elements before batching.
            Default: ``True``
        input_feeding (bool, optional): create a shifted version of the targets
            to be passed into the model for input feeding/teacher forcing.
            Default: ``True``
    """
    def __init__(
        self, src, src_sizes, src_dict,
        tgt=None, tgt_sizes=None, tgt_dict=None,
        left_pad_source=True, left_pad_target=False,
        max_source_positions=1024, max_target_positions=1024,
        shuffle=True, input_feeding=True,
    ):
        if tgt_dict is not None:
            # source and target vocabularies must agree on the special
            # symbols so one collater can pad/shift both sides
            assert src_dict.pad() == tgt_dict.pad()
            assert src_dict.eos() == tgt_dict.eos()
            assert src_dict.unk() == tgt_dict.unk()
        self.src = src
        self.tgt = tgt
        # NOTE(review): np.array(None) produces a 0-d object array when
        # src_sizes is None (BacktranslationDataset passes src_sizes=None);
        # num_tokens()/size() would then fail -- confirm such callers never
        # reach those methods on this instance.
        self.src_sizes = np.array(src_sizes)
        self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
        self.left_pad_source = left_pad_source
        self.left_pad_target = left_pad_target
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.shuffle = shuffle
        self.input_feeding = input_feeding
    def __getitem__(self, index):
        # 'target' is None for monolingual (source-only) usage
        return {
            'id': index,
            'source': self.src[index],
            'target': self.tgt[index] if self.tgt is not None else None,
        }
    def __len__(self):
        return len(self.src)
    def collater(self, samples):
        """Merge a list of samples to form a mini-batch.
        Args:
            samples (List[dict]): samples to collate
        Returns:
            dict: a mini-batch with the following keys:
                - `id` (LongTensor): example IDs in the original input order
                - `ntokens` (int): total number of tokens in the batch
                - `net_input` (dict): the input to the Model, containing keys:
                  - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
                    the source sentence of shape `(bsz, src_len)`. Padding will
                    appear on the left if *left_pad_source* is ``True``.
                  - `src_lengths` (LongTensor): 1D Tensor of the unpadded
                    lengths of each source sentence of shape `(bsz)`
                  - `prev_output_tokens` (LongTensor): a padded 2D Tensor of
                    tokens in the target sentence, shifted right by one position
                    for input feeding/teacher forcing, of shape `(bsz,
                    tgt_len)`. This key will not be present if *input_feeding*
                    is ``False``. Padding will appear on the left if
                    *left_pad_target* is ``True``.
                - `target` (LongTensor): a padded 2D Tensor of tokens in the
                  target sentence of shape `(bsz, tgt_len)`. Padding will appear
                  on the left if *left_pad_target* is ``True``.
        """
        # delegate to the module-level collate(); both sides share the
        # source dictionary's pad/eos indices (asserted equal in __init__)
        return collate(
            samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(),
            left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target,
            input_feeding=self.input_feeding,
        )
    def get_dummy_batch(self, num_tokens, max_positions, src_len=128, tgt_len=128):
        """Return a dummy batch with a given number of tokens."""
        # clamp the requested lengths against all applicable position limits
        src_len, tgt_len = utils.resolve_max_positions(
            (src_len, tgt_len),
            max_positions,
            (self.max_source_positions, self.max_target_positions),
        )
        bsz = num_tokens // max(src_len, tgt_len)
        return self.collater([
            {
                'id': i,
                'source': self.src_dict.dummy_sentence(src_len),
                'target': self.tgt_dict.dummy_sentence(tgt_len) if self.tgt_dict is not None else None,
            }
            for i in range(bsz)
        ])
    def num_tokens(self, index):
        """Return the number of tokens in a sample. This value is used to
        enforce ``--max-tokens`` during batching."""
        return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
    def size(self, index):
        """Return an example's size as a float or tuple. This value is used when
        filtering a dataset with ``--max-positions``."""
        return (self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0)
    def ordered_indices(self):
        """Return an ordered list of indices. Batches will be constructed based
        on this order."""
        if self.shuffle:
            indices = np.random.permutation(len(self))
        else:
            indices = np.arange(len(self))
        if self.tgt_sizes is not None:
            # sort by target length first, then (stably) by source length, so
            # the final order groups similar source lengths with ties broken
            # by target length
            indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]
        return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/dictionary.py | translation/fairseq/data/dictionary.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from collections import Counter
import os
import torch
class Dictionary(object):
    """A mapping from symbols to consecutive integers."""

    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'):
        self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
        self.symbols = []   # index -> symbol string
        self.count = []     # index -> occurrence count
        self.indices = {}   # symbol string -> index
        # dictionary indexing starts at 1 for consistency with Lua
        self.add_symbol('<Lua heritage>')
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        # out-of-range indices map to the unknown word
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def index(self, sym):
        """Returns the index of the specified symbol"""
        if sym in self.indices:
            return self.indices[sym]
        return self.unk_index

    def string(self, tensor, bpe_symbol=None, escape_unk=False):
        """Helper for converting a tensor of token indices to a string.
        Can optionally remove BPE symbols or escape <unk> words.
        """
        if torch.is_tensor(tensor) and tensor.dim() == 2:
            # BUG FIX: forward bpe_symbol/escape_unk to each row when
            # recursing over a 2D batch; they were previously dropped, so
            # batched conversion silently ignored those options.
            return '\n'.join(
                self.string(t, bpe_symbol, escape_unk) for t in tensor
            )

        def token_string(i):
            if i == self.unk():
                return self.unk_string(escape_unk)
            else:
                return self[i]

        sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
        if bpe_symbol is not None:
            sent = (sent + ' ').replace(bpe_symbol, '').rstrip()
        return sent

    def unk_string(self, escape=False):
        """Return unknown string, optionally escaped as: <<unk>>"""
        if escape:
            return '<{}>'.format(self.unk_word)
        else:
            return self.unk_word

    def add_symbol(self, word, n=1):
        """Adds a word to the dictionary"""
        if word in self.indices:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def update(self, new_dict):
        """Updates counts from new dictionary."""
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if word in self.indices:
                idx = self.indices[word]
                self.count[idx] = self.count[idx] + new_dict.count[idx2]
            else:
                idx = len(self.symbols)
                self.indices[word] = idx
                self.symbols.append(word)
                self.count.append(new_dict.count[idx2])

    def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
        """Sort symbols by frequency in descending order, ignoring special ones.
        Args:
            - threshold defines the minimum word count
            - nwords defines the total number of words in the final dictionary,
                including special symbols
            - padding_factor can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).
        """
        if nwords <= 0:
            nwords = len(self)
        # keep the special symbols in place at the front of the dictionary
        new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[:self.nspecial]
        new_count = self.count[:self.nspecial]
        c = Counter(dict(zip(self.symbols[self.nspecial:], self.count[self.nspecial:])))
        for symbol, count in c.most_common(nwords - self.nspecial):
            if count >= threshold:
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                # most_common() yields in descending order, so once one
                # symbol falls below threshold all remaining ones do too
                break
        threshold_nwords = len(new_symbols)
        if padding_factor > 1:
            i = 0
            while threshold_nwords % padding_factor != 0:
                # append unused filler symbols until size is a multiple of
                # padding_factor
                symbol = 'madeupword{:04d}'.format(i)
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(0)
                i += 1
                threshold_nwords += 1
        assert len(new_symbols) % padding_factor == 0
        assert len(new_symbols) == len(new_indices)
        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices

    def pad(self):
        """Helper to get index of pad symbol"""
        return self.pad_index

    def eos(self):
        """Helper to get index of end-of-sentence symbol"""
        return self.eos_index

    def unk(self):
        """Helper to get index of unk symbol"""
        return self.unk_index

    @classmethod
    def load(cls, f, ignore_utf_errors=False):
        """Loads the dictionary from a text file with the format:
        ```
        <symbol0> <count0>
        <symbol1> <count1>
        ...
        ```
        """
        if isinstance(f, str):
            try:
                if not ignore_utf_errors:
                    with open(f, 'r', encoding='utf-8') as fd:
                        return cls.load(fd)
                else:
                    with open(f, 'r', encoding='utf-8', errors='ignore') as fd:
                        return cls.load(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except Exception:
                raise Exception("Incorrect encoding detected in {}, please "
                                "rebuild the dataset".format(f))
        d = cls()
        for line in f.readlines():
            # the symbol may contain spaces, so split on the LAST space
            idx = line.rfind(' ')
            word = line[:idx]
            count = int(line[idx+1:])
            d.indices[word] = len(d.symbols)
            d.symbols.append(word)
            d.count.append(count)
        return d

    def save(self, f):
        """Stores dictionary into a text file"""
        if isinstance(f, str):
            os.makedirs(os.path.dirname(f), exist_ok=True)
            with open(f, 'w', encoding='utf-8') as fd:
                return self.save(fd)
        # special symbols are not written; load() re-creates them via __init__
        for symbol, count in zip(self.symbols[self.nspecial:], self.count[self.nspecial:]):
            print('{} {}'.format(symbol, count), file=f)

    def dummy_sentence(self, length):
        # random non-special token ids, terminated with eos
        t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
        t[-1] = self.eos()
        return t
class TruncatedDictionary(object):
    # Wraps an existing dictionary but exposes only its first `length`
    # entries; any lookup beyond that range maps to the unknown symbol.
    def __init__(self, wrapped_dict, length):
        # HACK: dynamically create a class that subclasses both this wrapper
        # and the wrapped dictionary's class, and share the wrapped object's
        # __dict__, so the wrapper passes isinstance checks and behaves like
        # the original dictionary everywhere except __len__/__getitem__.
        self.__class__ = type(wrapped_dict.__class__.__name__,
                              (self.__class__, wrapped_dict.__class__), {})
        self.__dict__ = wrapped_dict.__dict__
        self.wrapped_dict = wrapped_dict
        # never report more entries than the wrapped dictionary actually has
        self.length = min(len(self.wrapped_dict), length)
    def __len__(self):
        return self.length
    def __getitem__(self, i):
        if i < self.length:
            return self.wrapped_dict[i]
        # out-of-range indices are treated as unknown tokens
        return self.wrapped_dict.unk()
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/__init__.py | translation/fairseq/data/__init__.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .dictionary import Dictionary, TruncatedDictionary
from .fairseq_dataset import FairseqDataset
from .indexed_dataset import IndexedDataset, IndexedInMemoryDataset, IndexedRawTextDataset
from .language_pair_dataset import LanguagePairDataset
from .monolingual_dataset import MonolingualDataset
from .token_block_dataset import TokenBlockDataset
from .iterators import (
CountingIterator,
EpochBatchIterator,
GroupedIterator,
ShardedIterator,
)
__all__ = [
'CountingIterator',
'Dictionary',
'EpochBatchIterator',
'FairseqDataset',
'GroupedIterator',
'IndexedDataset',
'IndexedInMemoryDataset',
'IndexedRawTextDataset',
'LanguagePairDataset',
'MonolingualDataset',
'ShardedIterator',
'TokenBlockDataset',
]
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
zhuohan123/macaron-net | https://github.com/zhuohan123/macaron-net/blob/3a84e7a3323bd1a3a9a303194ba336d670a1fb2c/translation/fairseq/data/indexed_dataset.py | translation/fairseq/data/indexed_dataset.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import struct
import numpy as np
import torch
from fairseq.tokenizer import Tokenizer
def read_longs(f, n):
    """Read ``n`` int64 values from the binary stream ``f`` into a numpy array."""
    values = np.empty(n, dtype=np.int64)
    f.readinto(values)
    return values
def write_longs(f, a):
    """Serialize the values of ``a`` to the binary stream ``f`` as int64."""
    data = np.array(a, dtype=np.int64)
    f.write(data)
# Mapping from the on-disk dtype code (stored in the .idx header) to the
# numpy dtype used to decode the .bin payload.
# BUG FIX: code 6 used ``np.float``, an alias of the builtin ``float``
# (i.e. float64) that was removed in NumPy 1.24 and now raises
# AttributeError at import time; use ``np.float64`` explicitly. Codes 6
# and 7 therefore decode identically (np.double is np.float64).
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: np.float64,
    7: np.double,
}
def code(dtype):
    """Return the on-disk integer code for ``dtype`` (None when unknown)."""
    for key, value in dtypes.items():
        if value == dtype:
            return key
def index_file_path(prefix_path):
    """Return the path of the index (.idx) file for a dataset prefix."""
    suffix = '.idx'
    return prefix_path + suffix
def data_file_path(prefix_path):
    """Return the path of the raw data (.bin) file for a dataset prefix."""
    suffix = '.bin'
    return prefix_path + suffix
class IndexedDataset(torch.utils.data.Dataset):
    """Loader for TorchNet IndexedDataset.

    Reads the paired ``<path>.idx`` (header + offsets) and ``<path>.bin``
    (raw element payload) files produced by IndexedDatasetBuilder.
    """
    def __init__(self, path, fix_lua_indexing=False, read_data=True):
        # fix_lua_indexing: subtract 1 on read to convert the 1-based ids
        # stored on disk (Lua convention) to 0-based ids.
        super().__init__()
        self.fix_lua_indexing = fix_lua_indexing
        self.read_index(path)
        self.data_file = None
        if read_data:
            self.read_data(path)
    def read_index(self, path):
        # Parse the little-endian .idx header: 8-byte magic, 8-byte version,
        # dtype code + element size, item count + total number of dims,
        # followed by three int64 arrays (dim offsets, data offsets, sizes).
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert magic == b'TNTIDX\x00\x00'
            version = f.read(8)
            assert struct.unpack('<Q', version) == (1,)
            code, self.element_size = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            self.size, self.s = struct.unpack('<QQ', f.read(16))
            self.dim_offsets = read_longs(f, self.size + 1)
            self.data_offsets = read_longs(f, self.size + 1)
            self.sizes = read_longs(f, self.s)
    def read_data(self, path):
        # unbuffered handle: items are read at random positions via
        # seek/readinto in __getitem__
        self.data_file = open(data_file_path(path), 'rb', buffering=0)
    def check_index(self, i):
        if i < 0 or i >= self.size:
            raise IndexError('index out of range')
    def __del__(self):
        # data_file is None when constructed with read_data=False
        if self.data_file:
            self.data_file.close()
    def __getitem__(self, i):
        self.check_index(i)
        # per-dimension sizes of item i live in
        # sizes[dim_offsets[i]:dim_offsets[i+1]]
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        # data_offsets are in element units, hence * element_size for bytes
        self.data_file.seek(self.data_offsets[i] * self.element_size)
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # subtract 1 for 0-based indexing
        return item
    def __len__(self):
        return self.size
    @staticmethod
    def exists(path):
        # both the index and the data file must be present
        return (
            os.path.exists(index_file_path(path)) and
            os.path.exists(data_file_path(path))
        )
class IndexedInMemoryDataset(IndexedDataset):
    """Loader for TorchNet IndexedDataset, keeps all the data in memory"""
    def read_data(self, path):
        # Slurp the entire .bin payload into one flat numpy buffer up front
        # instead of keeping an open handle for per-item random access.
        self.data_file = open(data_file_path(path), 'rb')
        self.buffer = np.empty(self.data_offsets[-1], dtype=self.dtype)
        self.data_file.readinto(self.buffer)
        self.data_file.close()
        if self.fix_lua_indexing:
            # applied once here, so __getitem__ does not adjust again
            self.buffer -= 1  # subtract 1 for 0-based indexing
    def __del__(self):
        # nothing to close: the file handle is released inside read_data
        pass
    def __getitem__(self, i):
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        # copy item i's slice out of the in-memory buffer (offsets in elements)
        np.copyto(a, self.buffer[self.data_offsets[i]:self.data_offsets[i + 1]])
        return torch.from_numpy(a).long()
class IndexedRawTextDataset(IndexedDataset):
    """Takes a text file as input and binarizes it in memory at instantiation.

    Original lines are also kept in memory.
    """

    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        """
        Args:
            path: path to a plain-text file, one sentence per line.
            dictionary: dictionary used to map tokens to indices.
            append_eos: append the eos symbol to every sentence.
            reverse_order: reverse the token order of every sentence.
        """
        self.tokens_list = []   # binarized sentences (LongTensors)
        self.lines = []         # raw text lines, newline-stripped
        self.sizes = []         # number of tokens per sentence
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        self.size = len(self.tokens_list)

    def read_data(self, path, dictionary):
        """Read and binarize every line of the file at ``path``."""
        # BUG FIX: open explicitly as UTF-8 instead of relying on the
        # platform default encoding (consistent with Dictionary.load, which
        # reads the vocabulary as UTF-8).
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                self.lines.append(line.strip('\n'))
                tokens = Tokenizer.tokenize(
                    line, dictionary, add_if_not_exist=False,
                    append_eos=self.append_eos, reverse_order=self.reverse_order,
                ).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)

    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]

    def get_original_text(self, i):
        """Return the raw (untokenized) text of sentence ``i``."""
        self.check_index(i)
        return self.lines[i]

    def __del__(self):
        # no open file handle to release: everything lives in memory
        pass

    def __len__(self):
        return self.size

    @staticmethod
    def exists(path):
        # a single raw text file, no .idx/.bin pair
        return os.path.exists(path)
class IndexedDatasetBuilder(object):
    """Incrementally writes tensors to an IndexedDataset (.bin/.idx pair)."""

    # Size in bytes of one element for each supported dtype.
    # BUG FIX: this previously used ``np.float`` (alias of the builtin
    # ``float``, i.e. float64), which was removed in NumPy 1.24; it was also
    # listed as 4 bytes although float64 elements occupy 8 bytes.
    # (np.double is np.float64, so the two entries collapse to one key.)
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.float64: 8,
        np.double: 8,
    }

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, 'wb')
        self.dtype = dtype
        self.data_offsets = [0]  # cumulative offsets in element units
        self.dim_offsets = [0]   # cumulative number of dims per item
        self.sizes = []          # flattened per-dimension sizes of all items
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        """Append one tensor to the data file and record its offsets."""
        # +1 for Lua compatibility (ids are stored 1-based on disk)
        nbytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
        # integer division keeps offsets as ints (previously float via '/')
        self.data_offsets.append(self.data_offsets[-1] + nbytes // self.element_size)
        for s in tensor.size():
            self.sizes.append(s)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def merge_file_(self, another_file):
        """Append the contents of another dataset with the same dtype."""
        index = IndexedDataset(another_file, read_data=False)
        assert index.dtype == self.dtype
        # shift the incoming offsets past everything already written
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # stream the raw binary payload across in chunks
        with open(data_file_path(another_file), 'rb') as f:
            while True:
                data = f.read(1024)
                if not data:
                    break
                self.out_file.write(data)

    def finalize(self, index_file):
        """Close the data file and write the index header plus offset arrays."""
        self.out_file.close()
        # 'with' ensures the index is closed even if a write fails
        with open(index_file, 'wb') as index:
            index.write(b'TNTIDX\x00\x00')   # magic
            index.write(struct.pack('<Q', 1))  # version
            index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
            index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
            write_longs(index, self.dim_offsets)
            write_longs(index, self.data_offsets)
            write_longs(index, self.sizes)
| python | BSD-3-Clause | 3a84e7a3323bd1a3a9a303194ba336d670a1fb2c | 2026-01-05T07:14:10.995304Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/train_cddsa.py | code/train_cddsa.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2022-12-20 19:10
# @Author : Ran Gu
"""
Apply feature disentanglement and a novel contrastive learning scheme to multi-site fundus image segmentation.
"""
import argparse
import logging
import os
import sys
import yaml
import math
import tqdm
import random
import torch
import timeit
import numpy as np
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from utils.losses import *
from datetime import datetime
from torchvision import transforms
from torchvision.utils import make_grid
from dataloader.ms_fundus.fundus_dataloader import FundusSegmentation
from dataloader.ms_fundus import fundus_transforms as tr
from torch.utils.data import ConcatDataset, DataLoader
from models.networks.sdnet import MEncoder, AEncoder, Segmentor, Ada_Decoder
from models.weight_init import initialize_weights
from utils.average_meter import AverageMeter
from utils.utils_fundus import sample_minibatch_fundus
from tensorboardX import SummaryWriter
from pytorch_metric_learning import losses
# New tensors default to CPU float32; data/models are moved to GPU explicitly
# via .cuda() where needed.
torch.set_default_tensor_type('torch.FloatTensor')
def parse_args():
    """Build and parse the command-line arguments for CDDSA training.

    Returns:
        argparse.Namespace with experiment, data, GPU and training settings.
    """
    desc = "Pytorch implementation of CDDSA (RanGu)"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
    parser.add_argument('--seed', type=int, default=123, help='random seed')
    # dir config
    parser.add_argument('--exp_dir', type=str, default='./exp/ms_fundus/train_cddsa')
    parser.add_argument('--data_dir', type=str, default='/mnt/data1/guran/Data/ms_fundus')
    # data config
    parser.add_argument('--data_size', type=int, default=256)
    # GPU config
    parser.add_argument('--gpu', type=str, default='0')
    # training config
    parser.add_argument('--resume', default=None, help='checkpoint path')
    parser.add_argument('--datasetTrain', nargs='+', type=int, default=[2,3,4], help='train folder id contain images ROIs to train range from [1,2,3,4]')
    parser.add_argument('--datasetTest', nargs='+', type=int, default=[1], help='test folder id contain images ROIs to test one of [1,2,3,4]')
    parser.add_argument('--in_channel', type=int, default=3)
    parser.add_argument('--z_length', type=int, default=16)
    parser.add_argument('--anatomy_channel', type=int, default=8)
    # loss weights (kl/seg/reco/recoz/style/cont) used to combine the
    # disentanglement objectives in train()
    parser.add_argument('--kl_w', type=float, default=0.001)
    parser.add_argument('--seg_w', type=float, default=1)
    parser.add_argument('--reco_w', type=float, default=1)
    parser.add_argument('--recoz_w', type=float, default=1)
    parser.add_argument('--style_w', type=float, default=0.1)
    parser.add_argument('--cont_w', type=float, default=0.2)
    # temperature for the contrastive loss
    parser.add_argument('--tau', type=float, default=0.1)
    parser.add_argument('--batch_size', type=int, default=10)
    parser.add_argument('--n_minibatch', type=int, default=8)
    parser.add_argument('--n_sample', type=int, default=1)
    parser.add_argument('--num_classes', type=int, default=2)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--epoches', type=int, default=200)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('-wi', '--weight_init', type=str, default="xavier",
                        help='Weight initialization method, or path to weights file '
                             '(for fine-tuning or continuing training)')
    # logging / validation / checkpoint frequencies (in iterations or epochs)
    parser.add_argument('--print_interval', type=int, default=1)
    parser.add_argument('--val_interval', type=int, default=2)
    parser.add_argument('--save_interval', type=int, default=25)
    # utils.check_folder(parser.parse_args().exp_dir)
    return parser.parse_args()
def validate_slice(M_enc, A_enc, Seg, Dec, dataloader, args, writer, iter_num):
    """Run one validation pass over ``dataloader`` and return the mean Dice.

    Args:
        M_enc: encoder returning (z, mu, logvar) for an input image.
        A_enc: anatomy/content encoder whose output feeds the segmentor.
        Seg: segmentation head applied to the anatomy features.
        Dec: decoder reconstructing the image from (anatomy, mu).
        dataloader: validation loader; each sample is a list of per-domain
            batches with 'image' and 'label' tensors.
        args: parsed arguments (uses num_classes, batch_size, print_interval).
        writer: tensorboardX SummaryWriter for periodic image summaries.
        iter_num: global step used when logging to ``writer``.

    Returns:
        Average per-class validation Dice (AverageMeter.avg).
    """
    # remember whether the models were in training mode so it can be restored
    training = M_enc.training
    M_enc.eval()
    A_enc.eval()
    Seg.eval()
    Dec.eval()
    val_dice = AverageMeter()
    with torch.no_grad():
        for num, sample in enumerate(tqdm.tqdm(dataloader, total=len(dataloader), ncols=80, leave=False)):
            for batch in sample:
                img_val, gt_val = batch['image'].cuda(), batch['label'].cuda()
                # NOTE(review): redundant -- already inside the outer no_grad
                with torch.no_grad():
                    a_out = A_enc(img_val)
                    pred_val = Seg(a_out)
                    z_out, mu_out, logvar_out = M_enc(img_val)
                    # for reconstruction (reco is computed but not used/logged here)
                    reco = Dec(a_out, mu_out)
                pred_val = torch.sigmoid(pred_val)
                # binarize at 0.75 and accumulate per-class Dice; permute
                # moves channels last (NCHW -> NHWC) for val_dice_class
                val_dice.update(val_dice_class(pred_val.permute(0,2,3,1) > 0.75, gt_val.permute(0,2,3,1), num_class=args.num_classes))
                if (num+1) % (args.print_interval+25) == 0:
                    # summarywriter image: channel 0 = cup, channel 1 = disc
                    # (per the tag names below)
                    grid_image = make_grid(img_val.clone().cpu().data, args.batch_size, normalize=True)
                    pred_show = pred_val.clone().cpu().data
                    gt_show = gt_val.clone().cpu().data
                    writer.add_image('val/images', grid_image, iter_num)
                    writer.add_images('val/cup_ground_truths', gt_show[:, 0:1, :, :], iter_num)
                    writer.add_images('val/cup_preds', pred_show[:, 0:1, :, :], iter_num)
                    writer.add_images('val/disc_ground_truths', gt_show[:, 1:2, :, :], iter_num)
                    writer.add_images('val/disc_preds', pred_show[:, 1:2, :, :], iter_num)
                    # utils.save_imgs(img.cpu().detach(), gt.cpu().detach(), seg_pred.cpu().detach(), reco.cpu().detach(), img_folder)
                    # utils.save_anatomy_factors(a_out[0].cpu().numpy(), anatomy_folder)
    # restore training mode if the models were training on entry
    if training:
        M_enc.train()
        A_enc.train()
        Seg.train()
        Dec.train()
    return val_dice.avg
def train(model, train_loader, val_loader, writer, args):
    """Main training loop for the disentanglement model (CDDSA, fundus).

    model: dict with keys 'M_enc' (modality encoder), 'A_enc' (anatomy
        encoder), 'Seg' (segmentor), 'Dec' (decoder), all already on GPU.
    train_loader: yields a list of per-domain batches per step.
    val_loader: validation loader for the held-out target domain.
    writer: tensorboard SummaryWriter.
    args: parsed CLI namespace (loss weights kl_w/seg_w/reco_w/recoz_w/
        style_w/cont_w, intervals, datasetTrain list, etc.).

    Per step, each source domain contributes KL + segmentation (dice+BCE) +
    reconstruction + latent-reconstruction losses; after the last domain a
    cross-domain style-contrastive loss (NT-Xent) and an anatomy-content
    consistency loss are added, then a single backward/step is done on the
    averaged total.
    """
    # define the model
    m_encoder = model['M_enc']
    a_encoder = model['A_enc']
    segmentor = model['Seg']
    decoder = model['Dec']
    # define the criterion
    l1_distance = torch.nn.L1Loss()
    dice_criterion = DiceLoss()
    bce_criterion = torch.nn.BCELoss()
    style_criterion = losses.NTXentLoss(temperature=0.1).cuda()
    # define the optimizer (single Adam over all four sub-networks)
    optimizer = optim.Adam([{'params': m_encoder.parameters()}, {'params': a_encoder.parameters()},
                            {'params': segmentor.parameters()}, {'params': decoder.parameters()}],
                            betas=(0.9, 0.99), lr=args.learning_rate)
    # plateau scheduler keyed on validation dice (mode='max')
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.9, patience=(16/args.val_interval), verbose=True, min_lr=1e-4)
    best_val_dice, best_epoch = torch.tensor([0.0, 0.0]), 1
    for epoch in range(args.start_epoch, args.epoches):
        # per-domain running loss meters, reset every epoch
        domain_loss = []
        for i in range(len(args.datasetTrain)):
            kl_loss_epoch = AverageMeter()
            seg_loss_epoch = AverageMeter()
            reco_loss_epoch = AverageMeter()
            recoz_loss_epoch = AverageMeter()
            style_loss_epoch = AverageMeter()
            cont_loss_epoch = AverageMeter()
            total_loss_epoch = AverageMeter()
            domain_loss.append({'kl': kl_loss_epoch, 'seg': seg_loss_epoch, 'reco': reco_loss_epoch,
                                'recoz': recoz_loss_epoch, 'style': style_loss_epoch, 'cont': cont_loss_epoch,
                                'total': total_loss_epoch})
        # train in each epoch
        start_time = timeit.default_timer()
        for batch_idx, sample in tqdm.tqdm(
                enumerate(train_loader), total=len(train_loader),
                desc='Train epoch=%d' % epoch, ncols=80, leave=False):
            iteration = batch_idx + epoch * len(train_loader)
            # per-step buffers of style codes / anatomy maps, one slot per domain
            domain_stylec = [[] for i in range(len(args.datasetTrain))]
            domain_content = [[] for i in range(len(args.datasetTrain))]
            a_encoder.train()
            segmentor.train()
            m_encoder.train()
            decoder.train()
            total_loss = 0
            for dc, domain in enumerate(sample):
                image = domain['image'].cuda()
                label = domain['label'].cuda()
                # model forward
                a_out = a_encoder(image)
                seg_pred = segmentor(a_out)
                z_out, mu_out, logvar_out = m_encoder(image)
                # for reconstruction
                reco = decoder(a_out, z_out)
                # re-encode the reconstruction to constrain the latent (cycle)
                z_out_tiled, _, _ = m_encoder(reco)
                seg_pred = torch.sigmoid(seg_pred)
                # collect style code and anatomy content in each domain
                domain_stylec[dc].append(z_out)
                domain_content[dc].append(a_out)
                # Lank loss for z_out
                reco_loss = l1_distance(reco, image)
                kl_loss = KL_divergence(logvar_out, mu_out)
                dice_loss = dice_criterion(seg_pred.permute(0,2,3,1), label.permute(0,2,3,1), num_class=args.num_classes)
                bce_loss = bce_criterion(seg_pred, label)
                seg_loss = 0.5 * (dice_loss + bce_loss)
                recoz_loss = l1_distance(z_out_tiled, z_out)
                domain_total_loss = args.kl_w * kl_loss + \
                                    args.seg_w * seg_loss + \
                                    args.reco_w * reco_loss + \
                                    args.recoz_w * recoz_loss
                total_loss += domain_total_loss
                domain_loss[dc]['kl'].update(kl_loss.cpu())
                domain_loss[dc]['seg'].update(seg_loss.cpu())
                domain_loss[dc]['reco'].update(reco_loss.cpu())
                domain_loss[dc]['recoz'].update(recoz_loss.cpu())
                domain_loss[dc]['total'].update(domain_total_loss.cpu())
                dc_num = len(args.datasetTrain)
                # cross-domain terms are computed once, after the last domain,
                # using the codes buffered above for ALL domains
                if dc == (dc_num-1):
                    minibatch_domain_stylec = [[] for i in range(dc_num)]
                    domain_label = [[] for i in range(dc_num)]
                    reco_zout = 0
                    for i in range(dc_num):
                        domain_stacked_stylec = torch.cat(domain_stylec[i], dim=0)
                        # randomly rescaled style mixture in [-1, 1) per sample
                        reco_zout += domain_stacked_stylec * (1-torch.rand(domain_stacked_stylec.size(0),1)*2).cuda()
                        domain_label[i] = torch.tensor([i] * args.n_minibatch).cuda()
                        minibatch_domain_stylec[i] = sample_minibatch_fundus(domain_stacked_stylec, args.n_minibatch, 1)
                    embeddings = torch.cat(minibatch_domain_stylec, dim=0)
                    labels = torch.cat(domain_label, dim=0)
                    # NT-Xent contrast over style codes labelled by domain
                    style_loss = style_criterion(embeddings, labels)
                    total_loss += args.style_w * style_loss
                    # NOTE: cross-domain losses are logged under slot 0 only
                    domain_loss[0]['style'].update(style_loss.cpu())
                    domain_content_loss = torch.tensor(0).cuda().float()
                    for i in range(dc_num):
                        domain_stacked_aout = torch.cat(domain_content[i], dim=0)
                        # decode each domain's anatomy with the mixed style and
                        # require the re-encoded anatomy to be unchanged
                        new_reco = decoder(domain_stacked_aout, reco_zout)
                        new_aout = a_encoder(new_reco)
                        domain_content_loss += l1_distance(new_aout, domain_stacked_aout)
                        if (epoch + 1) % (args.print_interval+19) == 0 and (batch_idx % 10) == 0:
                            grid_image = make_grid(new_reco, nrow=args.batch_size, normalize=True)
                            writer.add_image('Train/new_reconstruction', grid_image, epoch)
                            new_aout_shape = new_aout.size()
                            grid_image = make_grid(new_aout.reshape(new_aout_shape[0]*new_aout_shape[1], new_aout_shape[2], new_aout_shape[3]).unsqueeze(dim=1),
                                                   nrow=args.batch_size, normalize=True)
                            writer.add_image('Train/new_anatomy', grid_image, epoch)
                    total_loss += args.cont_w * (domain_content_loss/dc_num)
                    domain_loss[0]['cont'].update(domain_content_loss.cpu())
                # tensorboard for visualing train result
                if (epoch + 1) % (args.print_interval+19) ==0 and (batch_idx % 10) == 0:
                    grid_image = make_grid(image, nrow=args.batch_size, normalize=True)
                    writer.add_image('Train/imgs', grid_image, epoch)
                    a_out_shape = a_out.size()
                    grid_image = make_grid(a_out.reshape(a_out_shape[0]*a_out_shape[1], a_out_shape[2], a_out_shape[3]).unsqueeze(dim=1),
                                           nrow=args.batch_size, normalize=True)
                    writer.add_image('Train/anatomy', grid_image, epoch)
                    grid_image = make_grid(reco, nrow=args.batch_size, normalize=True)
                    writer.add_image('Train/reconstruction', grid_image, epoch)
                    label_shape = label.size()
                    grid_image = make_grid(label.reshape(label_shape[0]*label_shape[1], label_shape[2], label_shape[3]).unsqueeze(dim=1),
                                           nrow=args.batch_size, normalize=True)
                    writer.add_image('Train/mask', grid_image, epoch)
                    pred_shape = seg_pred.size()
                    grid_image = make_grid(seg_pred.reshape(pred_shape[0]*pred_shape[1], pred_shape[2], pred_shape[3]).unsqueeze(dim=1),
                                           nrow=args.batch_size, normalize=True)
                    writer.add_image('Train/prediction', grid_image, epoch)
            # backward the gradient (single step on the domain-averaged loss)
            total_loss = total_loss / len(args.datasetTrain)
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()
        # print training result
        logging.info('\n Epoch[%4d/%4d]-Lr: %.6f --> Train...' % (epoch+1, args.epoches, optimizer.param_groups[0]['lr']))
        for i in range(len(args.datasetTrain)):
            logging.info('\t Domain-%d: [Total Loss: %.4f]: KL Loss = %.4f, Seg Loss = %.4f, Reco Loss = %.4f, RecoZ Loss = %.4f' %
                         (args.datasetTrain[i], domain_loss[i]['total'].avg, domain_loss[i]['kl'].avg, domain_loss[i]['seg'].avg,
                          domain_loss[i]['reco'].avg, domain_loss[i]['recoz'].avg))
        logging.info('\t Domain-all: Domain style contrast Loss = %.4f, Domain content Loss = %.4f' %
                     (domain_loss[0]['style'].avg, domain_loss[0]['cont'].avg))
        # tensorboard
        if (epoch+1) % args.print_interval == 0:
            writer.add_scalar('Learning_Rate', optimizer.param_groups[0]['lr'], epoch)
            writer.add_scalars('Train/Domain_all/Losses',
                               {'style': domain_loss[0]['style'].avg, 'cont': domain_loss[0]['cont'].avg}, epoch)
            for i, train_dc in enumerate(args.datasetTrain):
                writer.add_scalars('Train/Domain{}/Losses'.format(train_dc),
                                   {'kl': domain_loss[i]['kl'].avg, 'seg': domain_loss[i]['seg'].avg,
                                    'reco': domain_loss[i]['reco'].avg, 'recoz': domain_loss[i]['recoz'].avg,
                                    'total': domain_loss[i]['total'].avg}, epoch)
        # validate and visualization
        result_dir = os.path.join(args.workspace, 'val_results')
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)
        model_dir = os.path.join(args.workspace, 'models')
        if not os.path.exists(model_dir):
            os.mkdir(model_dir)
        if (epoch+1) % args.val_interval == 0:
            val_img_path = os.path.join(result_dir, 'Ep_%04d_imgs.png' % (epoch+1))
            val_anatomy_path = os.path.join(result_dir, 'Ep_%04d_anatomys.png' % (epoch+1))
            val_dice = validate_slice(m_encoder, a_encoder, segmentor, decoder, val_loader, val_img_path,
                                      val_anatomy_path, args, writer, epoch)
            logging.info('\n Epoch[%4d/%4d] --> Valid...' % (epoch+1, args.epoches))
            logging.info('\t [Dice Coef: mean=%.4f, cup=%.4f, disc=%.4f]' % (torch.mean(val_dice), val_dice[0], val_dice[1]))
            writer.add_scalars('Val/Dice', {'cup': val_dice[0], 'disc': val_dice[1],
                                            'mean': torch.mean(val_dice)}, epoch)
            # save best model (checkpointed on mean dice over cup+disc)
            if torch.mean(val_dice) >= torch.mean(best_val_dice):
                best_model_path = os.path.join(model_dir, 'best_model.pth')
                torch.save({'M_enc': m_encoder.state_dict(), 'A_enc': a_encoder.state_dict(), 'Seg': segmentor.state_dict(),
                            'Dec': decoder.state_dict()}, best_model_path)
                logging.info('\n Epoch[%4d/%4d] --> Dice improved from %.4f (cup=%.4f, disc=%.4f in epoch %4d) to %.4f (cup=%.4f, disc=%.4f)' %
                             (epoch+1, args.epoches, torch.mean(best_val_dice), best_val_dice[0], best_val_dice[1], best_epoch, torch.mean(val_dice),
                              val_dice[0], val_dice[1]))
                best_val_dice, best_epoch = val_dice, epoch+1
            else:
                logging.info('\n Epoch[%4d/%4d] --> Dice did not improved with %.4f (cup=%.4f, disc=%.4f in epoch %d)' %
                             (epoch+1, args.epoches, torch.mean(best_val_dice), best_val_dice[0], best_val_dice[1], best_epoch))
            # check for plateau
            scheduler.step(torch.mean(val_dice))
            # # save images
            # train_img_dir = utils.check_folder(os.path.join(args.exp_dir, 'train_img'))
            # train_img_path = os.path.join(train_img_dir, 'Ep_%04d_imgs_dice_%.4f.png' % (epoch, val_dice))
            # utils.save_imgs(img.cpu().detach(), gt.cpu().detach(), seg_pred.cpu().detach(), reco.cpu().detach(),
            #                 train_img_path)
            # train_anatomy_path = os.path.join(train_img_dir, 'Ep_%04d_anatomys_dice_%.4f.png' % (epoch, val_dice))
            # utils.save_anatomy_factors(a_out[0].cpu().detach(), train_anatomy_path)
            # save model (periodic snapshots only near the end of training)
            if (epoch+1) >= (args.epoches-100) and (epoch+1) % args.save_interval == 0:
                model_path = os.path.join(model_dir, 'Ep_%04d_dice_%.4f.pth' % ((epoch+1), torch.mean(val_dice)))
                torch.save({'M_enc': m_encoder.state_dict(), 'A_enc': a_encoder.state_dict(), 'Seg': segmentor.state_dict(),
                            'Dec': decoder.state_dict(), 'optim': optimizer.state_dict()}, model_path)
                logging.info('\t [Save Model] to %s' % model_path)
def main():
    """Script entry point: parse args, set up determinism/logging/data/models,
    then hand off to train()."""
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.deterministic:
        # seed every RNG source and force deterministic cuDNN kernels
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        np.random.seed(args.seed)  # Numpy module.
        random.seed(args.seed)  # Python random module.
        torch.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.enabled = True
    # define logger: workspace = exp_dir/test<ids>/<timestamp>
    now = datetime.now()
    args.workspace = os.path.join(args.exp_dir, 'test'+str(args.datasetTest)[1:-1].replace(", ","_"), now.strftime('%Y%m%d_%H%M%S.%f'))
    if not os.path.exists(args.workspace):
        os.makedirs(args.workspace)
    logging.basicConfig(filename=os.path.join(args.workspace, 'train.log'), level=logging.INFO,
                        format='%(asctime)s %(message)s')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    # print all parameters
    for name, v in vars(args).items():
        logging.info(name + ': ' + str(v))
    # persist the full config next to the logs for reproducibility
    with open(os.path.join(args.workspace, 'config.yaml'), 'w') as f:
        yaml.safe_dump(args.__dict__, f, default_flow_style=False)
    # 1. dataset
    composed_transforms_tr = transforms.Compose([
        tr.RandomScaleCrop(256),
        # tr.RandomCrop(512),
        # tr.RandomRotate(),
        # tr.RandomFlip(),
        # tr.elastic_transform(),
        # tr.add_salt_pepper_noise(),
        # tr.adjust_light(),
        # tr.eraser(),
        tr.Normalize_tf(),
        tr.ToTensor()
    ])
    composed_transforms_ts = transforms.Compose([
        tr.RandomCrop(256),
        tr.Normalize_tf(),
        tr.ToTensor()
    ])
    # dataloader config
    train_set = FundusSegmentation(base_dir=args.data_dir, phase='train', splitid=args.datasetTrain,
                                   transform=composed_transforms_tr)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=6, drop_last=True)
    valid_set = FundusSegmentation(base_dir=args.data_dir, phase='test', splitid=args.datasetTest,
                                   transform=composed_transforms_ts)
    valid_loader = DataLoader(valid_set, batch_size=1, shuffle=False, num_workers=6)
    # model configuration
    m_encoder = MEncoder(z_length=args.z_length, in_channel=args.in_channel, img_size=args.data_size)
    a_encoder = AEncoder(in_channel=args.in_channel, width=256, height=256, ndf=16, num_output_channels=args.anatomy_channel, norm='batchnorm', upsample='bilinear')
    segmentor = Segmentor(num_output_channels=args.anatomy_channel, num_class=args.num_classes)
    decoder = Ada_Decoder(anatomy_out_channel=args.anatomy_channel, z_length=args.z_length, out_channel=args.in_channel)
    print('parameter numer:', sum([p.numel() for p in m_encoder.parameters()]+
                                  [p.numel() for p in a_encoder.parameters()]+
                                  [p.numel() for p in segmentor.parameters()]+
                                  [p.numel() for p in decoder.parameters()]))
    models = {'M_enc': m_encoder.cuda(), 'A_enc': a_encoder.cuda(), 'Seg': segmentor.cuda(), 'Dec': decoder.cuda()}
    if args.resume:
        # warm-start each sub-network from a checkpoint, keeping any keys the
        # current architecture does not have
        checkpoint = torch.load(args.resume)
        for keys, md in models.items():
            pretrained_dict = checkpoint[keys]
            model_dict = md.state_dict()
            # 1. filter out unnecessary keys
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
            # 2. overwrite entries in the existing state dict
            model_dict.update(pretrained_dict)
            # 3. load the new state dict
            md.load_state_dict(model_dict)
            models[keys] = md
        print('Resume models finished!')
    else:
        for md in models.values():
            initialize_weights(md, args.weight_init)
    # summary writer config
    run_dir = os.path.join(args.workspace, 'run')
    if not os.path.exists(run_dir):
        os.mkdir(run_dir)
    writer = SummaryWriter(log_dir=run_dir, comment=args.exp_dir.split('/')[-1])
    # train
    train(models, train_loader, valid_loader, writer, args)
# Script entry point.
if __name__ == '__main__':
    main()
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/test_cddsa_fundus.py | code/test_cddsa_fundus.py | #!/usr/bin/env python
import os
import cv2
import sys
from numpy.lib.type_check import iscomplex
import pytz
import tqdm
import torch
import random
import argparse
import numpy as np
import os.path as osp
import torch.nn.functional as F
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from dataloader import utils
from dataloader.ms_fundus.fundus_dataloader import FundusSegmentation
from dataloader.ms_fundus import fundus_transforms as tr
# from scipy.misc import imsave
from utils.utils_fundus import joint_val_image, postprocessing, save_per_img
from utils.losses import *
from datetime import datetime
from models.networks.sdnet import MEncoder, AEncoder, Segmentor, Ada_Decoder
from medpy.metric import binary
torch.set_default_tensor_type('torch.FloatTensor')
def main():
    """Evaluate a trained CDDSA model on one fundus target domain.

    Loads the four sub-networks from ``--model-file``, runs inference on the
    test split, and reports per-structure (cup/disc) Dice, HD95 and ASD, both
    to stdout and to CSV files under the experiment directory.

    Fix: ``dtype=np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is
    the documented replacement and behaves identically here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
    parser.add_argument('--seed', type=int, default=123, help='random seed')
    # dir config
    parser.add_argument('--exp_dir', type=str, default='./exp/ms_fundus/train_cddsa')
    parser.add_argument('--data_dir', type=str, default='/mnt/data1/guran/Data/ms_fundus')
    # data config
    parser.add_argument('--data_size', type=int, default=256)
    # GPU file
    parser.add_argument('-g', '--gpu', type=int, default=1)
    # test config
    parser.add_argument('--model-file', type=str, default='test1/20220929_095627.404592/models/Ep_0200_dice_0.8893.pth', help='Model path')
    # NOTE(review): `type=list` splits a CLI string into characters (e.g. "12"
    # -> ['1','2']); only the default works as intended. Consider
    # `nargs='+', type=int` if this must be settable from the command line.
    parser.add_argument('--datasetTest', type=list, default=[1], help='test folder id contain images ROIs to test')
    parser.add_argument('--dataset', type=str, default='test', help='test folder id contain images ROIs to test')
    parser.add_argument('--in_channel', type=int, default=3)
    parser.add_argument('--save_imgs', type=bool, default=False)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--num_classes', type=int, default=2)
    parser.add_argument('--z_length', type=int, default=16)
    parser.add_argument('--anatomy_channel', type=int, default=8)
    args = parser.parse_args()

    if args.deterministic:
        # seed every RNG source and force deterministic cuDNN kernels
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        np.random.seed(args.seed)  # Numpy module.
        random.seed(args.seed)  # Python random module.
        torch.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.enabled = True

    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    model_file = os.path.join(args.exp_dir, args.model_file)
    output_path = os.path.join(args.exp_dir, 'test' + str(args.datasetTest[0]), args.model_file.split('/')[1])
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    # 1. dataset
    composed_transforms_test = transforms.Compose([
        tr.Normalize_tf(),
        tr.ToTensor()
    ])
    db_test = FundusSegmentation(base_dir=args.data_dir, phase='test', splitid=args.datasetTest,
                                 transform=composed_transforms_test, state='prediction')
    batch_size = args.batch_size
    test_loader = DataLoader(db_test, batch_size=batch_size, shuffle=False, num_workers=1, pin_memory=True)

    # 2. model
    m_encoder = MEncoder(z_length=args.z_length, in_channel=args.in_channel, img_size=args.data_size)
    a_encoder = AEncoder(in_channel=args.in_channel, width=256, height=256, ndf=16, num_output_channels=args.anatomy_channel, norm='batchnorm', upsample='bilinear')
    segmentor = Segmentor(num_output_channels=args.anatomy_channel, num_class=args.num_classes)
    decoder = Ada_Decoder(anatomy_out_channel=args.anatomy_channel, z_length=args.z_length, out_channel=args.in_channel)
    if torch.cuda.is_available():
        models = {'M_enc': m_encoder.cuda(), 'A_enc': a_encoder.cuda(), 'Seg': segmentor.cuda(), 'Dec': decoder.cuda()}
    print('==> Loading model file: %s' % (model_file))
    checkpoint = torch.load(model_file)
    # load each sub-network, keeping only keys the current architecture has
    for keys, md in models.items():
        pretrained_dict = checkpoint[keys]
        model_dict = md.state_dict()
        # 1. filter out unnecessary keys
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        # 2. overwrite entries in the existing state dict
        model_dict.update(pretrained_dict)
        # 3. load the new state dict
        md.load_state_dict(model_dict)
        models[keys] = md

    val_cup_dice = []
    val_disc_dice = []
    total_hd_OC = []
    total_hd_OD = []
    total_asd_OC = []
    total_asd_OD = []
    timestamp_start = datetime.now(pytz.timezone('Asia/Hong_Kong'))
    total_num = 0

    models['M_enc'].eval()
    models['A_enc'].eval()
    models['Seg'].eval()
    models['Dec'].eval()
    for batch_idx, sample in tqdm.tqdm(enumerate(test_loader), total=len(test_loader), ncols=80, leave=False):
        for batch in sample:
            data = batch['image']
            target = batch['label']
            img_name = batch['img_name']
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            # Variable() is a no-op on modern PyTorch; kept for compatibility
            data, target = Variable(data), Variable(target)
            with torch.no_grad():
                a_out = models['A_enc'](data)
                prediction = models['Seg'](a_out)
                z_out, mu_out, logvar_out = models['M_enc'](data)
                # for reconstruction
                reco = models['Dec'](a_out, z_out)
            # resample prediction/image to the ground-truth resolution
            prediction = torch.nn.functional.interpolate(prediction, size=(target.size()[2], target.size()[3]), mode="bilinear")
            data = torch.nn.functional.interpolate(data, size=(target.size()[2], target.size()[3]), mode="bilinear")
            target_numpy = target.data.cpu()
            imgs = data.data.cpu()
            # sentinel value (100) reported when a structure is entirely missing
            hd_OC = 100
            asd_OC = 100
            hd_OD = 100
            asd_OD = 100
            for i in range(prediction.shape[0]):
                prediction_post = postprocessing(prediction[i], dataset=args.dataset)
                test_dice = val_dice_class(torch.from_numpy(prediction_post).permute(1,2,0).cuda(), target[i].permute(1,2,0), num_class=args.num_classes)
                val_cup_dice.append(test_dice[0].data.cpu().numpy())
                val_disc_dice.append(test_dice[1].data.cpu().numpy())
                # channel 0 = optic cup; skip distance metrics on empty masks
                if np.sum(prediction_post[0, ...]) < 1e-4:
                    hd_OC = 100
                    asd_OC = 100
                else:
                    hd_OC = binary.hd95(np.asarray(prediction_post[0, ...], dtype=bool),
                                        np.asarray(target_numpy[i, 0, ...], dtype=bool))
                    asd_OC = binary.asd(np.asarray(prediction_post[0, ...], dtype=bool),
                                        np.asarray(target_numpy[i, 0, ...], dtype=bool))
                # channel 1 = optic disc
                if np.sum(prediction_post[1, ...]) < 1e-4:
                    hd_OD = 100
                    asd_OD = 100
                else:
                    hd_OD = binary.hd95(np.asarray(prediction_post[1, ...], dtype=bool),
                                        np.asarray(target_numpy[i, 1, ...], dtype=bool))
                    asd_OD = binary.asd(np.asarray(prediction_post[1, ...], dtype=bool),
                                        np.asarray(target_numpy[i, 1, ...], dtype=bool))
                total_hd_OC.append(hd_OC)
                total_hd_OD.append(hd_OD)
                total_asd_OC.append(asd_OC)
                total_asd_OD.append(asd_OD)
                total_num += 1
                if args.save_imgs:
                    for img, lt, lp in zip([imgs[i]], [target_numpy[i]], [prediction_post]):
                        img, lt = utils.untransform(img, lt)
                        save_per_img(img.numpy().transpose(1, 2, 0),
                                     os.path.join(output_path, 'test_results'),
                                     img_name[i],
                                     lp, lt, mask_path=None, ext="bmp")

    print('OC:', val_cup_dice)
    print('OD:', val_disc_dice)
    # per-image dice table
    import csv
    with open(output_path+'/Dice_results.csv', 'a+') as result_file:
        wr = csv.writer(result_file, dialect='excel')
        wr.writerow(['Result in: '+args.model_file])
        for index in range(len(val_cup_dice)):
            wr.writerow([torch.from_numpy(val_cup_dice[index]), torch.from_numpy(val_disc_dice[index])])

    # aggregate statistics (mean/std) over all test images
    val_cup_dice_mean = np.mean(val_cup_dice)
    val_cup_dice_std = np.std(val_cup_dice)
    val_disc_dice_mean = np.mean(val_disc_dice)
    val_disc_dice_std = np.std(val_disc_dice)
    total_dice_mean = np.mean(val_cup_dice+val_disc_dice)
    total_dice_std = np.std(val_cup_dice+val_disc_dice)
    total_hd_OC_mean = np.mean(total_hd_OC)
    total_hd_OC_std = np.std(total_hd_OC)
    total_asd_OC_mean = np.mean(total_asd_OC)
    total_asd_OC_std = np.std(total_asd_OC)
    total_hd_OD_mean = np.mean(total_hd_OD)
    total_hd_OD_std = np.std(total_hd_OD)
    total_asd_OD_mean = np.mean(total_asd_OD)
    total_asd_OD_std = np.std(total_asd_OD)
    print('''\n==>val_cup_dice : {0}-{1}'''.format(val_cup_dice_mean, val_cup_dice_std))
    print('''\n==>val_disc_dice : {0}-{1}'''.format(val_disc_dice_mean, val_disc_dice_std))
    print('''\n==>val_average_dice : {0}-{1}'''.format(total_dice_mean, total_dice_std))
    print('''\n==>ave_hd_OC : {0}-{1}'''.format(total_hd_OC_mean, total_hd_OC_std))
    print('''\n==>ave_hd_OD : {0}-{1}'''.format(total_hd_OD_mean, total_hd_OD_std))
    print('''\n==>ave_asd_OC : {0}-{1}'''.format(total_asd_OC_mean, total_asd_OC_std))
    print('''\n==>ave_asd_OD : {0}-{1}'''.format(total_asd_OD_mean, total_asd_OD_std))
    with open(osp.join(output_path, 'test' + str(args.datasetTest[0]) + '_log.csv'), 'a') as f:
        elapsed_time = (
            datetime.now(pytz.timezone('Asia/Hong_Kong')) -
            timestamp_start).total_seconds()
        log = [['batch-size: '] + [batch_size] + [args.model_file] + ['cup dice coefficence: '] + \
               [val_cup_dice_mean]+['-']+[val_cup_dice_std] + ['disc dice coefficence: '] + \
               [val_disc_dice_mean]+['-']+[val_disc_dice_std] + ['total dice coefficence: '] + \
               [total_dice_mean]+['-']+[total_dice_std] + ['average_hd_OC: '] + \
               [total_hd_OC_mean]+['-']+[total_hd_OC_std] + ['average_hd_OD: '] + \
               [total_hd_OD_mean]+['-']+[total_hd_OD_std] + ['ave_asd_OC: '] + \
               [total_asd_OC_mean]+['-']+[total_asd_OC_std] + ['average_asd_OD: '] + \
               [total_asd_OD_mean]+['-']+[total_asd_OD_std] + ['elapse time: '] + \
               [elapsed_time]]
        log = map(str, log)
        f.write(','.join(log) + '\n')
# Script entry point.
if __name__ == '__main__':
    main()
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/settings.py | code/settings.py | import ast
import configparser
from collections.abc import Mapping
class Settings(Mapping):
    """Read-only mapping over a parsed INI settings file.

    Keys are section names; each value is a dict of that section's options
    with values decoded via ``ast.literal_eval`` (see ``_parse_values``).

    Fix: ``__iter__`` previously returned ``settings_dict.items()``, which
    breaks the ``collections.abc.Mapping`` contract — iterating a mapping
    must yield its *keys*, so ``list(settings)``, ``settings.keys()`` and
    ``dict(settings)`` all produced (key, value) tuples instead of keys.
    """

    def __init__(self, setting_file='settings.ini'):
        config = configparser.ConfigParser()
        config.read(setting_file)
        self.settings_dict = _parse_values(config)

    def __getitem__(self, key):
        return self.settings_dict[key]

    def __len__(self):
        return len(self.settings_dict)

    def __iter__(self):
        # Mapping contract: iterate over keys (section names), not items.
        return iter(self.settings_dict)
def _parse_values(config):
config_parsed = {}
for section in config.sections():
config_parsed[section] = {}
for key, value in config[section].items():
config_parsed[section][key] = ast.literal_eval(value)
return config_parsed
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/train_cscada.py | code/train_cscada.py | # -*- coding: utf-8 -*-
# @Time : 2021/5/6 16:48
# @Author : Ran.Gu
# @Email : guran924@std.uestc.edu.cn
'''
This code is for 'Contrastive Semi-supervised Learning for Cross Anatomical Structure Domain Adaptative Segmentation'.
We used mean teacher as the backbone for semi-supervised domain adaptation across organs with similar shape structures.
While we introduced contrastive learning to encourage the source and target to be consistent in a latent space.
'''
import sys
import math
import os, random, torch, shutil, logging
from tqdm import tqdm
from settings import Settings
import numpy as np
import time
## experimental seed
torch.manual_seed(123)
torch.cuda.manual_seed(123)
torch.cuda.manual_seed_all(123)
np.random.seed(123) # Numpy module.
random.seed(123) # Python random module.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_default_tensor_type('torch.FloatTensor')
###
from utils import ramps, losses
from dataloader.ms_cmr.make_datalist import split_filelist
from dataloader.ms_fundus.Refuge_dataloader import RefugeDataset, ToTensor
from dataloader.ms_cmr.Cmr_dataloader import CmrsegDataset, TwoStreamBatchSampler
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from models.networks.cscada_net import Unet_dsbn_cont
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# os.chdir(sys.path[0])
def _val_on_the_fly(model:nn.Module, loader_val:list, writer, iter_num):
    '''
    validation, and save the results in the writer
    :param model: segmentation model
    :param loader_val: validation loader: list
    :param writer: summarywriter
    :param iter_num: current iteration
    :return: loss
    NOTE: also reads the module-level globals ``net_params`` (num_classes)
    and ``train_params`` (batch_size) defined in the __main__ block.
    Returns (mean BCE loss, 1 - mean dice, per-class dice tensor).
    Leaves the model in eval mode — the caller re-enables train mode.
    '''
    model.eval()
    bce_criterion = nn.BCELoss()
    loss_bce_val = 0
    dice_val = 0
    # prediction by a patient
    for num, batch_val in enumerate(loader_val):
        img_val, gt_val = batch_val['image'].cuda(), batch_val['label'].cuda()
        with torch.no_grad():
            # domain_label=1 selects the target-domain BN branch (DSBN)
            pred_val, _ = model(img_val, domain_label=1)
            pred_val = F.softmax(pred_val, dim=1)
        soft_gt = losses.get_soft_label(gt_val, net_params['num_classes'])
        loss_bce_val += bce_criterion(pred_val, soft_gt.permute(0, 3, 1, 2))
        pred_mask = torch.argmax(pred_val, dim=1, keepdim=True)
        pred_softmask = losses.get_soft_label(pred_mask, net_params['num_classes'])
        # dice over foreground classes only (background channel 0 excluded)
        dice_val += losses.val_dice_class(pred_softmask[..., 1:], soft_gt[..., 1:], net_params['num_classes']-1)
    # NOTE(review): only the LAST batch's images/preds are written below —
    # the grids are built outside the loop from the final iteration's tensors.
    grid_image = make_grid(img_val, nrow=train_params['batch_size'], normalize=True)
    writer.add_image('val/images', grid_image, iter_num)
    grid_image = make_grid(gt_val, nrow=train_params['batch_size'], normalize=True)
    writer.add_image('val/ground_truths', grid_image, iter_num)
    grid_image = make_grid(pred_mask.float(), nrow=train_params['batch_size'], normalize=True)
    writer.add_image('val/preds', grid_image, iter_num)
    loss_bce_val /= len(loader_val)
    dice_val_class = dice_val / len(loader_val)
    loss_dice_val = 1 - torch.mean(dice_val_class)
    # summarywriter
    writer.add_scalar('val/loss_bce', loss_bce_val, iter_num)
    writer.add_scalar('val/loss_dice', loss_dice_val, iter_num)
    # for tag, value in model.named_parameters():
    #     tag = tag.replace('.', '/')
    #     writer.add_histogram('weights/' + tag, value.data.cpu().numpy(), iter_num)
    #     writer.add_histogram('grads/' + tag, value.grad.data.cpu().numpy(), iter_num)
    return loss_bce_val, loss_dice_val, dice_val_class
def train(model:nn.Module, ema_model:nn.Module, loader_train_s:list,
          loader_train_t:list, loader_valid_t:list, train_params:dict, writer):
    """CSCADA training loop (mean teacher + cross-domain contrastive loss).

    model / ema_model: student and EMA-teacher networks (DSBN U-Net).
    loader_train_s / loader_train_t: source- and target-domain loaders; the
        target batches contain ``labeled_bs`` labeled samples first.
    loader_valid_t: target-domain validation loader.
    train_params: TRAINING section of settings.ini (lr, weights, intervals).
    writer: tensorboard SummaryWriter.

    Fix: ``update_ema_variables`` was called twice per iteration — once
    unconditionally and once more behind the ``ema_frozen_epoch`` guard — so
    the teacher was never actually frozen and, past the frozen epoch, the EMA
    step was applied twice (halving the effective averaging window). Only the
    guarded update is kept, matching the ``ema_frozen_epoch`` parameter.
    """
    # define optimizer
    if train_params['optimizer'] == 'adam':
        optimizer = optim.Adam(model.parameters(), lr=train_params['learning_rate'],
                               weight_decay=train_params['weight_decay'])
    elif train_params['optimizer'] == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=train_params['learning_rate'], momentum=train_params['momentum'],
                              weight_decay=train_params['weight_decay'])
    # define losses
    dice_criterion = losses.DiceLoss()
    similar_criterion = nn.CosineSimilarity()
    if train_params['consistency_type'] == 'mse':
        consistency_criterion = losses.mse_loss
    elif train_params['consistency_type'] == 'kl':
        consistency_criterion = losses.kl_loss
    else:
        assert False, train_params['consistency_type']
    # target loader is cycled independently of the (epoch-defining) source loader
    loader_t_iter = iter(loader_train_t)
    logging.info("{} itertations per epoch".format(len(loader_train_s)))

    iter_num = 0
    max_epoch = train_params['iterations'] // len(loader_train_s)
    lr_ = train_params['learning_rate']
    best_val_dice = torch.tensor(0).float()
    best_val_step = 0
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        time1 = time.time()
        for k, batch_s in enumerate(loader_train_s):
            try:
                batch_t = next(loader_t_iter)
            except StopIteration:
                loader_t_iter = iter(loader_train_t)
                batch_t = next(loader_t_iter)
            time2 = time.time()
            # source and target data
            image_s, label_s = batch_s['image'].cuda(), batch_s['label'].cuda()
            image_t, label_t = batch_t['image'].cuda(), batch_t['label'].cuda()
            # teacher sees a noised copy of the UNlabeled target images
            unlabeled_image_t = image_t[train_params['labeled_bs']:]
            noise = torch.clamp(torch.randn_like(unlabeled_image_t) * 0.05, -0.2, 0.2)
            ema_inputs_t = unlabeled_image_t + noise

            model.train()
            ema_model.train()
            # four forward passes: each domain through both BN branches
            # (domain_label selects the DSBN branch: 0 = source, 1 = target)
            predouts, high_r_s_sb = model(image_s, domain_label=0)
            predoutt, high_r_t_tb = model(image_t, domain_label=1)
            _, high_r_s_tb = model(image_s, domain_label=1)
            _, high_r_t_sb = model(image_t, domain_label=0)
            with torch.no_grad():
                ema_output, _ = ema_model(ema_inputs_t, domain_label=1)

            # segmentation loss (source: fully supervised)
            loss_bce_s = F.cross_entropy(predouts, label_s.squeeze().long())
            predout_s = F.softmax(predouts, dim=1)
            pred_mask_s = torch.argmax(predout_s, dim=1, keepdim=True)  # used by the (commented) viz below
            soft_label_s = losses.get_soft_label(label_s, net_params['num_classes'])
            loss_dice_s = dice_criterion(predout_s.permute(0, 2, 3, 1)[..., 1:], soft_label_s[..., 1:], net_params['num_classes']-1)
            loss_s = 0.5 * (loss_bce_s + loss_dice_s)
            # target: supervised only on the first `labeled_bs` samples
            loss_bce_t = F.cross_entropy(predoutt[:train_params['labeled_bs']], label_t[:train_params['labeled_bs']].squeeze(dim=1).long())
            predout_t = F.softmax(predoutt, dim=1)
            soft_label_t = losses.get_soft_label(label_t[:train_params['labeled_bs']], net_params['num_classes'])
            loss_dice_t = dice_criterion(predout_t.permute(0, 2, 3, 1)[:train_params['labeled_bs']][..., 1:], soft_label_t[..., 1:], net_params['num_classes']-1)
            loss_t = 0.5 * (loss_bce_t + loss_dice_t)

            # mean-teacher consistency on unlabeled target samples
            consistency_weight = get_current_consistency_weight(iter_num//len(loader_train_s), max_epoch)
            consistency_dist = consistency_criterion(predout_t[train_params['labeled_bs']:], ema_output)  # (batch, 3, 256, 256)
            consistency_dist = torch.mean(consistency_dist)
            consistency_loss = consistency_dist * consistency_weight

            # contrastive loss: pull (source@source-branch, target@target-branch)
            # features together, push same-image cross-branch features apart
            pos_s2t_similar = similar_criterion(high_r_s_sb, high_r_t_tb) / train_params['temp_fac']
            den_s2t1_similar = similar_criterion(high_r_s_sb, high_r_s_tb) / train_params['temp_fac']
            den_s2t2_similar = similar_criterion(high_r_s_sb, high_r_t_sb) / train_params['temp_fac']
            contrast_loss_s2t = -torch.log(torch.exp(pos_s2t_similar)/(torch.exp(pos_s2t_similar)+
                                torch.sum(torch.exp(den_s2t1_similar)+torch.exp(den_s2t2_similar))))
            pos_t2s_similar = similar_criterion(high_r_t_tb, high_r_s_sb) / train_params['temp_fac']
            den_t2s1_similar = similar_criterion(high_r_t_tb, high_r_t_sb) / train_params['temp_fac']
            den_t2s2_similar = similar_criterion(high_r_t_tb, high_r_s_tb) / train_params['temp_fac']
            contrast_loss_t2s = -torch.log(torch.exp(pos_t2s_similar)/(torch.exp(pos_t2s_similar)+
                                torch.sum(torch.exp(den_t2s1_similar)+torch.exp(den_t2s2_similar))))
            contrast_loss_inter = (contrast_loss_s2t + contrast_loss_t2s)/2
            contrast_loss = 0.1*torch.mean(contrast_loss_inter)

            # total loss
            loss = 0.5*(loss_s+loss_t) + consistency_loss + contrast_loss
            # backforward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # update ema model — only after the teacher warm-up/frozen phase.
            # (A second, unconditional call used to precede this guard; it made
            # the frozen phase a no-op and double-stepped the EMA afterwards.)
            if epoch_num > train_params['ema_frozen_epoch']:
                update_ema_variables(model, ema_model, train_params['ema_decay'], iter_num)

            # tensorboard
            iter_num += 1
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss_bce_s', loss_bce_s, iter_num)
            writer.add_scalar('loss/loss_dice_s', loss_dice_s, iter_num)
            writer.add_scalar('loss/loss_s', loss_s, iter_num)
            writer.add_scalar('loss/loss_bce_t', loss_bce_t, iter_num)
            writer.add_scalar('loss/loss_dice_t', loss_dice_t, iter_num)
            writer.add_scalar('loss/loss_t', loss_t, iter_num)
            writer.add_scalar('loss/contrast_loss', contrast_loss, iter_num)
            writer.add_scalar('train/consistency_loss', consistency_loss, iter_num)
            # writer.add_scalar('train/consistency_dist', consistency_dist, iter_num)
            writer.add_scalar('train/consistency_weight', consistency_weight, iter_num)
            # if iter_num % (len(loader_train_s)*10) == 0:
            #     grid_image = make_grid(image_s, nrow=train_params['batch_size'], normalize=True)
            #     writer.add_image('train/images_s', grid_image, iter_num)
            #     grid_image = make_grid(label_s, nrow=train_params['batch_size'], normalize=True)
            #     writer.add_image('train/ground_truths_s', grid_image, iter_num)
            #     grid_image = make_grid(pred_mask_s.float(), nrow=train_params['batch_size'], normalize=True)
            #     writer.add_image('train/preds_s', grid_image, iter_num)

            # validation (once per source epoch)
            if iter_num % len(loader_train_s) == 0:
                loss_bce_val, loss_dice_val, dice_val_class = _val_on_the_fly(model, loader_valid_t, writer, iter_num)
                logging.info('Validation --> loss_bce: %.4f; loss_dice: %.4f; mean_dice: %.4f (cup_dice: %.4f; disc_dice: %.4f)' %
                             (loss_bce_val.item(), loss_dice_val.item(), torch.mean(dice_val_class).item(), dice_val_class[0].item(), dice_val_class[1].item()))
                if torch.mean(dice_val_class) > torch.mean(best_val_dice):
                    best_val_dice = dice_val_class
                    best_val_step = iter_num
                    torch.save(model.state_dict(), os.path.join(common_params['exp_dir'], 'best_model.pth'))
                    logging.info('********** Best model (dice: %.4f; cup_dice: %.4f; disc_dice: %.4f) is updated at step %d.' %
                                 (torch.mean(dice_val_class).item(), dice_val_class[0].item(), dice_val_class[1].item(), iter_num))
                else:
                    logging.info('********** Best model (dice: %.4f; cup_dice: %.4f; disc_dice: %.4f) was at step %d, current dice: %.4f.' %
                                 (torch.mean(best_val_dice).item(), best_val_dice[0].item(), best_val_dice[1].item(), best_val_step, torch.mean(dice_val_class).item()))

            # print losses
            if iter_num % len(loader_train_s) == 0:
                logging.info('(Iteration %d, lr: %.6f) --> loss_s: %.4f; loss_t: %.4f; loss_bce_t: %.4f; loss_dice_t: %.4f; contrast_loss: %.4f; consistency_loss: %.4f; '
                             % (iter_num, lr_, loss_s.item(), loss_t.item(), loss_bce_t.item(), loss_dice_t.item(), contrast_loss.item(), consistency_loss.item()))

            # save the model
            if iter_num % 4500 == 0:
                save_mode_path = os.path.join(common_params['exp_dir'],
                                              'iter_%d_dice_%.4f.pth' % (iter_num, torch.mean(dice_val_class).item()))
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))

            # change learning rate (exponential decay every lr_decay_freq iters)
            if iter_num % train_params['lr_decay_freq'] == 0:
                lr_ = train_params['learning_rate'] * 0.95 ** (iter_num // train_params['lr_decay_freq'])
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_
def get_current_consistency_weight(epoch, max_epoches):
    """Sigmoid ramp-up of the consistency-loss weight.

    Scales the configured ``train_params['consistency_rate']`` by a sigmoid
    ramp that rises from 0 to 1 over ``max_epoches`` (Laine & Aila, temporal
    ensembling). Reads the module-level ``train_params`` dict and the
    ``ramps`` helper module.
    """
    # Consistency ramp-up from https://arxiv.org/abs/1610.02242
    return train_params['consistency_rate'] * ramps.sigmoid_rampup(epoch, max_epoches)
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the EMA (teacher) parameters in place.

    ema = alpha * ema + (1 - alpha) * param, where the effective alpha ramps
    up from 0 towards the configured decay so the EMA tracks the plain
    average during early steps.

    Args:
        model: student network providing the current parameters.
        ema_model: teacher network whose parameters are updated in place.
        alpha: target EMA decay rate (e.g. 0.99).
        global_step: current optimisation step (0-based).
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_param, param in zip(ema_model.parameters(), model.parameters()):
        # add_(tensor, alpha=...) replaces the deprecated
        # add_(Number, Tensor) overload used by the original code.
        ema_param.data.mul_(alpha).add_(param.data, alpha=1 - alpha)
if __name__ == '__main__':
    # config file
    settings = Settings()
    common_params, data_params, net_params, train_params, eval_params = settings['COMMON'], settings['DATA'], settings[
        'NETWORK'], settings['TRAINING'], settings['EVAL']
    # workspace
    # NOTE: the experiment directory is wiped on every run before settings
    # and logs are written into it.
    shutil.rmtree(common_params['exp_dir'], ignore_errors=True)
    os.makedirs(common_params['exp_dir'], exist_ok=True)
    shutil.copy('./settings.ini', common_params['exp_dir'])
    # Log to both logs.txt inside the experiment dir and the console.
    logging.basicConfig(filename=os.path.join(common_params['exp_dir'], 'logs.txt'),
                        level=logging.DEBUG, format='%(asctime)s %(message)s')
    logging.getLogger().addHandler(logging.StreamHandler())
    logging.info('Output path = %s' % common_params['exp_dir'])
    if data_params['resplit_data']:
        split_filelist(data_params)
    # upload dataset
    # Source-domain training loader (fully labeled).
    dataset_train_s = RefugeDataset(data_list_dir=data_params['source_data_list_dir'], data_dir=data_params['source_data_dir'],
                                    train_type='train', image_type='image', transform=transforms.Compose([ToTensor()]))
    loader_train_s = DataLoader(dataset=dataset_train_s, batch_size=train_params['batch_size'], shuffle=True, num_workers=6,
                                drop_last=True, pin_memory=True)
    # Target-domain (MS-CMR) training and validation sets.
    dataset_train_t = CmrsegDataset(data_list_dir=data_params['target_data_list_dir'], data_dir=data_params['target_data_dir'],
                                    train_type='train', image_type='image', transform=transforms.Compose([ToTensor()]))
    dataset_valid_t = CmrsegDataset(data_list_dir=data_params['target_data_list_dir'], data_dir=data_params['target_data_dir'],
                                    train_type='test', image_type='image', transform=transforms.Compose([ToTensor()]))
    # Semi-supervised split: the first seen_target_percent of the target set
    # is treated as labeled, the remainder as unlabeled.
    labeled_idxs = list(range(round(dataset_train_t.__len__()*data_params['seen_target_percent'])))
    unlabled_idxs = list(range(round(dataset_train_t.__len__()*data_params['seen_target_percent']), dataset_train_t.__len__()))
    random.shuffle(unlabled_idxs)
    # Each target batch mixes labeled_bs labeled samples with
    # (batch_size - labeled_bs) unlabeled ones.
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabled_idxs, train_params['batch_size'], train_params['batch_size']-train_params['labeled_bs'])
    def worker_init_fn(worker_id):
        # Give each DataLoader worker a distinct, reproducible seed.
        random.seed(1337+worker_id)
    loader_train_t = DataLoader(dataset_train_t, batch_sampler=batch_sampler, pin_memory=True, worker_init_fn=worker_init_fn)
    loader_valid_t = DataLoader(dataset_valid_t, batch_size=1, shuffle=False, num_workers=6,
                                pin_memory=True, drop_last=False)
    # summarywriter
    writer = SummaryWriter(log_dir=common_params['exp_dir'])
    # model
    def create_model(ema=False):
        # Build the DSBN U-Net; the EMA (teacher) copy has its parameters
        # detached so it is only updated via update_ema_variables.
        model = Unet_dsbn_cont(net_params).cuda()
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    ema_model = create_model(ema=True)
    # for var_name in model.state_dict():
    #     print(f'{var_name}, {model.state_dict()[var_name].shape}')
    train(model, ema_model, loader_train_s, loader_train_t, loader_valid_t, train_params, writer)
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/test_cscada_mscmrseg.py | code/test_cscada_mscmrseg.py | import os
import torch
import random
import cv2
import numpy as np
import pandas as pd
import torch.utils.data as Data
import torch.nn.functional as F
from distutils.version import LooseVersion
from dataloader.ms_cmr.Cmr_dataloader import CmrsegDataset, ToTensor
from settings import Settings
from torchvision import transforms
from models.networks.cscada_net import Unet_dsbn_cont
from utils.losses import get_soft_label, val_dice, val_dice_class
from utils.losses import Intersection_over_Union, Intersection_over_Union_class
from utils.binary import assd, precision, recall
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from time import *
### experiment seed
# Fix every RNG source (PyTorch CPU/GPU, NumPy, Python) so runs are
# reproducible; cudnn is forced into deterministic mode at the cost of speed.
torch.manual_seed(123)
torch.cuda.manual_seed(123)
torch.cuda.manual_seed_all(123)
np.random.seed(123)  # Numpy module.
random.seed(123)  # Python random module.
torch.manual_seed(123)  # NOTE(review): duplicate of the first call above.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# New tensors default to CUDA float32.
torch.set_default_tensor_type('torch.cuda.FloatTensor')
def create_visual_anno(anno):
    """Map an integer label map to a fixed-colour RGB visualisation.

    Args:
        anno: 2-D integer array of class ids (values 0..6 accepted).

    Returns:
        uint8 array of shape (H, W, 3), one colour per class.
    """
    assert np.max(anno) < 7  # only 7 classes are supported, add new color in label2color_dict
    label2color_dict = {
        0: [0, 0, 0],
        1: [0, 0, 255],
        2: [0, 255, 0],
        3: [0, 0, 255],
        4: [255, 215, 0],
        5: [160, 32, 100],
        6: [255, 64, 64],
        7: [139, 69, 19],
    }
    # Vectorised lookup: build a (classes, 3) palette once and index it with
    # the whole label map instead of looping over every pixel (the original
    # per-pixel double loop was O(H*W) in Python).
    palette = np.zeros((8, 3), dtype=np.uint8)
    for cls, color in label2color_dict.items():
        palette[cls] = color
    visual_anno = palette[np.asarray(anno).astype(np.intp)]
    return visual_anno
def test(test_loader, model):
    """Evaluate *model* on the target-domain test set and print metrics.

    Computes per-batch Dice and IoU over the foreground classes and the
    average symmetric surface distance (ASSD) for channels 1 and 2, plus the
    total inference time. Reads the module-level ``net_params`` and
    ``eval_params`` dictionaries; writes per-batch Dice to a CSV.
    """
    dice = []
    iou = []
    # NOTE(review): despite the cup/disc names (fundus-code heritage) these
    # hold ASSD for channels 1 and 2, reported below as LV and Myo.
    Assd_cup = []
    Assd_disc = []
    infer_time = []
    model.eval()
    for step, data_batch in enumerate(test_loader):
        img = data_batch['image'].cuda()  # data size: B x C x H x W
        gt = data_batch['label'].cuda()  # label size: B x C x H x W
        begin_time = time()
        with torch.no_grad():
            # domain_label=1 selects the target-domain DSBN branch.
            output, _ = model(img, domain_label=1)
            output = F.softmax(output, dim=1)
        end_time = time()
        pred_time = end_time - begin_time
        infer_time.append(pred_time)
        # Hard label map, then one-hot encodings of prediction and GT.
        output_dis = torch.argmax(output, dim=1, keepdim=True)
        output_soft = get_soft_label(output_dis, net_params['num_classes'])  # data shape: B x H x W x C
        soft_gt = get_soft_label(gt, net_params['num_classes'])
        # input_arr = np.squeeze(image.cpu().numpy()).astype(np.float32)
        label_arr = soft_gt.permute(0, 3, 1, 2).cpu().numpy().astype(np.uint8)  # data size: B x H x W x C
        # label_shw = np.squeeze(target.cpu().numpy()).astype(np.uint8)
        output_arr = output_soft.permute(0, 3, 1, 2).cpu().numpy().astype(np.uint8)  # data shape: B x C x H x W
        # save the image
        # for i in range(output_dis.shape[0]):
        #     pred_img = output_dis.squeeze(dim=1)[i, ...].cpu().numpy()
        #     pred_img = create_visual_anno(pred_img)
        #     cv2.imwrite(eval_params['snapshot_path']+'/results_img/pred_{}.jpg'.format(str(step*train_params['batch_size']+i+1)), pred_img)
        # Foreground-only metrics (channel 0 = background is skipped).
        all_iou = Intersection_over_Union_class(output_soft[:, :, :, 1:], soft_gt[:, :, :, 1:], net_params['num_classes']-1)  # the iou accuracy
        all_dice = val_dice_class(output_soft[:, :, :, 1:], soft_gt[:, :, :, 1:], net_params['num_classes']-1)  # the dice accuracy
        # ASSD is undefined for an empty prediction; fall back to a fixed
        # penalty of 100 when a channel predicts no foreground at all.
        if 0 != np.count_nonzero(output_arr[:, 1, :, :]):
            cup_assd = assd(output_arr[:, 1, :, :], label_arr[:, 1, :, :], voxelspacing=(1, 1.485, 1.485))
        elif 0 == np.count_nonzero(output_arr[:, 1, :, :]):
            cup_assd = 100
        Assd_cup.append(cup_assd)
        if 0 != np.count_nonzero(output_arr[:, 2, :, :]):
            disc_assd = assd(output_arr[:, 2, :, :], label_arr[:, 2, :, :], voxelspacing=(1, 1.485, 1.485))
        elif 0 == np.count_nonzero(output_arr[:, 2, :, :]):
            disc_assd = 100
        Assd_disc.append(disc_assd)
        dice_np = all_dice.cpu().numpy()
        dice.append(dice_np)
        # all_precision = precision(output_arr[:, 1, :, :], label_arr[:, 1, :, :])
        # all_recall = recall(output_arr[:, 1, :, :], label_arr[:, 1, :, :])
        iou_np = all_iou.cpu().numpy()
        iou.append(iou_np)
    # Persist per-batch Dice values, then aggregate mean/std per class.
    df = pd.DataFrame(data=dice)
    df.to_csv(eval_params['snapshot_path'] + '/refine_result.csv')
    all_dice_mean = np.average(np.average(dice, axis=1))
    all_dice_std = np.std(np.average(dice, axis=1))
    dice_mean = np.average(dice, axis=0)
    dice_std = np.std(dice, axis=0)
    iou_mean = np.average(iou, axis=0)
    iou_std = np.std(iou, axis=0)
    all_assd_mean = np.average(Assd_cup+Assd_disc)
    all_assd_std = np.std(Assd_cup+Assd_disc)
    cup_assd_mean = np.average(Assd_cup)
    cup_assd_std = np.std(Assd_cup)
    disc_assd_mean = np.average(Assd_disc)
    disc_assd_std = np.std(Assd_disc)
    all_time = np.sum(infer_time)
    print(dice_mean, dice_std)
    print('The cmr mean Accuracy: {cmr_dice_mean: .4f}; The cmr Accuracy std: {cmr_dice_std: .4f}'.format(
        cmr_dice_mean=all_dice_mean, cmr_dice_std=all_dice_std))
    print('The LV mean dice: {cup_dice_mean: .4f}; The LV dice std: {cup_dice_std: .4f}'.format(
        cup_dice_mean=dice_mean[0], cup_dice_std=dice_std[0]))
    print('The Myo mean dice: {disc_dice_mean: .4f}; The Myo dice std: {disc_dice_std: .4f}'.format(
        disc_dice_mean=dice_mean[1], disc_dice_std=dice_std[1]))
    print('The cmr mean Assd: {cmr_dice_mean: .4f}; The cmr Assd std: {cmr_dice_std: .4f}'.format(
        cmr_dice_mean=all_assd_mean, cmr_dice_std=all_assd_std))
    print('The LV mean Assd: {cmr_assd_mean: .4f}; The LV Assd std: {cmr_assd_std: .4f}'.format(
        cmr_assd_mean=cup_assd_mean, cmr_assd_std=cup_assd_std))
    print('The Myo mean Assd: {cmr_assd_mean: .4f}; The Myo Assd std: {cmr_assd_std: .4f}'.format(
        cmr_assd_mean=disc_assd_mean, cmr_assd_std=disc_assd_std))
    print('The inference time: {time: .4f}'.format(time=all_time))
if __name__ == '__main__':
    # The no_grad / tensor API used above needs PyTorch >= 0.4.0.
    assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), 'PyTorch>=0.4.0 is required'
    # config file
    settings = Settings()
    common_params, data_params, net_params, train_params, eval_params = settings['COMMON'], settings['DATA'], settings[
        'NETWORK'], settings['TRAINING'], settings['EVAL']
    # loading the dataset
    print('loading the {0} dataset ...'.format('test'))
    test_dataset = CmrsegDataset(data_list_dir=data_params['target_data_list_dir'], data_dir=data_params['target_data_dir'],
                                 train_type='test', image_type='image', transform=transforms.Compose([ToTensor()]))
    testloader = Data.DataLoader(dataset=test_dataset, batch_size=train_params['batch_size'], shuffle=False)
    print('Loading is done\n')
    # define model
    def create_model(ema=False):
        # Build the DSBN U-Net; ema=True detaches all parameters (teacher copy).
        model = Unet_dsbn_cont(net_params).cuda()
        if ema:
            for param in model.parameters():
                param.detach_()
        return model
    model = create_model()
    # Load the trained best model
    modelname = os.path.join(eval_params['snapshot_path'], 'best_model.pth')
    if os.path.isfile(modelname):
        print("=> Loading checkpoint '{}'".format(modelname))
        checkpoint = torch.load(modelname)
        model.load_state_dict(checkpoint)
        # optimizer.load_state_dict(checkpoint['opt_dict'])
        print("=> Loaded saved the best model.")
    else:
        # Falls through and evaluates the randomly initialised model.
        print("=> No checkpoint found at '{}'".format(modelname))
    test(testloader, model)
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/weight_init.py | code/models/weight_init.py | import torch
import os
import sys
def init_dcgan_weights(model):
    """DCGAN-style init: conv weights ~ N(0, 0.02), conv biases zeroed."""
    conv_layers = (m for m in model.modules() if isinstance(m, torch.nn.Conv2d))
    for layer in conv_layers:
        layer.weight.data.normal_(0.0, 0.02)
        if layer.bias is not None:
            layer.bias.data.zero_()
def initialize_weights(model, init = "xavier"):
    """Initialise conv/linear weights of *model* with the chosen scheme.

    Supported schemes: "xavier", "kaiming", "gaussian"/"normal". BatchNorm2d
    layers get weight=1 and bias=0. An unknown scheme prints an error to
    stderr and leaves the model untouched.
    """
    schemes = {
        "xavier": torch.nn.init.xavier_normal_,
        "kaiming": torch.nn.init.kaiming_normal_,
        "gaussian": torch.nn.init.normal_,
        "normal": torch.nn.init.normal_,
    }
    init_func = schemes.get(init)
    if init_func is None:
        print("Error when initializing model's weights, {} either doesn't exist or is not a valid initialization function.".format(init), \
              file=sys.stderr)
        return
    for module in model.modules():
        if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
            init_func(module.weight)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, torch.nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/layers/blocks.py | code/models/layers/blocks.py | import torch.nn as nn
from models.layers.dsbn import DomainSpecificBatchNorm2d
def normalize(x, norm_type, num_domain=1):
    """Return a 2-D normalisation layer with *x* channels for *norm_type*."""
    if norm_type == 'instancenorm':
        return nn.InstanceNorm2d(x)
    if norm_type == 'dsbn':
        return DomainSpecificBatchNorm2d(x, num_domain)
    # 'batchnorm' and any unrecognised type fall back to plain BatchNorm2d.
    return nn.BatchNorm2d(x)
def deconv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1, padding=0):
    """Transposed conv -> BatchNorm -> ReLU block."""
    layers = [
        nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def deconv(in_channels, out_channels, kernel_size=3, stride=1, padding=0):
    """Bare transposed-conv layer, wrapped in Sequential for API symmetry."""
    layer = nn.ConvTranspose2d(in_channels, out_channels,
                               kernel_size=kernel_size, stride=stride,
                               padding=padding)
    return nn.Sequential(layer)
def conv_lrelu(in_channels, out_channels, kernel_size=3, stride=1, padding=0):
    """Conv -> LeakyReLU(0.2) block with no normalisation."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.LeakyReLU(0.2, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_bn_lrelu(in_channels, out_channels, kernel_size=3, stride=1, padding=0):
    """Conv -> BatchNorm -> LeakyReLU(0.2) block."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(0.2, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_in_lrelu(in_channels, out_channels, kernel_size=3, stride=1, padding=0):
    """Conv -> InstanceNorm -> LeakyReLU(0.2) block."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.InstanceNorm2d(out_channels),
        nn.LeakyReLU(0.2, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1, padding=0):
    """Conv -> BatchNorm -> ReLU block."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_relu(in_channels, out_channels, kernel_size=3, stride=1, padding=0):
    """Conv -> ReLU block with no normalisation."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_no_activ(in_channels, out_channels, kernel_size=3, stride=1, padding=1):
    # Plain convolution with no normalisation or activation; note the default
    # padding here is 1, unlike the other factories in this module (0).
    return nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
def conv_id_unet(in_channels, out_channels, norm='batchnorm'):
    # 1x1 conv -> norm -> ReLU: channel-count projection used inside the UNet.
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 1, 1, 0),
        normalize(out_channels, norm),
        nn.ReLU(inplace=True)
    )
def upconv(in_channels, out_channels, norm='batchnorm'):
    # 3x3 conv -> norm with no activation, applied after upsampling in the
    # UNet decoder.
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 3, 1, 1),
        normalize(out_channels, norm)
    )
def conv_block_unet(in_channels, out_channels, kernel_size, stride=1, padding=0, norm='batchnorm', num_dm=1):
    # Double conv block for the UNet: two conv -> norm -> LeakyReLU stages.
    # num_dm is only meaningful when norm == 'dsbn'.
    # NOTE(review): with norm='dsbn' the normalisation layer expects a domain
    # label that nn.Sequential cannot forward — confirm 'dsbn' is never used
    # through this factory.
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        normalize(out_channels, norm, num_dm),
        nn.LeakyReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, kernel_size, stride, padding),
        normalize(out_channels, norm, num_dm),
        nn.LeakyReLU(inplace=True),
    )
def conv_block_unet_last(in_channels, out_channels, kernel_size, stride=1, padding=0, norm='batchnorm'):
    # Final UNet block: two conv -> norm -> ReLU stages followed by one last
    # conv with no activation (raw output map).
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        normalize(out_channels, norm),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, kernel_size, stride, padding),
        normalize(out_channels, norm),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_channels, out_channels, kernel_size, stride, padding)
    )
def conv_preactivation_relu(in_channels, out_channels, kernel_size=1, stride=1, padding=0, norm='batchnorm'):
    # Pre-activation ordering (ReLU before conv), as in pre-act ResNets;
    # inplace=False so the shared input of a residual branch is not mutated.
    return nn.Sequential(
        nn.ReLU(inplace=False),
        nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
        normalize(out_channels, norm)
    )
class ResConv(nn.Module):
    def __init__(self, ndf, norm):
        super(ResConv, self).__init__()
        """
        Args:
            ndf: constant number from channels
        """
        # Pre-activation residual block that doubles the channel count:
        # main path ndf -> 2*ndf -> 2*ndf; the shortcut is projected to
        # 2*ndf channels by a 1x1 conv so the residual sum matches.
        self.ndf = ndf
        self.norm = norm
        self.conv1 = conv_preactivation_relu(self.ndf, self.ndf * 2, 3, 1, 1, self.norm)
        self.conv2 = conv_preactivation_relu(self.ndf * 2 , self.ndf * 2, 3, 1, 1, self.norm)
        self.resconv = conv_preactivation_relu(self.ndf , self.ndf * 2, 1, 1, 0, self.norm)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        # Project the shortcut before the residual addition.
        residual = self.resconv(residual)
        return out + residual
class Interpolate(nn.Module):
    """Module wrapper around ``nn.functional.interpolate`` with a fixed size.

    Args:
        size: expected spatial size after interpolation.
        mode: interpolation type (e.g. bilinear, nearest).
    """

    def __init__(self, size, mode):
        super(Interpolate, self).__init__()
        self.interp = nn.functional.interpolate
        self.size = size
        self.mode = mode

    def forward(self, x):
        return self.interp(x, size=self.size, mode=self.mode)  # align_corners left at default
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/layers/adain.py | code/models/layers/adain.py | import torch
def calc_vector_mean_std(feat, eps=1e-5):
    """Mean/std of a feature batch reduced over dim 1.

    ``eps`` is a small value added to the variance to avoid divide-by-zero
    in downstream normalisation.
    """
    feat_mean = feat.mean(dim=1)
    feat_std = (feat.var(dim=1) + eps).sqrt()
    return feat_mean, feat_std
def calc_tensor_mean_std(feat, eps=1e-5):
    """Per-sample, per-channel mean/std of a 4-D (N, C, H, W) tensor.

    Returns tensors shaped (N, C, 1, 1); ``eps`` is a small value added to
    the variance to avoid divide-by-zero downstream.
    """
    assert feat.dim() == 4
    n, c = feat.size()[:2]
    flat = feat.view(n, c, -1)
    feat_std = (flat.var(dim=2) + eps).sqrt().view(n, c, 1, 1)
    feat_mean = flat.mean(dim=2).view(n, c, 1, 1)
    return feat_mean, feat_std
def adaptive_instance_normalization(content_feat, style_feat):
    # AdaIN where the style statistics come from a style VECTOR: whiten the
    # 4-D content features with their own per-channel stats, then re-colour
    # every channel with the per-sample scalar style mean/std (broadcast over
    # channels and spatial dims).
    size = content_feat.size()
    style_mean, style_std = calc_vector_mean_std(style_feat)
    content_mean, content_std = calc_tensor_mean_std(content_feat)
    normalized_feat = (content_feat - content_mean.expand(
        size)) / content_std.expand(size)
    return normalized_feat * style_std.view(style_std.shape[0],1,1,1).expand(size) + style_mean.view(style_mean.shape[0],1,1,1).expand(size)
def adaptive_instance_normalization2(content_feat, style_feat):
    # AdaIN variant where BOTH content and style statistics are reduced over
    # dim 1 and broadcast back via unsqueeze(-1).
    # NOTE(review): this only broadcasts correctly when the reduced stats
    # line up with content_feat's trailing axis — confirm the intended
    # input rank at the call sites.
    size = content_feat.size()
    style_mean, style_std = calc_vector_mean_std(style_feat)
    content_mean, content_std = calc_vector_mean_std(content_feat)
    normalized_feat = (content_feat - content_mean.unsqueeze(-1).expand(
        size)) / content_std.unsqueeze(-1).expand(size)
    return normalized_feat * style_std.unsqueeze(-1).expand(size) + style_mean.unsqueeze(-1).expand(size)
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/layers/dsbn.py | code/models/layers/dsbn.py | import numpy as np
from torch import nn
class _DomainSpecificBatchNorm(nn.Module):
    """Base class holding one BatchNorm branch per domain.

    ``forward`` dispatches to the BatchNorm layer indexed by ``domain_label``
    so each domain keeps its own affine parameters and running statistics.
    """
    _version = 2

    def __init__(self, num_features, num_classes, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True):
        # num_classes is the number of domains (one BN branch each).
        super(_DomainSpecificBatchNorm, self).__init__()
        # self.bns = nn.ModuleList([nn.modules.batchnorm._BatchNorm(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_classes)])
        self.bns = nn.ModuleList(
            [nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_classes)])

    def reset_running_stats(self):
        # Reset the running mean/var of every domain branch.
        for bn in self.bns:
            bn.reset_running_stats()

    def reset_parameters(self):
        # Re-initialise affine parameters and stats of every domain branch.
        for bn in self.bns:
            bn.reset_parameters()

    def _check_input_dim(self, input):
        # Subclasses validate the expected tensor rank here.
        raise NotImplementedError

    def forward(self, x, domain_label):
        self._check_input_dim(x)
        bn = self.bns[domain_label]
        return bn(x)
class DomainSpecificBatchNorm2d(_DomainSpecificBatchNorm):
    """Domain-specific BatchNorm for 4-D (N, C, H, W) inputs."""

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
if __name__ == '__main__':
    # Smoke test: 16 features, 5 domains; list every learnable parameter
    # (expects weight/bias pairs under bns.0 .. bns.4).
    bn = DomainSpecificBatchNorm2d(16, 5)
    for name, paramter in bn.named_parameters():
        print(f'{name}:{paramter.shape}')
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/layers/__init__.py | code/models/layers/__init__.py | from .adain import *
from .blocks import * | python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/networks/decoder.py | code/models/networks/decoder.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time :
# @Author : Ran Gu
"""
AdaIN-based decoder
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP(nn.Module):
    """Three-layer fully connected network with ReLU after every layer.

    Flattens the input and maps ``input_dim`` features through two hidden
    layers of width ``dim`` to ``output_dim`` outputs.
    """

    def __init__(self, input_dim, output_dim, dim):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_dim, dim)
        self.fc2 = nn.Linear(dim, dim)
        self.fc3 = nn.Linear(dim, output_dim)

    def forward(self, x):
        flat = x.view(x.size(0), -1)
        hidden = F.relu(self.fc1(flat), inplace=True)
        hidden = F.relu(self.fc2(hidden), inplace=True)
        return F.relu(self.fc3(hidden), inplace=True)
class AdaptiveInstanceNorm2d(nn.Module):
    """Instance norm whose affine weight/bias are assigned externally (AdaIN).

    ``weight`` and ``bias`` are plain attributes (not Parameters); an MLP
    writes per-sample values into them before each forward pass (see
    Ada_Decoder.assgin_adain_params).
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # weight and bias are dynamically assigned
        self.weight = None
        self.bias = None
        # just dummy buffers, not used
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!"
        b, c = x.size(0), x.size(1)
        # Tile the dummy stats to one entry per (sample, channel) pair.
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)
        # Apply instance norm: folding the batch into the channel axis and
        # running batch_norm in training mode normalises each
        # (sample, channel) slice independently — i.e. instance norm with the
        # externally supplied per-sample affine weight/bias.
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(
            x_reshaped, running_mean, running_var, self.weight, self.bias,
            True, self.momentum, self.eps)
        return out.view(b, c, *x.size()[2:])

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Decoder(nn.Module):
    """Image decoder: three conv+AdaIN stages, then a final conv with tanh.

    The AdaIN layers carry no parameters of their own; Ada_Decoder writes
    per-sample weight/bias into them before every forward pass.
    """

    def __init__(self, dim, out_channel):
        super(Decoder, self).__init__()
        self.conv1 = nn.Conv2d(dim, dim, 3, 1, 1, bias=True)
        self.adain1 = AdaptiveInstanceNorm2d(dim)
        self.conv2 = nn.Conv2d(dim, dim, 3, 1, 1, bias=True)
        self.adain2 = AdaptiveInstanceNorm2d(dim)
        self.conv3 = nn.Conv2d(dim, dim, 3, 1, 1, bias=True)
        self.adain3 = AdaptiveInstanceNorm2d(dim)
        self.conv4 = nn.Conv2d(dim, out_channel, 3, 1, 1, bias=True)

    def forward(self,x):
        out = self.conv1(x)
        out = self.adain1(out)
        out = self.conv2(out)
        out = self.adain2(out)
        out = self.conv3(out)
        out = self.adain3(out)
        out = self.conv4(out)
        # tanh keeps the reconstructed image in [-1, 1].
        out = torch.tanh(out)
        return out
class Ada_Decoder(nn.Module):
    """AdaIN decoder: reconstructs an image from anatomy maps and a style code.

    An MLP maps the ``z_length`` style vector to exactly the number of AdaIN
    (bias, weight) values the wrapped Decoder needs; they are assigned into
    the AdaIN layers before decoding.
    """

    def __init__(self, anatomy_out_channel, z_length, out_channel):
        super(Ada_Decoder, self).__init__()
        self.dec = Decoder(anatomy_out_channel, out_channel)
        self.mlp = MLP(z_length, self.get_num_adain_params(self.dec), 256)

    def forward(self, anatomy, style):
        adain_params = self.mlp(style)  # [bs, z_length] --> [4, 48]
        self.assgin_adain_params(adain_params, self.dec)
        images = self.dec(anatomy)
        return images

    def get_num_adain_params(self, model):
        # return the number of AdaIN parameters needed by the model
        # (2 * num_features per AdaptiveInstanceNorm2d layer: bias + weight).
        num_adain_params = 0
        for m in model.modules():
            if m.__class__.__name__ == "AdaptiveInstanceNorm2d":
                num_adain_params += 2*m.num_features
        return num_adain_params

    def assgin_adain_params(self, adain_params, model):
        """
        Assign the adain_params to the AdaIN layers in model.
        (Method name keeps the original 'assgin' spelling for compatibility.)
        """
        # Consume the flat parameter vector layer by layer: the first
        # num_features entries become the bias (mean), the next num_features
        # the weight (std); the remainder is handed to the next AdaIN layer.
        for m in model.modules():
            if m.__class__.__name__ == "AdaptiveInstanceNorm2d":
                mean = adain_params[:, :m.num_features]
                std = adain_params[:, m.num_features:2 * m.num_features]
                m.bias = mean.contiguous().view(-1)
                m.weight = std.contiguous().view(-1)
                if adain_params.size(1) > 2 * m.num_features:
                    adain_params = adain_params[:, 2 * m.num_features:]
if __name__ == '__main__':
    # Smoke test: 4 samples with 8 anatomy channels and 8-dim style codes.
    images = torch.FloatTensor(4, 8, 384, 384).uniform_(-1, 1)
    codes = torch.FloatTensor(4, 8).uniform_(-1,1)
    # Ada_Decoder requires three arguments (anatomy_out_channel, z_length,
    # out_channel); the original call passed only two and raised a TypeError.
    model = Ada_Decoder(8, 8, 1)
    model(images, codes)
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/networks/cscada_net.py | code/models/networks/cscada_net.py | # -*- coding: utf-8 -*-
# @Time : 2021/7/21 16:48
# @Author : Ran.Gu
# @Email : guran924@std.uestc.edu.cn
'''
cs-cada uses different normalization.
'''
import random
import torch
import torch.nn as nn
from models.layers.dsbn import DomainSpecificBatchNorm2d
class Unet_dsbn_cont(nn.Module):
    """U-Net with Domain-Specific Batch Normalisation and a contrastive head.

    ``forward`` returns a tuple of (segmentation logits, 256-d projection of
    the bottleneck used for the contrastive loss). ``domain_label`` selects
    the DSBN branch inside every conv/up block.
    """

    def __init__(self, net_params:dict):
        super(Unet_dsbn_cont, self).__init__()
        self.num_filters = net_params['num_filters']
        self.num_channels = net_params['num_channels']
        self.num_classes = net_params['num_classes']
        self.normalization = net_params['normalization']
        self.num_domain = net_params['num_domains']
        # Channel widths double at each of the four encoder levels.
        filters = [self.num_filters,
                   self.num_filters * 2,
                   self.num_filters * 4,
                   self.num_filters * 8,
                   self.num_filters * 16]
        self.conv1 = conv_block(self.num_channels, filters[0], normalization=self.normalization, num_domain=self.num_domain)
        self.pool1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = conv_block(filters[0], filters[1], normalization=self.normalization, num_domain=self.num_domain)
        self.pool2 = nn.MaxPool2d(kernel_size=2)
        self.conv3 = conv_block(filters[1], filters[2], normalization=self.normalization, num_domain=self.num_domain)
        self.pool3 = nn.MaxPool2d(kernel_size=2)
        self.conv4 = conv_block(filters[2], filters[3], drop_out=True, normalization=self.normalization, num_domain=self.num_domain)
        self.pool4 = nn.MaxPool2d(kernel_size=2)
        self.center = conv_block(filters[3], filters[4], drop_out=True, normalization=self.normalization, num_domain=self.num_domain)
        # f1 and g1 encoder
        # Projection head for contrastive learning: conv reduction to 16
        # channels, then an MLP down to a 256-d vector.
        # NOTE(review): g1's in_features=4096 assumes the flattened bottleneck
        # is 16 channels * 16 * 16 spatial — confirm against the input size.
        self.f1 = nn.Sequential(nn.Conv2d(filters[4], 64, kernel_size=3, padding=1, bias=True),
                                nn.Conv2d(64, 16, kernel_size=1))
        self.g1 = nn.Sequential(nn.Linear(in_features=4096, out_features=1024),
                                nn.ReLU(),
                                nn.Linear(in_features=1024, out_features=256))
        # upsample
        self.up4 = UpCatconv(filters[4], filters[3], drop_out=True, normalization=self.normalization, num_domain=self.num_domain)
        self.up3 = UpCatconv(filters[3], filters[2], normalization=self.normalization, num_domain=self.num_domain)
        self.up2 = UpCatconv(filters[2], filters[1], normalization=self.normalization, num_domain=self.num_domain)
        self.up1 = UpCatconv(filters[1], filters[0], normalization=self.normalization, num_domain=self.num_domain)
        self.final = nn.Sequential(nn.Conv2d(filters[0], filters[0], kernel_size=1),
                                   nn.Conv2d(filters[0], self.num_classes, kernel_size=1))

    def forward(self, x, domain_label):
        # Encoder path with 2x max-pooling between levels.
        conv1 = self.conv1(x, domain_label)
        pool1 = self.pool1(conv1)
        conv2 = self.conv2(pool1, domain_label)
        pool2 = self.pool2(conv2)
        conv3 = self.conv3(pool2, domain_label)
        pool3 = self.pool3(conv3)
        conv4 = self.conv4(pool3, domain_label)
        pool4 = self.pool4(conv4)
        center = self.center(pool4, domain_label)
        # Contrastive representation from the flattened bottleneck.
        high_d = self.f1(center)
        high_d_represent = self.g1(high_d.reshape(high_d.size(0), -1))
        # Decoder path with skip connections from the encoder.
        up_4 = self.up4(conv4, center, domain_label)
        up_3 = self.up3(conv3, up_4, domain_label)
        up_2 = self.up2(conv2, up_3, domain_label)
        up_1 = self.up1(conv1, up_2, domain_label)
        out = self.final(up_1)
        return out, high_d_represent
# conv_block(nn.Module) for U-net convolution block
# conv_block(nn.Module) for U-net convolution block
class conv_block(nn.Module):
    """Two 3x3 conv layers, each followed by optional normalisation and ReLU.

    Args:
        ch_in: input channel count.
        ch_out: output channel count.
        drop_out: if True, apply 2-D dropout (p=0.5) after the block.
        normalization: 'batchnorm', 'instancenorm', 'dsbn' or 'none'.
        num_domain: number of domains for 'dsbn' normalisation.
    """

    def __init__(self, ch_in, ch_out, drop_out=False, normalization='none', num_domain = 6):
        super(conv_block, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, padding=1, bias=True)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, padding=1, bias=True)
        # NOTE(review): the single `self.bn` instance normalises the outputs
        # of BOTH convs (shared affine params/running stats) — confirm intended.
        self.normalization = normalization
        if normalization == 'batchnorm':
            self.bn = nn.BatchNorm2d(ch_out)
        elif normalization == 'instancenorm':
            self.bn = nn.InstanceNorm2d(ch_out)
        elif normalization == 'dsbn':
            self.bn = DomainSpecificBatchNorm2d(ch_out, num_domain)
        elif normalization != 'none':
            assert False
        self.relu = nn.ReLU(inplace=True)
        self.dropout = drop_out
        # Register the dropout module once so model.eval() disables it.
        # The original built `nn.Dropout2d(0.5)(x)` inside forward(), leaving
        # dropout permanently active even during evaluation.
        if drop_out:
            self.drop = nn.Dropout2d(0.5)

    def forward(self, x, domain_label):
        x = self.conv1(x)
        if self.normalization != 'none':
            if self.normalization == 'dsbn':
                x = self.bn(x, domain_label)
            else:
                x = self.bn(x)
        x = self.relu(x)
        x = self.conv2(x)
        if self.normalization != 'none':
            if self.normalization == 'dsbn':
                x = self.bn(x, domain_label)
            else:
                x = self.bn(x)
        x = self.relu(x)
        if self.dropout:
            x = self.drop(x)
        return x
# # UpCatconv(nn.Module) for U-net UP convolution
class UpCatconv(nn.Module):
def __init__(self, in_feat, out_feat, is_deconv=True, drop_out=False, normalization='none', num_domain = 6):
super(UpCatconv, self).__init__()
self.normalization = normalization
if is_deconv:
self.conv = conv_block(in_feat, out_feat, drop_out=drop_out, normalization=self.normalization,
num_domain=num_domain)
self.up = nn.ConvTranspose2d(in_feat, out_feat, kernel_size=2, stride=2)
else:
self.conv = conv_block(in_feat + out_feat, out_feat, drop_out=drop_out, normalization=self.normalization,
num_domain=num_domain)
self.up = nn.Upsample(scale_factor=2, mode='bilinear')
def forward(self, inputs, down_outputs, domain_label):
outputs = self.up(down_outputs)
out = self.conv(torch.cat([inputs, outputs], dim=1), domain_label)
return out
if __name__ == '__main__':
    import numpy as np
    net_params = {'num_classes':2, 'num_channels':3, 'num_filters':32,
                  'num_filters_cond':16, 'num_domains':6, 'normalization':'dsbn'}
    model = Unet_dsbn_cont(net_params).cuda()
    x = torch.tensor(np.random.random([5, 3, 384, 384]), dtype=torch.float32)
    x = x.cuda()
    # forward() returns (segmentation logits, contrastive representation);
    # the original printed `pred.shape` on the tuple, which raises
    # AttributeError. Unpack the tuple before printing.
    pred, high_d_represent = model(x, 5)
    print(pred.shape)
    print(high_d_represent.shape)
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/models/networks/sdnet.py | code/models/networks/sdnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("/gr/Project/domain/my work/DAG4MIA/code")
import time
from models.layers.blocks import *
from models.layers.adain import *
from models.networks.decoder import Ada_Decoder
class Segmentor(nn.Module):
    """Segmentation head: two conv+BN+LeakyReLU layers and a 1x1 prediction conv.

    Maps the anatomy factors (``num_output_channels`` channels) to raw
    per-class logits (``num_class`` channels), same spatial size.
    """

    def __init__(self, num_output_channels, num_class):
        super(Segmentor, self).__init__()
        self.num_output_channels = num_output_channels
        # self.num_classes = num_classes + 1 #background as extra class
        self.conv1 = conv_bn_lrelu(self.num_output_channels, 16, 3, 1, 1)
        self.conv2 = conv_bn_lrelu(16, 16, 1, 1, 0)
        self.pred = nn.Conv2d(16, num_class, 1, 1, 0)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.pred(out)
        return out
class AEncoder(nn.Module):
    """UNet-based encoder producing the spatial anatomy factors of an image.

    The UNet output is passed through tanh, so each of the
    ``num_output_channels`` anatomy maps lies in [-1, 1].
    """

    def __init__(self, in_channel, width, height, ndf, num_output_channels, norm, upsample):
        super(AEncoder, self).__init__()
        """
        UNet encoder for the anatomy factors of the image
        num_output_channels: number of spatial (anatomy) factors to encode
        """
        self.in_channel = in_channel
        self.width = width
        self.height = height
        self.ndf = ndf
        self.num_output_channels = num_output_channels
        self.norm = norm
        self.upsample = upsample
        self.unet = UNet(self.in_channel, self.width, self.height, self.ndf, self.num_output_channels, self.norm, self.upsample)

    def forward(self, x):
        out = self.unet(x)
        out = torch.tanh(out)
        return out
class MEncoder(nn.Module):
    """VAE encoder extracting the intensity (modality) code of an image.

    Four stride-2 conv blocks downsample by 16x; fully connected layers then
    produce the mean and log-variance of a ``z_length``-dimensional Gaussian
    from which the modality code is sampled via the reparameterisation trick.
    ``forward`` returns (z, mu, logvar).
    """

    def __init__(self, z_length, in_channel, img_size):
        super(MEncoder, self).__init__()
        """
        VAE encoder to extract intensity (modality) information from the image
        z_length: length of the output vector
        """
        self.z_length = z_length
        self.in_channel = in_channel
        self.img_size = img_size
        self.block1 = conv_bn_lrelu(self.in_channel, 16, 3, 2, 1)  # input channel = 1, size = 384
        self.block2 = conv_bn_lrelu(16, 32, 3, 2, 1)
        self.block3 = conv_bn_lrelu(32, 64, 3, 2, 1)
        self.block4 = conv_bn_lrelu(64, 128, 3, 2, 1)
        # After 4 stride-2 blocks the map is (img_size//16)^2 x 128 channels.
        self.fc = nn.Linear(128*pow(self.img_size//16, 2), 32)  # 16*16*128
        self.norm = nn.BatchNorm1d(32)
        self.activ = nn.LeakyReLU(0.03, inplace=True)
        self.mu = nn.Linear(32, self.z_length)
        self.logvar = nn.Linear(32, self.z_length)

    def reparameterize(self, mu, logvar):
        # z = mu + sigma * eps with eps ~ N(0, I); keeps sampling differentiable.
        std = torch.exp(0.5*logvar)
        eps = torch.randn_like(std)
        return mu + eps*std

    def encode(self, x):
        # Project the 32-d feature to the Gaussian's mean and log-variance.
        return self.mu(x), self.logvar(x)

    def forward(self, img):
        """
        input is only the image [bs,3,256,256] without concated anatomy factor
        """
        out = self.block1(img)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        out = self.fc(out.view(-1, out.shape[1] * out.shape[2] * out.shape[3]))
        out = self.norm(out)
        out = self.activ(out)
        mu, logvar = self.encode(out)
        z = self.reparameterize(mu, logvar)
        return z, mu, logvar
class UNet(nn.Module):
    """UNet autoencoder: 4-level conv encoder, residual bottleneck, 4-level decoder with skip connections."""

    def __init__(self, in_channel, width, height, ndf, num_output_channels, normalization, upsample):
        super(UNet, self).__init__()
        """
        UNet autoencoder
        """
        # NOTE(review): the string above is a no-op expression, not a docstring.
        self.in_channel = in_channel  # channels of the input image
        self.h = height  # input height; assumed divisible by 8 -- TODO confirm
        self.w = width   # input width; assumed divisible by 8 -- TODO confirm
        self.norm = normalization  # normalization type, forwarded to conv blocks
        self.ndf = ndf  # base number of feature maps, doubled per encoder level
        self.num_output_channels = num_output_channels  # channels of the final prediction
        self.upsample = upsample  # interpolation mode for decoder upsampling
        # Encoder: channel widths ndf, 2*ndf, 4*ndf, 8*ndf (downsampling via maxpool in forward).
        self.encoder_block1 = conv_block_unet(self.in_channel, self.ndf, 3, 1, 1, self.norm)
        self.encoder_block2 = conv_block_unet(self.ndf, self.ndf * 2, 3, 1, 1, self.norm)
        self.encoder_block3 = conv_block_unet(self.ndf * 2, self.ndf * 4, 3, 1, 1, self.norm)
        self.encoder_block4 = conv_block_unet(self.ndf * 4, self.ndf * 8, 3, 1, 1, self.norm)
        self.maxpool = nn.MaxPool2d(2, 2)
        self.bottleneck = ResConv(self.ndf * 8, self.norm)
        # Decoder: each stage upsamples, halves channels, then concatenates the
        # matching encoder skip (hence the doubled in-channels of each block).
        self.decoder_upsample1 = Interpolate((self.h // 8, self.w // 8), mode=self.upsample)
        self.decoder_upconv1 = upconv(self.ndf * 16, self.ndf * 8, self.norm)
        self.decoder_block1 = conv_block_unet(self.ndf * 16, self.ndf * 8, 3, 1, 1, self.norm)
        self.decoder_upsample2 = Interpolate((self.h // 4, self.w // 4), mode=self.upsample)
        self.decoder_upconv2 = upconv(self.ndf * 8, self.ndf * 4, self.norm)
        self.decoder_block2 = conv_block_unet(self.ndf * 8, self.ndf * 4, 3, 1, 1, self.norm)
        self.decoder_upsample3 = Interpolate((self.h // 2, self.w // 2), mode=self.upsample)
        self.decoder_upconv3 = upconv(self.ndf * 4, self.ndf * 2, self.norm)
        self.decoder_block3 = conv_block_unet(self.ndf * 4, self.ndf * 2, 3, 1, 1, self.norm)
        self.decoder_upsample4 = Interpolate((self.h, self.w), mode=self.upsample)
        self.decoder_upconv4 = upconv(self.ndf * 2, self.ndf, self.norm)
        self.decoder_block4 = conv_block_unet(self.ndf * 2, self.ndf, 3, 1, 1, self.norm)
        # Final 3x3 conv projecting to the requested number of output channels.
        self.classifier_conv = nn.Conv2d(self.ndf, self.num_output_channels, 3, 1, 1, 1)

    def forward(self, x):
        """Run the encoder/bottleneck/decoder pipeline; skips s1..s4 feed the decoder."""
        #encoder
        s1 = self.encoder_block1(x)
        out = self.maxpool(s1)
        s2 = self.encoder_block2(out)
        out = self.maxpool(s2)
        s3 = self.encoder_block3(out)
        out = self.maxpool(s3)
        s4 = self.encoder_block4(out)
        out = self.maxpool(s4)
        #bottleneck
        out = self.bottleneck(out)
        #decoder
        out = self.decoder_upsample1(out)
        out = self.decoder_upconv1(out)
        out = torch.cat((out, s4), 1)  # skip connection from encoder level 4
        out = self.decoder_block1(out)
        out = self.decoder_upsample2(out)
        out = self.decoder_upconv2(out)
        out = torch.cat((out, s3), 1)  # skip connection from encoder level 3
        out = self.decoder_block2(out)
        out = self.decoder_upsample3(out)
        out = self.decoder_upconv3(out)
        out = torch.cat((out, s2), 1)  # skip connection from encoder level 2
        out = self.decoder_block3(out)
        out = self.decoder_upsample4(out)
        out = self.decoder_upconv4(out)
        out = torch.cat((out, s1), 1)  # skip connection from encoder level 1
        out = self.decoder_block4(out)
        out = self.classifier_conv(out)
        return out
if __name__ == '__main__':
    # Smoke test for the encoders/decoder defined in this module.
    images = torch.FloatTensor(4, 3, 256, 256).uniform_(-1, 1)
    codes = torch.FloatTensor(4, 8).uniform_(-1, 1)
    # BUG FIX: MEncoder requires (z_length, in_channel, img_size); the original
    # call MEncoder(8) raised a TypeError before any forward pass ran.
    model = MEncoder(8, 3, 256)
    model(images)
    # BUG FIX: AEncoder requires (in_channel, width, height, ndf,
    # num_output_channels, norm, upsample); the original call was missing the
    # input-channel count (3, matching the test images).
    model = AEncoder(3, 256, 256, 32, 2, 'batchnorm', 'nearest')
    model(images)
    model = Ada_Decoder(8, 8, 2)
    model(images, codes)
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/utils/binary.py | code/utils/binary.py | # Copyright (C) 2013 Oskar Maier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# author Oskar Maier
# version r0.1.1
# since 2014-03-13
# status Release
# build-in modules
# third-party modules
import numpy
from scipy.ndimage import _ni_support
from scipy.ndimage.morphology import distance_transform_edt, binary_erosion,\
generate_binary_structure
from scipy.ndimage.measurements import label, find_objects
from scipy.stats import pearsonr
# own modules
# code
def dc(result, reference):
    r"""
    Dice coefficient
    Computes the Dice coefficient (also known as Sorensen index) between the binary
    objects in two images.
    The metric is defined as
    .. math::
        DC=\frac{2|A\cap B|}{|A|+|B|}
    , where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    Returns
    -------
    dc : float
        The Dice coefficient between the object(s) in ```result``` and the
        object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).
    Notes
    -----
    This is a real metric. The binary images can therefore be supplied in any order.
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; the builtin ``bool``
    # is the documented replacement and behaves identically here.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    intersection = numpy.count_nonzero(result & reference)
    size_i1 = numpy.count_nonzero(result)
    size_i2 = numpy.count_nonzero(reference)
    try:
        dc = 2. * intersection / float(size_i1 + size_i2)
    except ZeroDivisionError:
        # Both masks empty: define the overlap as 0 instead of failing.
        dc = 0.0
    return dc
def jc(result, reference):
    """
    Jaccard coefficient
    Computes the Jaccard coefficient between the binary objects in two images.
    Parameters
    ----------
    result: array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference: array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    Returns
    -------
    jc: float
        The Jaccard coefficient between the object(s) in `result` and the
        object(s) in `reference`. It ranges from 0 (no overlap) to 1 (perfect overlap).
        Returns 0.0 when both inputs are empty (consistent with :func:`dc`).
    Notes
    -----
    This is a real metric. The binary images can therefore be supplied in any order.
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    intersection = numpy.count_nonzero(result & reference)
    union = numpy.count_nonzero(result | reference)
    try:
        jc = float(intersection) / float(union)
    except ZeroDivisionError:
        # ROBUSTNESS: two empty masks used to raise ZeroDivisionError; mirror
        # dc()'s convention and report 0.0 instead.
        jc = 0.0
    return jc
def precision(result, reference):
    """
    Precison.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    Returns
    -------
    precision : float
        The precision between two binary datasets, here mostly binary objects in images,
        which is defined as the fraction of retrieved instances that are relevant. The
        precision is not symmetric.
    See also
    --------
    :func:`recall`
    Notes
    -----
    Not symmetric. The inverse of the precision is :func:`recall`.
    High precision means that an algorithm returned substantially more relevant results than irrelevant.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Precision_and_recall
    .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    tp = numpy.count_nonzero(result & reference)
    fp = numpy.count_nonzero(result & ~reference)
    try:
        precision = tp / float(tp + fp)
    except ZeroDivisionError:
        # No positives predicted at all: precision is conventionally 0 here.
        precision = 0.0
    return precision
def recall(result, reference):
    """
    Recall.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    Returns
    -------
    recall : float
        The recall between two binary datasets, here mostly binary objects in images,
        which is defined as the fraction of relevant instances that are retrieved. The
        recall is not symmetric.
    See also
    --------
    :func:`precision`
    Notes
    -----
    Not symmetric. The inverse of the recall is :func:`precision`.
    High recall means that an algorithm returned most of the relevant results.
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Precision_and_recall
    .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    tp = numpy.count_nonzero(result & reference)
    fn = numpy.count_nonzero(~result & reference)
    try:
        recall = tp / float(tp + fn)
    except ZeroDivisionError:
        # Empty reference: nothing relevant to retrieve, report 0.
        recall = 0.0
    return recall
def sensitivity(result, reference):
    """
    Sensitivity.
    Same as :func:`recall`, see there for a detailed description.
    See also
    --------
    :func:`specificity`
    """
    # Pure alias kept for readers used to the sensitivity/specificity terminology.
    return recall(result, reference)
def specificity(result, reference):
    """
    Specificity.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    Returns
    -------
    specificity : float
        The specificity between two binary datasets, here mostly binary objects in images,
        which denotes the fraction of correctly returned negatives. The
        specificity is not symmetric.
    See also
    --------
    :func:`sensitivity`
    Notes
    -----
    Not symmetric. The completment of the specificity is :func:`sensitivity`.
    High recall means that an algorithm returned most of the irrelevant results.
    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Sensitivity_and_specificity
    .. [2] http://en.wikipedia.org/wiki/Confusion_matrix#Table_of_confusion
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    tn = numpy.count_nonzero(~result & ~reference)
    fp = numpy.count_nonzero(result & ~reference)
    try:
        specificity = tn / float(tn + fp)
    except ZeroDivisionError:
        # No true negatives possible (reference covers everything): report 0.
        specificity = 0.0
    return specificity
def true_negative_rate(result, reference):
    """
    True negative rate.
    Same as :func:`specificity`, see there for a detailed description.
    See also
    --------
    :func:`true_positive_rate`
    :func:`positive_predictive_value`
    """
    # Pure alias: TNR is specificity by definition.
    return specificity(result, reference)
def true_positive_rate(result, reference):
    """
    True positive rate.
    Same as :func:`recall` and :func:`sensitivity`, see there for a detailed description.
    See also
    --------
    :func:`positive_predictive_value`
    :func:`true_negative_rate`
    """
    # Pure alias: TPR is recall by definition.
    return recall(result, reference)
def positive_predictive_value(result, reference):
    """
    Positive predictive value.
    Same as :func:`precision`, see there for a detailed description.
    See also
    --------
    :func:`true_positive_rate`
    :func:`true_negative_rate`
    """
    # Pure alias: PPV is precision by definition.
    return precision(result, reference)
def hd(result, reference, voxelspacing=None, connectivity=1):
    """
    Hausdorff Distance.
    Computes the (symmetric) Hausdorff Distance (HD) between the binary objects in two
    images. It is defined as the maximum surface distance between the objects.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    voxelspacing : float or sequence of floats, optional
        The voxelspacing in a distance unit i.e. spacing of elements
        along each dimension. If a sequence, must be of length equal to
        the input rank; if a single number, this is used for all axes. If
        not specified, a grid spacing of unity is implied.
    connectivity : int
        The neighbourhood/connectivity considered when determining the surface
        of the binary objects. This value is passed to
        `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
        Note that the connectivity influences the result in the case of the Hausdorff distance.
    Returns
    -------
    hd : float
        The symmetric Hausdorff Distance between the object(s) in ```result``` and the
        object(s) in ```reference```. The distance unit is the same as for the spacing of
        elements along each dimension, which is usually given in mm.
    See also
    --------
    :func:`assd`
    :func:`asd`
    Notes
    -----
    This is a real metric. The binary images can therefore be supplied in any order.
    """
    # Directed maximum surface distances in both directions; the symmetric
    # Hausdorff distance is the worse of the two.
    forward = __surface_distances(result, reference, voxelspacing, connectivity).max()
    backward = __surface_distances(reference, result, voxelspacing, connectivity).max()
    return max(forward, backward)
def hd95(result, reference, voxelspacing=None, connectivity=1):
    """
    95th percentile of the Hausdorff Distance.
    Computes the 95th percentile of the (symmetric) Hausdorff Distance (HD) between the binary objects in two
    images. Compared to the Hausdorff Distance, this metric is slightly more stable to small outliers and is
    commonly used in Biomedical Segmentation challenges.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    voxelspacing : float or sequence of floats, optional
        The voxelspacing in a distance unit i.e. spacing of elements
        along each dimension. If a sequence, must be of length equal to
        the input rank; if a single number, this is used for all axes. If
        not specified, a grid spacing of unity is implied.
    connectivity : int
        The neighbourhood/connectivity considered when determining the surface
        of the binary objects. This value is passed to
        `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
        Note that the connectivity influences the result in the case of the Hausdorff distance.
    Returns
    -------
    hd : float
        The symmetric Hausdorff Distance between the object(s) in ```result``` and the
        object(s) in ```reference```. The distance unit is the same as for the spacing of
        elements along each dimension, which is usually given in mm.
    See also
    --------
    :func:`hd`
    Notes
    -----
    This is a real metric. The binary images can therefore be supplied in any order.
    """
    # Pool the directed surface distances from both directions and take the
    # 95th percentile, which damps the effect of isolated outlier voxels.
    forward = __surface_distances(result, reference, voxelspacing, connectivity)
    backward = __surface_distances(reference, result, voxelspacing, connectivity)
    return numpy.percentile(numpy.hstack((forward, backward)), 95)
def assd(result, reference, voxelspacing=None, connectivity=1):
    """
    Average symmetric surface distance.
    Computes the average symmetric surface distance (ASD) between the binary objects in
    two images.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    voxelspacing : float or sequence of floats, optional
        The voxelspacing in a distance unit i.e. spacing of elements
        along each dimension. If a sequence, must be of length equal to
        the input rank; if a single number, this is used for all axes. If
        not specified, a grid spacing of unity is implied.
    connectivity : int
        The neighbourhood/connectivity considered when determining the surface
        of the binary objects. This value is passed to
        `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
        The decision on the connectivity is important, as it can influence the results
        strongly. If in doubt, leave it as it is.
    Returns
    -------
    assd : float
        The average symmetric surface distance between the object(s) in ``result`` and the
        object(s) in ``reference``. The distance unit is the same as for the spacing of
        elements along each dimension, which is usually given in mm.
    See also
    --------
    :func:`asd`
    :func:`hd`
    Notes
    -----
    This is a real metric, obtained by calling and averaging
    >>> asd(result, reference)
    and
    >>> asd(reference, result)
    The binary images can therefore be supplied in any order.
    """
    # Symmetrize the directed average surface distance by averaging both directions.
    forward = asd(result, reference, voxelspacing, connectivity)
    backward = asd(reference, result, voxelspacing, connectivity)
    return numpy.mean((forward, backward))
def asd(result, reference, voxelspacing=None, connectivity=1):
    """
    Average surface distance metric.
    Computes the average surface distance (ASD) between the binary objects in two images.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    voxelspacing : float or sequence of floats, optional
        The voxelspacing in a distance unit i.e. spacing of elements
        along each dimension. If a sequence, must be of length equal to
        the input rank; if a single number, this is used for all axes. If
        not specified, a grid spacing of unity is implied.
    connectivity : int
        The neighbourhood/connectivity considered when determining the surface
        of the binary objects. This value is passed to
        `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
        The decision on the connectivity is important, as it can influence the results
        strongly. If in doubt, leave it as it is.
    Returns
    -------
    asd : float
        The average surface distance between the object(s) in ``result`` and the
        object(s) in ``reference``. The distance unit is the same as for the spacing
        of elements along each dimension, which is usually given in mm.
    See also
    --------
    :func:`assd`
    :func:`hd`
    Notes
    -----
    This is not a real metric, as it is directed. See `assd` for a real metric of this.
    The method is implemented making use of distance images and simple binary morphology
    to achieve high computational speed.
    Examples
    --------
    The `connectivity` determines what pixels/voxels are considered the surface of a
    binary object. Take the following binary image showing a cross
    >>> from scipy.ndimage.morphology import generate_binary_structure
    >>> cross = generate_binary_structure(2, 1)
    array([[0, 1, 0],
           [1, 1, 1],
           [0, 1, 0]])
    With `connectivity` set to `1` a 4-neighbourhood is considered when determining the
    object surface, resulting in the surface
    .. code-block:: python
        array([[0, 1, 0],
               [1, 0, 1],
               [0, 1, 0]])
    Changing `connectivity` to `2`, a 8-neighbourhood is considered and we get:
    .. code-block:: python
        array([[0, 1, 0],
               [1, 1, 1],
               [0, 1, 0]])
    , as a diagonal connection does no longer qualifies as valid object surface.
    This influences the results `asd` returns. Imagine we want to compute the surface
    distance of our cross to a cube-like object:
    >>> cube = generate_binary_structure(2, 1)
    array([[1, 1, 1],
           [1, 1, 1],
           [1, 1, 1]])
    , which surface is, independent of the `connectivity` value set, always
    .. code-block:: python
        array([[1, 1, 1],
               [1, 0, 1],
               [1, 1, 1]])
    Using a `connectivity` of `1` we get
    >>> asd(cross, cube, connectivity=1)
    0.0
    while a value of `2` returns us
    >>> asd(cross, cube, connectivity=2)
    0.20000000000000001
    due to the center of the cross being considered surface as well.
    """
    # Mean over all directed surface distances from ``result`` to ``reference``.
    surface_dists = __surface_distances(result, reference, voxelspacing, connectivity)
    return surface_dists.mean()
def ravd(result, reference):
    """
    Relative absolute volume difference.
    Compute the relative absolute volume difference between the (joined) binary objects
    in the two images.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    Returns
    -------
    ravd : float
        The relative absolute volume difference between the object(s) in ``result``
        and the object(s) in ``reference``. This is a percentage value in the range
        :math:`[-1.0, +inf]` for which a :math:`0` denotes an ideal score.
    Raises
    ------
    RuntimeError
        If the reference object is empty.
    See also
    --------
    :func:`dc`
    :func:`precision`
    :func:`recall`
    Notes
    -----
    This is not a real metric, as it is directed. Negative values denote a smaller
    and positive values a larger volume than the reference.
    This implementation does not check, whether the two supplied arrays are of the same
    size.
    Examples
    --------
    Considering the following inputs
    >>> import numpy
    >>> arr1 = numpy.asarray([[0,1,0],[1,1,1],[0,1,0]])
    >>> arr1
    array([[0, 1, 0],
           [1, 1, 1],
           [0, 1, 0]])
    >>> arr2 = numpy.asarray([[0,1,0],[1,0,1],[0,1,0]])
    >>> arr2
    array([[0, 1, 0],
           [1, 0, 1],
           [0, 1, 0]])
    comparing `arr1` to `arr2` we get
    >>> ravd(arr1, arr2)
    -0.2
    and reversing the inputs the directivness of the metric becomes evident
    >>> ravd(arr2, arr1)
    0.25
    It is important to keep in mind that a perfect score of `0` does not mean that the
    binary objects fit exactely, as only the volumes are compared:
    >>> arr1 = numpy.asarray([1,0,0])
    >>> arr2 = numpy.asarray([0,0,1])
    >>> ravd(arr1, arr2)
    0.0
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
    vol1 = numpy.count_nonzero(result)
    vol2 = numpy.count_nonzero(reference)
    if 0 == vol2:
        raise RuntimeError('The second supplied array does not contain any binary object.')
    return (vol1 - vol2) / float(vol2)
def volume_correlation(results, references):
    r"""
    Volume correlation.
    Computes the linear correlation in binary object volume between the
    contents of the successive binary images supplied. Measured through
    the Pearson product-moment correlation coefficient.
    Parameters
    ----------
    results : sequence of array_like
        Ordered list of input data containing objects. Each array_like will be
        converted into binary: background where 0, object everywhere else.
    references : sequence of array_like
        Ordered list of input data containing objects. Each array_like will be
        converted into binary: background where 0, object everywhere else.
        The order must be the same as for ``results``.
    Returns
    -------
    r : float
        The correlation coefficient between -1 and 1.
    p : float
        The two-side p value.
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    results = numpy.atleast_2d(numpy.array(results).astype(bool))
    references = numpy.atleast_2d(numpy.array(references).astype(bool))
    results_volumes = [numpy.count_nonzero(r) for r in results]
    references_volumes = [numpy.count_nonzero(r) for r in references]
    return pearsonr(results_volumes, references_volumes)  # returns (Pearson's correlation coefficient, 2-tailed p-value)
def volume_change_correlation(results, references):
    r"""
    Volume change correlation.
    Computes the linear correlation of change in binary object volume between
    the contents of the successive binary images supplied. Measured through
    the Pearson product-moment correlation coefficient.
    Parameters
    ----------
    results : sequence of array_like
        Ordered list of input data containing objects. Each array_like will be
        converted into binary: background where 0, object everywhere else.
    references : sequence of array_like
        Ordered list of input data containing objects. Each array_like will be
        converted into binary: background where 0, object everywhere else.
        The order must be the same as for ``results``.
    Returns
    -------
    r : float
        The correlation coefficient between -1 and 1.
    p : float
        The two-side p value.
    """
    # BUG FIX: ``numpy.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    results = numpy.atleast_2d(numpy.array(results).astype(bool))
    references = numpy.atleast_2d(numpy.array(references).astype(bool))
    results_volumes = numpy.asarray([numpy.count_nonzero(r) for r in results])
    references_volumes = numpy.asarray([numpy.count_nonzero(r) for r in references])
    # Correlate successive volume *differences* rather than the raw volumes.
    results_volumes_changes = results_volumes[1:] - results_volumes[:-1]
    references_volumes_changes = references_volumes[1:] - references_volumes[:-1]
    return pearsonr(results_volumes_changes, references_volumes_changes)  # returns (Pearson's correlation coefficient, 2-tailed p-value)
def obj_assd(result, reference, voxelspacing=None, connectivity=1):
    """
    Average symmetric surface distance.
    Computes the average symmetric surface distance (ASSD) between the binary objects in
    two images.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    voxelspacing : float or sequence of floats, optional
        The voxelspacing in a distance unit i.e. spacing of elements
        along each dimension. If a sequence, must be of length equal to
        the input rank; if a single number, this is used for all axes. If
        not specified, a grid spacing of unity is implied.
    connectivity : int
        The neighbourhood/connectivity considered when determining what accounts
        for a distinct binary object as well as when determining the surface
        of the binary objects. This value is passed to
        `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
        The decision on the connectivity is important, as it can influence the results
        strongly. If in doubt, leave it as it is.
    Returns
    -------
    assd : float
        The average symmetric surface distance between all mutually existing distinct
        binary object(s) in ``result`` and ``reference``. The distance unit is the same as for
        the spacing of elements along each dimension, which is usually given in mm.
    See also
    --------
    :func:`obj_asd`
    Notes
    -----
    This is a real metric, obtained by calling and averaging
    >>> obj_asd(result, reference)
    and
    >>> obj_asd(reference, result)
    The binary images can therefore be supplied in any order.
    """
    # Symmetrize the directed per-object average surface distance by averaging
    # both directions.
    forward = obj_asd(result, reference, voxelspacing, connectivity)
    backward = obj_asd(reference, result, voxelspacing, connectivity)
    return numpy.mean((forward, backward))
def obj_asd(result, reference, voxelspacing=None, connectivity=1):
    """
    Average surface distance between objects.
    First correspondences between distinct binary objects in reference and result are
    established. Then the average surface distance is only computed between corresponding
    objects. Correspondence is defined as unique and at least one voxel overlap.
    Parameters
    ----------
    result : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    reference : array_like
        Input data containing objects. Can be any type but will be converted
        into binary: background where 0, object everywhere else.
    voxelspacing : float or sequence of floats, optional
        The voxelspacing in a distance unit i.e. spacing of elements
        along each dimension. If a sequence, must be of length equal to
        the input rank; if a single number, this is used for all axes. If
        not specified, a grid spacing of unity is implied.
    connectivity : int
        The neighbourhood/connectivity considered when determining what accounts
        for a distinct binary object as well as when determining the surface
        of the binary objects. This value is passed to
        `scipy.ndimage.morphology.generate_binary_structure` and should usually be :math:`> 1`.
        The decision on the connectivity is important, as it can influence the results
        strongly. If in doubt, leave it as it is.
    Returns
    -------
    asd : float
        The average surface distance between all mutually existing distinct binary
        object(s) in ``result`` and ``reference``. The distance unit is the same as for the
        spacing of elements along each dimension, which is usually given in mm.
    See also
    --------
    :func:`obj_assd`
    :func:`obj_tpr`
    :func:`obj_fpr`
    Notes
    -----
    This is not a real metric, as it is directed. See `obj_assd` for a real metric of this.
    For the understanding of this metric, both the notions of connectedness and surface
    distance are essential. Please see :func:`obj_tpr` and :func:`obj_fpr` for more
    information on the first and :func:`asd` on the second.
    Examples
    --------
    >>> arr1 = numpy.asarray([[1,1,1],[1,1,1],[1,1,1]])
    >>> arr2 = numpy.asarray([[0,1,0],[0,1,0],[0,1,0]])
    >>> arr1
    array([[1, 1, 1],
           [1, 1, 1],
           [1, 1, 1]])
    >>> arr2
    array([[0, 1, 0],
           [0, 1, 0],
           [0, 1, 0]])
    >>> obj_asd(arr1, arr2)
    1.5
    >>> obj_asd(arr2, arr1)
    0.333333333333
    With the `voxelspacing` parameter, the distances between the voxels can be set for
    each dimension separately:
    >>> obj_asd(arr1, arr2, voxelspacing=(1,2))
    1.5
    >>> obj_asd(arr2, arr1, voxelspacing=(1,2))
    0.333333333333
    More examples depicting the notion of object connectedness:
    >>> arr1 = numpy.asarray([[1,0,1],[1,0,0],[0,0,0]])
    >>> arr2 = numpy.asarray([[1,0,1],[1,0,0],[0,0,1]])
    >>> arr1
    array([[1, 0, 1],
           [1, 0, 0],
           [0, 0, 0]])
    >>> arr2
    array([[1, 0, 1],
           [1, 0, 0],
           [0, 0, 1]])
    >>> obj_asd(arr1, arr2)
    0.0
    >>> obj_asd(arr2, arr1)
    0.0
    >>> arr1 = numpy.asarray([[1,0,1],[1,0,1],[0,0,1]])
    >>> arr2 = numpy.asarray([[1,0,1],[1,0,0],[0,0,1]])
    >>> arr1
    array([[1, 0, 1],
           [1, 0, 1],
           [0, 0, 1]])
    >>> arr2
    array([[1, 0, 1],
           [1, 0, 0],
           [0, 0, 1]])
    >>> obj_asd(arr1, arr2)
    0.6
    >>> obj_asd(arr2, arr1)
    0.0
    Influence of `connectivity` parameter can be seen in the following example, where
    with the (default) connectivity of `1` the first array is considered to contain two
    objects, while with an increase connectivity of `2`, just one large object is
    detected.
    >>> arr1 = numpy.asarray([[1,0,0],[0,1,1],[0,1,1]])
    >>> arr2 = numpy.asarray([[1,0,0],[0,0,0],[0,0,0]])
    >>> arr1
    array([[1, 0, 0],
           [0, 1, 1],
           [0, 1, 1]])
    >>> arr2
    array([[1, 0, 0],
           [0, 0, 0],
           [0, 0, 0]])
    >>> obj_asd(arr1, arr2)
    0.0
    >>> obj_asd(arr1, arr2, connectivity=2)
    1.742955328
    Note that the connectivity also influence the notion of what is considered an object
    surface voxels.
    """
    sds = list()
    # Match distinct binary objects between result and reference (unique
    # correspondence, >= 1 voxel overlap); helper is defined elsewhere in this module.
    labelmap1, labelmap2, _a, _b, mapping = __distinct_binary_object_correspondences(result, reference, connectivity)
    slicers1 = find_objects(labelmap1)
    slicers2 = find_objects(labelmap2)
    for lid2, lid1 in list(mapping.items()):
        # Restrict the distance computation to the joint bounding box of the
        # corresponding object pair (labels are 1-based, slicer lists 0-based).
        window = __combine_windows(slicers1[lid1 - 1], slicers2[lid2 - 1])
        object1 = labelmap1[window] == lid1
        object2 = labelmap2[window] == lid2
        sds.extend(__surface_distances(object1, object2, voxelspacing, connectivity))
    asd = numpy.mean(sds)
    return asd
def obj_fpr(result, reference, connectivity=1):
"""
The false positive rate of distinct binary object detection.
The false positive rates gives a percentage measure of how many distinct binary
objects in the second array do not exists in the first array. A partial overlap
(of minimum one voxel) is here considered sufficient.
In cases where two distinct binary object in the second array overlap with a single
distinct object in the first array, only one is considered to have been detected
successfully and the other is added to the count of false positives.
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | true |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/utils/utils_fundus.py | code/utils/utils_fundus.py | import os.path as osp
import numpy as np
import os
import cv2
import random
from skimage import morphology
import scipy
from PIL import Image
from matplotlib.pyplot import imsave
# from keras.preprocessing import image
from skimage.measure import label, regionprops
from skimage.transform import rotate, resize
from skimage import measure, draw
import torch
from skimage.morphology import disk, erosion, dilation, opening, closing, white_tophat
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def get_largest_fillhole(binary):
    """Keep only the largest connected component of a binary mask, then fill its holes.

    binary: binary ndarray; MUTATED IN PLACE (pixels outside the largest
    component are zeroed) before hole filling.
    Returns the hole-filled mask from scipy.ndimage.binary_fill_holes.
    """
    label_image = label(binary)          # connected-component labelling (skimage)
    regions = regionprops(label_image)
    area_list = []
    for region in regions:
        area_list.append(region.area)
    if area_list:
        # regionprops labels are 1-based, hence idx_max + 1.
        idx_max = np.argmax(area_list)
        binary[label_image != idx_max + 1] = 0
    return scipy.ndimage.binary_fill_holes(np.asarray(binary).astype(int))
def postprocessing(prediction, threshold=0.75, dataset='G'):
    """Binarise and clean a 2-channel cup/disc prediction.

    prediction: logits tensor, channel 0 = cup, channel 1 = disc.
    threshold: binarisation threshold (only used for the non-'D' branch).
    dataset: datasets starting with 'D' use fixed per-channel thresholds
        (0.5 disc / 0.1 cup); everything else uses `threshold` for both.
    Returns a uint8-valued numpy copy with cleaned masks written back into
    channels 0 (cup) and 1 (disc).
    """
    if dataset[0] == 'D':
        # prediction = prediction.numpy()
        prediction = torch.sigmoid(prediction).data.cpu().numpy()
        prediction_copy = np.copy(prediction)
        disc_mask = prediction[1]
        cup_mask = prediction[0]
        disc_mask = (disc_mask > 0.5) # return binary mask
        cup_mask = (cup_mask > 0.1) # return binary mask
        disc_mask = disc_mask.astype(np.uint8)
        cup_mask = cup_mask.astype(np.uint8)
        # Repeated median filtering smooths ragged mask borders.
        for i in range(5):
            disc_mask = scipy.signal.medfilt2d(disc_mask, 5)
            cup_mask = scipy.signal.medfilt2d(cup_mask, 5)
        # disc_mask = morphology.binary_erosion(disc_mask, morphology.diamond(7)).astype(np.uint8)  # return 0,1
        # cup_mask = morphology.binary_erosion(cup_mask, morphology.diamond(7)).astype(np.uint8)  # return 0,1
        disc_mask = get_largest_fillhole(disc_mask).astype(np.uint8)  # return 0,1
        cup_mask = get_largest_fillhole(cup_mask).astype(np.uint8)
        prediction_copy[0] = cup_mask
        prediction_copy[1] = disc_mask
        return prediction_copy
    else:
        prediction = torch.sigmoid(prediction).data.cpu().numpy()
        prediction_copy = np.copy(prediction)
        prediction_copy = (prediction_copy > threshold)  # return binary mask
        prediction_copy = prediction_copy.astype(np.uint8)
        disc_mask = prediction_copy[1]
        cup_mask = prediction_copy[0]
        disc_mask = scipy.signal.medfilt2d(disc_mask, 5)
        cup_mask = scipy.signal.medfilt2d(cup_mask, 5)
        # disc_mask = morphology.erosion(disc_mask, morphology.diamond(3))  # return 0,1
        # cup_mask = morphology.erosion(cup_mask, morphology.diamond(3))  # return 0,1
        disc_mask = get_largest_fillhole(disc_mask).astype(np.uint8)  # return 0,1
        cup_mask = get_largest_fillhole(cup_mask).astype(np.uint8)
        prediction_copy[0] = cup_mask
        prediction_copy[1] = disc_mask
        # selem = disk(6)
        # disc_mask = morphology.closing(disc_mask, selem)
        # cup_mask = morphology.closing(cup_mask, selem)
        # print(sum(disc_mask))
        return prediction_copy
def joint_val_image(image, prediction, mask):
    """Build a side-by-side visualisation row: [image | GT | cup pred | disc pred].

    image:      (3, H, W) array; prediction: (2, H, W) with channel 0 = cup,
    channel 1 = disc; mask: (2, H, W) ground truth drawn into the R/G channels.
    Returns an (H, 4*W, 3) array where each overlay blends image and map 50/50.
    """
    blend = 0.5
    img_hwc = np.transpose(image, (1, 2, 0))
    h, w = mask.shape[-2], mask.shape[-1]
    cup_rgb = np.zeros([h, w, 3])
    disc_rgb = np.zeros([h, w, 3])
    gt_rgb = np.zeros([h, w, 3])
    for ch in range(3):
        cup_rgb[:, :, ch] = prediction[0]
        disc_rgb[:, :, ch] = prediction[1]
    gt_rgb[:, :, 0] = mask[0]
    gt_rgb[:, :, 1] = mask[1]
    panels = [
        img_hwc,
        blend * img_hwc + (1 - blend) * gt_rgb,
        blend * img_hwc + (1 - blend) * cup_rgb,
        blend * img_hwc + (1 - blend) * disc_rgb,
    ]
    return np.concatenate(panels, axis=1)
def save_val_img(path, epoch, img):
    """Stack a list of (H, W, C) visualisation rows vertically and save them as
    ``<path>/visualization/epoch_<epoch>.png`` (directory created on demand)."""
    name = osp.join(path, "visualization", "epoch_%d.png" % epoch)
    out = osp.join(path, "visualization")
    if not osp.exists(out):
        os.makedirs(out)
    img_shape = img[0].shape
    stack_image = np.zeros([len(img) * img_shape[0], img_shape[1], img_shape[2]])
    for i in range(len(img)):
        stack_image[i * img_shape[0] : (i + 1) * img_shape[0], :, : ] = img[i]
    imsave(name, stack_image)
def save_per_img(patch_image, data_save_path, img_name, prob_map, gt=None, mask_path=None, ext="bmp"):
    """Paint predicted and ground-truth cup/disc contours onto `patch_image`
    and save the overlay to ``<data_save_path>/overlay/<name>.png``.

    prob_map: (2, H, W), channel 0/1 used as the two probability maps.
    gt: 2-channel tensor of ground-truth masks (``.numpy()`` is called on it,
        so despite the ``None`` default it is required — passing None crashes).
    mask_path, ext: unused in the current implementation.
    NOTE(review): `path0` ('original_image') is created but never written to.
    """
    path1 = os.path.join(data_save_path, 'overlay', img_name.split('.')[0]+'.png')
    path0 = os.path.join(data_save_path, 'original_image', img_name.split('.')[0]+'.png')
    if not os.path.exists(os.path.dirname(path0)):
        os.makedirs(os.path.dirname(path0))
    if not os.path.exists(os.path.dirname(path1)):
        os.makedirs(os.path.dirname(path1))
    disc_map = prob_map[0]
    cup_map = prob_map[1]
    # Zero the image borders so find_contours yields closed contours.
    size = disc_map.shape
    disc_map[:, 0] = np.zeros(size[0])
    disc_map[:, size[1] - 1] = np.zeros(size[0])
    disc_map[0, :] = np.zeros(size[1])
    disc_map[size[0] - 1, :] = np.zeros(size[1])
    size = cup_map.shape
    cup_map[:, 0] = np.zeros(size[0])
    cup_map[:, size[1] - 1] = np.zeros(size[0])
    cup_map[0, :] = np.zeros(size[1])
    cup_map[size[0] - 1, :] = np.zeros(size[1])
    # disc_mask = (disc_map > 0.75) # return binary mask
    # cup_mask = (cup_map > 0.75)
    # disc_mask = disc_mask.astype(np.uint8)
    # cup_mask = cup_mask.astype(np.uint8)
    contours_disc = measure.find_contours(disc_map, 0.5)
    contours_cup = measure.find_contours(cup_map, 0.5)
    # Predicted cup contour in green, thickened by painting 3x3 neighbours.
    for n, contour in enumerate(contours_cup):
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1]).astype(int), :] = [0, 255, 0]
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1]).astype(int), :] = [0, 255, 0]
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1] + 1.0).astype(int), :] = [0, 255, 0]
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] + 1.0).astype(int), :] = [0, 255, 0]
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1]).astype(int), :] = [0, 255, 0]
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1] - 1.0).astype(int), :] = [0, 255, 0]
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] - 1.0).astype(int), :] = [0, 255, 0]
    # Predicted disc contour in blue.
    for n, contour in enumerate(contours_disc):
        patch_image[contour[:, 0].astype(int), contour[:, 1].astype(int), :] = [0, 0, 255]
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1]).astype(int), :] = [0, 0, 255]
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1] + 1.0).astype(int), :] = [0, 0, 255]
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] + 1.0).astype(int), :] = [0, 0, 255]
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1]).astype(int), :] = [0, 0, 255]
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1] - 1.0).astype(int), :] = [0, 0, 255]
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] - 1.0).astype(int), :] = [0, 0, 255]
    # Ground-truth contours (both structures) in red.
    disc_mask = get_largest_fillhole(gt[0].numpy()).astype(np.uint8) # return 0,1
    cup_mask = get_largest_fillhole(gt[1].numpy()).astype(np.uint8)
    contours_disc = measure.find_contours(disc_mask, 0.5)
    contours_cup = measure.find_contours(cup_mask, 0.5)
    red = [255, 0, 0]
    for n, contour in enumerate(contours_cup):
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1]).astype(int), :] = red
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1]).astype(int), :] = red
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1] + 1.0).astype(int), :] = red
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] + 1.0).astype(int), :] = red
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1]).astype(int), :] = red
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1] - 1.0).astype(int), :] = red
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] - 1.0).astype(int), :] = red
    for n, contour in enumerate(contours_disc):
        patch_image[contour[:, 0].astype(int), contour[:, 1].astype(int), :] = red
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1]).astype(int), :] = red
        patch_image[(contour[:, 0] + 1.0).astype(int), (contour[:, 1] + 1.0).astype(int), :] = red
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] + 1.0).astype(int), :] = red
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1]).astype(int), :] = red
        patch_image[(contour[:, 0] - 1.0).astype(int), (contour[:, 1] - 1.0).astype(int), :] = red
        patch_image[(contour[:, 0]).astype(int), (contour[:, 1] - 1.0).astype(int), :] = red
    patch_image = patch_image.astype(np.uint8)
    patch_image = Image.fromarray(patch_image)
    patch_image.save(path1)
def sample_minibatch_fundus(stl_image, n_parts, n_samples):
'''
Create a batch with 'n_parts' number of 2D images where n_parts is number of 2D slices.
input param:
stl_image: input batch of 2D style codes
n_parts: number of slices per
n_samples: number of sample times
return:
fin_batch: swapped batch of 2D images.
'''
#select indexes of 'm' slices out of total M.
fin_batch=[]
for step in range(n_samples):
im_ns=random.sample(range(0, stl_image.size(0)), n_parts)
for vol_index in im_ns:
#if n_parts=4, then for each volume: create 4 partitions, pick 4 samples overall (1 from each partition randomly)
fin_batch.append(stl_image[vol_index])
return torch.stack(fin_batch, dim=0) | python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/utils/losses.py | code/utils/losses.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/5/6 16:55
# @Author : Ran.Gu
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
def get_soft_label(input_tensor, num_class):
    """One-hot encode an integer label tensor.

    input_tensor: [N, C, H, W] label tensor (typically C == 1).
    Returns a float tensor of shape [N, H, W, C * num_class]
    ([N, H, W, num_class] for a single-channel label).
    """
    channels_last = input_tensor.permute(0, 2, 3, 1)
    one_hot_slices = [
        torch.eq(channels_last, i * torch.ones_like(channels_last))
        for i in range(num_class)
    ]
    return torch.cat(one_hot_slices, dim=-1).float()
class SoftDiceLoss(_Loss):
    '''
    Module wrapper around soft_dice_loss (-log Dice form).
    Soft_Dice = 2*|dot(A, B)| / (|dot(A, A)| + |dot(B, B)| + eps)
    eps is a small constant to avoid zero division.
    NOTE(review): the `eps` argument of forward() is not forwarded to
    soft_dice_loss, which uses its own hard-coded 1e-5.
    '''
    def __init__(self, *args, **kwargs):
        super(SoftDiceLoss, self).__init__()
    def forward(self, prediction, soft_ground_truth, num_class=3, weight_map=None, eps=1e-8):
        # Delegates entirely to the module-level soft_dice_loss.
        dice_loss = soft_dice_loss(prediction, soft_ground_truth, num_class, weight_map)
        return dice_loss
def soft_dice_loss(prediction, soft_ground_truth, num_class, weight_map=None):
    """Multi-class soft Dice loss in -log(dice) form.

    prediction: [N, C, H, W] class probabilities (softmax is NOT applied here).
    soft_ground_truth: one-hot tensor reshapeable to (-1, num_class).
    weight_map: optional per-voxel weights (flattened internally).
    Returns the scalar mean over classes of -log(dice_c).
    """
    pred = prediction.permute(0, 2, 3, 1).contiguous().view(-1, num_class)
    ground = soft_ground_truth.view(-1, num_class)
    if weight_map is not None:
        weight_map = weight_map.view(-1)
        # NOTE(review): repeat(...).view_as(pred) lays the repeated weights out
        # row-major, which interleaves them across classes rather than
        # broadcasting one weight per voxel — confirm this is intended.
        weight_map_nclass = weight_map.repeat(num_class).view_as(pred)
        ref_vol = torch.sum(weight_map_nclass * ground, 0)
        intersect = torch.sum(weight_map_nclass * ground * pred, 0)
        seg_vol = torch.sum(weight_map_nclass * pred, 0)
    else:
        ref_vol = torch.sum(ground, 0)
        intersect = torch.sum(ground * pred, 0)
        seg_vol = torch.sum(pred, 0)
    dice_score = (2.0 * intersect) / (ref_vol + seg_vol + 1e-5)
    # -log(dice) penalises low-dice classes more sharply than 1 - dice.
    return torch.mean(-torch.log(dice_score))
class DiceLoss(_Loss):
    '''
    Module wrapper around dice_loss_mute (1 - mean Dice form).
    Soft_Dice = 2*|dot(A, B)| / (|dot(A, A)| + |dot(B, B)| + eps)
    eps is a small constant to avoid zero division.
    NOTE(review): the `eps` argument of forward() is unused; dice_loss_mute
    hard-codes 1e-5.
    '''
    def __init__(self, *args, **kwargs):
        super(DiceLoss, self).__init__()
    def forward(self, prediction, soft_ground_truth, num_class=3, weight_map=None, eps=1e-8):
        # Delegates entirely to the module-level dice_loss_mute.
        dice_loss = dice_loss_mute(prediction, soft_ground_truth, num_class, weight_map)
        return dice_loss
def dice_loss_mute(prediction, soft_ground_truth, num_class, weight_map=None):
    """Multi-class soft Dice loss, 1 - mean(per-class dice).

    prediction / soft_ground_truth: tensors reshapeable to (-1, num_class).
    weight_map: optional per-voxel weights (flattened internally).
    Returns a scalar tensor.
    """
    pred = prediction.reshape(-1, num_class)
    ground = soft_ground_truth.reshape(-1, num_class)
    if weight_map is not None:
        weight_map = weight_map.view(-1)
        # NOTE(review): repeat(...).view_as(pred) interleaves weights across
        # classes rather than broadcasting per voxel — confirm intended.
        weight_map_nclass = weight_map.repeat(num_class).view_as(pred)
        ref_vol = torch.sum(weight_map_nclass * ground, 0)
        intersect = torch.sum(weight_map_nclass * ground * pred, 0)
        seg_vol = torch.sum(weight_map_nclass * pred, 0)
    else:
        ref_vol = torch.sum(ground, 0)
        intersect = torch.sum(ground * pred, 0)
        seg_vol = torch.sum(pred, 0)
    dice_score = (2.0 * intersect) / (ref_vol + seg_vol + 1e-5)
    return 1.0 - torch.mean(dice_score)
def val_dice_class(prediction, soft_ground_truth, num_class):
    '''
    Per-class Dice score (with +1 smoothing) for multi-class validation.
    Returns a tensor of num_class scores.
    '''
    pred_flat = prediction.reshape(-1, num_class)
    gt_flat = soft_ground_truth.reshape(-1, num_class)
    overlap = (gt_flat * pred_flat).sum(0)
    denominator = gt_flat.sum(0) + pred_flat.sum(0) + 1.0
    return 2.0 * overlap / denominator
def val_dice(prediction, soft_ground_truth, num_class):
    '''
    Mean (over classes) Dice score with +1 smoothing.
    '''
    pred_flat = prediction.contiguous().view(-1, num_class)
    gt_flat = soft_ground_truth.view(-1, num_class)
    overlap = (gt_flat * pred_flat).sum(0)
    denominator = gt_flat.sum(0) + pred_flat.sum(0) + 1.0
    per_class = 2.0 * overlap / denominator
    return per_class.mean()
def eval_dice_dq(gt_y, pred_y, detail=False):
    """Dice score per foreground class between two integer label tensors.

    Requires CUDA (allocates comparison masks with .cuda()).
    NOTE(review): only class 1 is evaluated (range(1, 2)); `class_map` and
    `detail` are currently unused.
    Returns a list of scalar tensors, one per evaluated class.
    """
    class_map = {  # a map used for mapping label value to its name, used for output
        "0": "bg",
        "1": "CZ",
        "2": "prostate"
    }
    dice = []
    for cls in range(1, 2):
        gt = torch.zeros(gt_y.shape).cuda()
        pred = torch.zeros(pred_y.shape).cuda()
        gt[gt_y == cls] = 1
        pred[pred_y == cls] = 1
        # Plain Dice without smoothing: 0/0 yields NaN if the class is absent.
        dice_this = 2*torch.sum(gt*pred)/(torch.sum(gt)+torch.sum(pred))
        dice.append(dice_this)
    return dice
def Intersection_over_Union_class(prediction, soft_ground_truth, num_class):
    '''
    Per-class IoU (Jaccard) with +1 smoothing in the denominator.
    Returns a tensor of num_class scores.
    '''
    pred_flat = prediction.contiguous().view(-1, num_class)
    gt_flat = soft_ground_truth.view(-1, num_class)
    inter = (gt_flat * pred_flat).sum(0)
    union = gt_flat.sum(0) + pred_flat.sum(0) - inter
    return inter / (union + 1.0)
def Intersection_over_Union(prediction, soft_ground_truth, num_class):
    '''
    Mean (over classes) IoU with +1 smoothing in the denominator.
    '''
    pred_flat = prediction.contiguous().view(-1, num_class)
    gt_flat = soft_ground_truth.view(-1, num_class)
    inter = (gt_flat * pred_flat).sum(0)
    union = gt_flat.sum(0) + pred_flat.sum(0) - inter
    per_class = inter / (union + 1.0)
    return per_class.mean()
def entropy_loss(p,C=2):
    """Mean normalised entropy of a probability map.

    p: probabilities with class dim 1 (N*C*W*H*D per the original note).
    C: number of classes; entropy is divided by log(C) so the result is in [0, 1].
    Requires CUDA (.cuda() on the normalising constant).
    """
    ## p N*C*W*H*D
    y1 = -1*torch.sum(p*torch.log(p+1e-6), dim=1)/torch.tensor(np.log(C)).cuda()
    ent = torch.mean(y1)
    return ent
def dice_loss(score, target):
    """Scalar soft-Dice loss: 1 - (2*|A.B| + s) / (|A| + |B| + s), s = 1e-5."""
    target = target.float()
    eps = 1e-5
    overlap = torch.sum(score * target)
    denom = torch.sum(score) + torch.sum(target) + eps
    return 1 - (2 * overlap + eps) / denom
def softmax_dice_loss(input_logits, target_logits):
    """Softmax both logit tensors and return the mean per-class dice_loss.

    Note:
    - Both arguments are logits with the class dimension at dim 1.
    - NOTE(review): neither side is detached, so gradients flow to BOTH
      inputs (the original docstring claiming MSE / no target gradients
      did not match the code).
    """
    assert input_logits.size() == target_logits.size()
    input_softmax = F.softmax(input_logits, dim=1)
    target_softmax = F.softmax(target_logits, dim=1)
    n = input_logits.shape[1]
    dice = 0
    # Accumulate the scalar dice_loss of each class channel, then average.
    for i in range(0, n):
        dice += dice_loss(input_softmax[:, i], target_softmax[:, i])
    mean_dice = dice / n
    return mean_dice
def entropy_loss_map(p, C=2):
    # Per-pixel normalised entropy map (keeps dim 1); divides by log(C) so
    # values lie in [0, 1]. Requires CUDA (.cuda() on the constant).
    ent = -1*torch.sum(p * torch.log(p + 1e-6), dim=1, keepdim=True)/torch.tensor(np.log(C)).cuda()
    return ent
def softmax_mse_loss(input_logits, target_logits):
    """Element-wise squared error between softmax(input) and softmax(target).

    Returns the unreduced per-element tensor (same shape as the inputs);
    the caller chooses the reduction. Gradients flow to both arguments.
    """
    assert input_logits.size() == target_logits.size()
    diff = F.softmax(input_logits, dim=1) - F.softmax(target_logits, dim=1)
    return diff ** 2
def mse_loss(score, target):
    """Unreduced element-wise squared error."""
    return (score - target) ** 2
def kl_loss(score, target):
    """Element-wise KL divergence, no reduction.

    `score` must already be log-probabilities and `target` probabilities,
    per F.kl_div's convention.
    """
    return F.kl_div(score, target, reduction='none')
def KL_divergence(logvar, mu):
    """Mean KL( N(mu, exp(logvar)) || N(0, I) ), summed over the last dim
    and averaged over the batch (the standard VAE regulariser)."""
    per_sample = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(dim=-1)
    return per_sample.mean()
def softmax_kl_loss(input_logits, target_logits):
    """Unreduced element-wise KL(softmax(target) || softmax(input)).

    `input_logits` is log-softmaxed and `target_logits` softmaxed before the
    call to F.kl_div with reduction='none'; the caller picks the reduction.
    Gradients flow to both arguments.
    """
    assert input_logits.size() == target_logits.size()
    log_q = F.log_softmax(input_logits, dim=1)
    p = F.softmax(target_logits, dim=1)
    return F.kl_div(log_q, p, reduction='none')
def symmetric_mse_loss(input1, input2):
    """Mean squared difference that back-propagates into BOTH arguments
    (unlike F.mse_loss applied to a detached target)."""
    assert input1.size() == input2.size()
    delta = input1 - input2
    return (delta * delta).mean()
def log_gaussian(x, mu, logvar):
PI = mu.new([np.pi])
x = x.view(x.shape[0], -1)
mu = mu.view(x.shape[0], -1)
logvar = logvar.view(x.shape[0], -1)
N, D = x.shape
log_norm = (-1 / 2) * (D * torch.log(2 * PI) +
logvar.sum(dim=1) +
(((x - mu) ** 2) / (logvar.exp())).sum(dim=1))
return log_norm | python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/utils/average_meter.py | code/utils/average_meter.py | class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0.
self.avg = 0.
self.sum = 0.
self.count = 0.
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count | python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/dataloader/ms_cmr/make_datalist.py | code/dataloader/ms_cmr/make_datalist.py | '''
This is for MS-CMRSeg challenge dataset
'''
import os, sys
import re
import math
import random
import numpy as np
os.chdir(sys.path[0])
DATA_PATH = '../../Data/MSCMR_C0_45/image'
SAVE_PATH = '.'
def tryint(s):
    # Return int(s) when s parses as an integer, otherwise s unchanged
    # (used as a natural-sort key helper).
    try:
        return int(s)
    except ValueError:
        return s
def str2int(v_str):
    # Natural-sort key: split on digit runs and convert those runs to ints,
    # so 'case2' sorts before 'case10'.
    return [tryint(sub_str) for sub_str in re.split('([0-9]+)', v_str)]
def group_by_element(lst):
    """Group consecutive items sharing the same prefix before the first '_'.

    Returns a list of lists. For an empty input the result is [[]]
    (a single empty group), matching the original behaviour.
    """
    groups = [[]]
    total = len(lst)
    for idx, item in enumerate(lst):
        groups[-1].append(item)
        not_last = idx < total - 1
        if not_last and item.split('_')[0] != lst[idx + 1].split('_')[0]:
            groups.append([])
    return groups
def make_filelist():
    """Scan DATA_PATH, natural-sort the entries, and write their extension-less
    basenames to ``SAVE_PATH/data_list`` via text_save."""
    file_folder = os.path.join(DATA_PATH)
    case_folder = sorted([x for x in os.listdir(file_folder)])
    # Re-sort with the natural-sort key so numeric suffixes order correctly.
    case_folder.sort(key=str2int)
    # case_folder_list = list(set(case_folder))
    # case_folder_list = sorted(case_folder_list)
    if not os.path.isdir(os.path.join(SAVE_PATH)):
        os.makedirs(os.path.join(SAVE_PATH))
    # train_list = [os.path.join(DATA_PATH.replace('..', '/mnt/lustre/guran'), site, x) for x in case_folder]
    data_list = [os.path.join(x.split('.')[0]) for x in case_folder]
    ## split the datalist to train, valid, test
    # datalist_group = group_by_element(data_list)
    text_save(os.path.join(SAVE_PATH, 'data_list'), data_list)
def split_filelist(data_params):
    """Split the slice list in ``data_list`` into fine-tuning train/test lists
    by patient (prefix before '_'), writing ``train_ft01_list`` and
    ``test_ft01_list`` into data_params['target_data_list_dir'].

    ceil(10%) of the patients go to the train split; the rest to test.
    """
    with open(os.path.join(data_params['target_data_list_dir'], 'data_list'), 'r') as f:
        target_img_list = f.readlines()
    target_img_list = [item.replace('\n', '') for item in target_img_list]
    patient_list = list(set([x.split('_')[0] for x in target_img_list]))
    random.shuffle(patient_list)
    train_target_patient_list = patient_list[:math.ceil(len(patient_list)*0.1)]  # ~10% of patients for fine-tuning
    train_target_list = [x.split('.')[0] for x in target_img_list if x.split('_')[0] in train_target_patient_list]
    text_save(os.path.join(data_params['target_data_list_dir'], 'train_ft01_list'), train_target_list)
    # valid_target_patient_list = patient_list[len(train_target_patient_list):(len(patient_list)-len(train_target_patient_list))//3+len(train_target_patient_list)]
    # valid_target_list = [x.split('.')[0] for x in target_img_list if x.split('_')[0] in valid_target_patient_list]
    # text_save(os.path.join(data_params['target_data_list_dir'], 'valid_list'), valid_target_list)
    # test_target_patient_list = patient_list[(len(patient_list)-len(train_target_patient_list))//3+len(train_target_patient_list):]
    test_target_patient_list = patient_list[len(train_target_patient_list):]  # remaining ~90% of patients
    test_target_list = [x.split('.')[0] for x in target_img_list if x.split('_')[0] in test_target_patient_list]
    text_save(os.path.join(data_params['target_data_list_dir'], 'test_ft01_list'), test_target_list)
    print("Split date list successfully")
def text_save(filename, data):  # filename: path to write, data: list to be written
    """Write each element of `data` to `filename`, one per line, stripping
    '[', ']', quote and comma characters from its string form.

    Uses a `with` block so the handle is closed even if a write fails
    (the original left the file open on exceptions).
    """
    with open(filename, 'w+') as file:
        for item in data:
            s = str(item).replace('[', '').replace(']', '')
            s = s.replace("'", '').replace(',', '') + '\n'
            file.write(s)
    print("Save {} successfully".format(filename.split('/')[-1]))
def text_lb_save(filename, data):  # filename: path to write, data: list to be written
    """Write alternating image/label entries: even-indexed items are followed
    by a comma, odd-indexed items end the line (a trailing comma after
    '_segmentation.nii.gz' is stripped).

    Uses a `with` block so the handle is closed even if a write fails
    (the original left the file open on exceptions).
    """
    with open(filename, 'w+') as file:
        for i in range(len(data)):
            s = str(data[i]).replace('[', '').replace(']', '')
            if i % 2 == 1:
                s = s.replace("'", '').replace('_segmentation.nii.gz,', '_segmentation.nii.gz') + '\n'
            else:
                s = s.replace("'", '') + ','
            file.write(s)
    print("Save {} successfully".format(filename.split('/')[-1]))
if __name__ == '__main__':
# make_filelist()
split_filelist()
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/dataloader/ms_cmr/Cmr_dataloader.py | code/dataloader/ms_cmr/Cmr_dataloader.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/5/8 22:02
# @Author : Ran.Gu
'''
Define a dataset class for MS-CMRSeg challenge (.png, .jpg) dataset
'''
import os
import torch
import numpy as np
import itertools
import imageio
import cv2
import random
import matplotlib.pyplot as plt
from os import listdir
from os.path import join
from PIL import Image
from scipy import misc
from torch.utils.data.dataset import Dataset
from torch.utils.data.sampler import Sampler
def itensity_normalize(volume):
    """
    Zero-mean / unit-variance normalisation of an nd volume using the mean
    and std of the WHOLE array.
    inputs:
        volume: the input nd volume
    outputs:
        the normalised nd volume
    """
    return (volume - volume.mean()) / volume.std()
class CmrsegDataset(Dataset):
    """2D slice dataset for the MS-CMRSeg challenge.

    Reads grayscale PNG image/label pairs whose basenames are listed in
    ``<data_list_dir>/<train_type>_list``.  Label gray values 128 and 255 are
    remapped to class ids 1 and 2 (0 = background).

    Args:
        data_list_dir: directory holding the ``*_list`` text files.
        data_dir: root directory with ``<image_type>/`` and ``label/`` subdirs.
        train_type: one of 'train', 'valid', 'test'.
        image_type: subdirectory name of the input images.
        percent: if in (0, 1), keep a random fraction of the list.
        transform: optional callable applied to each sample dict.
    """
    def __init__(self, data_list_dir='./Datasets/MS_CMRSeg', data_dir='./Data/MSCMR_C0_45',
                 train_type='train', image_type='image', percent=1.0, transform=None):
        self.transform = transform
        self.train_type = train_type
        self.data_list_dir = data_list_dir
        self.data_dir = data_dir
        self.image_type = image_type
        if self.train_type not in ['train', 'valid', 'test']:
            # Fail fast: the original only printed a message and then crashed
            # later with AttributeError on self.data.
            raise ValueError("train_type must be 'train', 'valid' or 'test', got %r" % train_type)
        with open(join(self.data_list_dir, self.train_type + '_list'), 'r') as f:
            self.image_list = [item.replace('\n', '') for item in f.readlines()]
        if 0 < percent < 1:
            random.shuffle(self.image_list)
            self.image_list = self.image_list[:round(len(self.image_list) * percent)]
        self.data = [join(self.data_dir, self.image_type, x + '.png') for x in self.image_list]
        self.mask = [join(self.data_dir, 'label', x + '.png') for x in self.image_list]
        assert len(self.data) == len(self.mask)
        print("total {} {} samples".format(len(self.data), self.train_type))  # fixed 'totoal' typo

    def __getitem__(self, item: int):
        """Return {'slice_name', 'image' (HxWx1, normalised), 'label' (HxWx1)}."""
        slice_name = self.data[item].rsplit('/', maxsplit=1)[-1].split('.')[0]
        image = cv2.imread(self.data[item], cv2.IMREAD_GRAYSCALE)[:, :, np.newaxis]
        image = itensity_normalize(image)
        label = cv2.imread(self.mask[item], cv2.IMREAD_GRAYSCALE)[:, :, np.newaxis]
        label[label == 128] = 1
        label[label == 255] = 2
        # The original `assert (label.any() > 2) == False` compared a bool to 2
        # and could never fire; validate the remapped label range for real.
        assert label.max() <= 2, 'unexpected label value in ' + slice_name
        sample = {'slice_name': slice_name, 'image': image, 'label': label}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.data)
class RandomNoise(object):
    """Additive Gaussian-noise augmentation: noise ~ N(mu, sigma^2), clipped to
    [-2*sigma, 2*sigma] before the mean shift, then added to sample['image']."""

    def __init__(self, mu=0, sigma=0.1):
        self.mu = mu
        self.sigma = sigma

    def __call__(self, sample):
        img, lab = sample['image'], sample['label']
        raw = self.sigma * np.random.randn(img.shape[0], img.shape[1], img.shape[2])
        noise = np.clip(raw, -2 * self.sigma, 2 * self.sigma) + self.mu
        return {'slice_name': sample['slice_name'], 'image': img + noise, 'label': lab}
class CreateOnehotLabel(object):
    """Add a float32 one-hot encoding of the 3D label map to the sample dict
    under 'onehot_label', with shape (num_classes, *label.shape)."""

    def __init__(self, num_classes):
        self.num_classes = num_classes

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        planes = [(label == c).astype(np.float32) for c in range(self.num_classes)]
        onehot = np.stack(planes, axis=0)
        return {'slice_name': sample['slice_name'], 'image': image,
                'label': label, 'onehot_label': onehot}
class ToTensor(object):
    """Convert the HxWxC ndarrays in a sample to CxHxW float32 torch tensors;
    an existing 'onehot_label' array is converted as-is."""

    def __call__(self, sample):
        image = sample['image'].transpose([2, 0, 1]).astype(np.float32)
        label = sample['label'].transpose([2, 0, 1]).astype(np.float32)
        out = {'slice_name': sample['slice_name'],
               'image': torch.from_numpy(image),
               'label': torch.from_numpy(label)}
        if 'onehot_label' in sample:
            out['onehot_label'] = torch.from_numpy(sample['onehot_label'])
        return out
class TwoStreamBatchSampler(Sampler):
    """Yield batches that mix two index sets (e.g. labelled + unlabelled).

    An 'epoch' is one iteration through the primary indices.
    During the epoch, the secondary indices are iterated through
    as many times as needed.  Each batch is
    primary_batch_size + secondary_batch_size indices long.
    """
    def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):
        self.primary_indices = primary_indices
        self.secondary_indices = secondary_indices
        self.secondary_batch_size = secondary_batch_size
        self.primary_batch_size = batch_size - secondary_batch_size
        # Both streams must be able to fill at least one batch.
        assert len(self.primary_indices) >= self.primary_batch_size > 0
        assert len(self.secondary_indices) >= self.secondary_batch_size > 0
    def __iter__(self):
        # Primary: one shuffled pass; secondary: endless reshuffled stream.
        primary_iter = iterate_once(self.primary_indices)
        secondary_iter = iterate_eternally(self.secondary_indices)
        return (
            primary_batch + secondary_batch
            for (primary_batch, secondary_batch)
            in zip(grouper(primary_iter, self.primary_batch_size),
                   grouper(secondary_iter, self.secondary_batch_size))
        )
    def __len__(self):
        # Number of full batches per epoch (partial batches are dropped).
        return len(self.primary_indices) // self.primary_batch_size
def iterate_once(iterable):
    # One random permutation of the indices: a single shuffled "epoch".
    return np.random.permutation(iterable)
def iterate_eternally(indices):
    """Endless iterator over `indices`: repeatedly yields the elements of a
    fresh random permutation, so every full cycle covers all indices once."""
    def _endless():
        while True:
            for idx in np.random.permutation(indices):
                yield idx
    return _endless()
def grouper(iterable, n):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3) --> ABC DEF"
args = [iter(iterable)] * n
return zip(*args) | python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/dataloader/ms_fundus/Refuge_dataloader.py | code/dataloader/ms_fundus/Refuge_dataloader.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/5/8 22:02
# @Author : Ran.Gu
'''
Define a dataset class for REFUGE challenge (.png, .jpg) dataset
'''
import os
import torch
import numpy as np
import cv2
import imageio
import matplotlib.pyplot as plt
from os import listdir
from os.path import join
from PIL import Image
from scipy import misc
from torch.utils.data.dataset import Dataset
def itensity_normalize(volume):
    """
    normalize the itensity of an nd volume based on the mean and std of nonzeor region
    inputs:
        volume: the input nd volume
    outputs:
        out: the normalized n d volume
    NOTE(review): despite the docstring, the statistics are taken over the
    WHOLE array (the nonzero-region code below is commented out).
    """
    # pixels = volume[volume > 0]
    mean = volume.mean()
    std = volume.std()
    out = (volume - mean) / std
    # out_random = np.random.normal(0, 1, size=volume.shape)
    # out[volume == 0] = out_random[volume == 0]
    return out
class RefugeDataset(Dataset):
    """2D image dataset for the REFUGE challenge (Non-Glaucoma subset).

    Reads grayscale PNG image/label pairs whose basenames are listed in
    ``<data_list_dir>/<train_type>_list``.  Label gray values 128 and 255 are
    remapped to class ids 1 and 2 (0 = background).

    Args:
        data_list_dir: directory holding the ``*_list`` text files.
        data_dir: root directory containing ``Non-Glaucoma/<image_type>``
            and ``Non-Glaucoma/label`` subdirectories.
        train_type: one of 'train', 'valid', 'test', 'data'.
        image_type: subdirectory name of the input images.
        transform: optional callable applied to each sample dict.
    """
    def __init__(self, data_list_dir='./Datasets/fundus', data_dir='./Data/REFUGE',
                 train_type='train', image_type='image', transform=None):
        self.transform = transform
        self.train_type = train_type
        self.data_list_dir = data_list_dir
        self.data_dir = data_dir
        self.image_type = image_type
        if self.train_type not in ['train', 'valid', 'test', 'data']:
            # Fail fast: the original only printed a message and then crashed
            # later with AttributeError on self.data.
            raise ValueError("train_type must be 'train', 'valid', 'test' or 'data', got %r" % train_type)
        with open(join(self.data_list_dir, self.train_type + '_list'), 'r') as f:
            self.image_list = [item.replace('\n', '') for item in f.readlines()]
        self.data = [join(self.data_dir, 'Non-Glaucoma', self.image_type, x + '.png') for x in self.image_list]
        self.mask = [join(self.data_dir, 'Non-Glaucoma', 'label', x + '.png') for x in self.image_list]
        assert len(self.data) == len(self.mask)

    def __getitem__(self, item: int):
        """Return {'slice_name', 'image' (HxWx1, normalised), 'label' (HxWx1)}."""
        slice_name = self.data[item].rsplit('/', maxsplit=1)[-1].split('.')[0]
        image = cv2.imread(self.data[item], cv2.IMREAD_GRAYSCALE)[:, :, np.newaxis]
        image = itensity_normalize(image)
        label = cv2.imread(self.mask[item], cv2.IMREAD_GRAYSCALE)[:, :, np.newaxis]
        label[label == 128] = 1
        label[label == 255] = 2
        # The original `assert (label.any() > 2) == False` compared a bool to 2
        # and could never fire; validate the remapped label range for real.
        assert label.max() <= 2, 'unexpected label value in ' + slice_name
        sample = {'slice_name': slice_name, 'image': image, 'label': label}
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.data)
class RandomNoise(object):
    # Additive Gaussian-noise augmentation: noise ~ N(mu, sigma^2), clipped
    # to [-2*sigma, 2*sigma] before the mean shift, added to sample['image'].
    def __init__(self, mu=0, sigma=0.1):
        self.mu = mu
        self.sigma = sigma
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # Assumes a 3D (H, W, C) image array.
        noise = np.clip(self.sigma * np.random.randn(image.shape[0], image.shape[1], image.shape[2]), -2*self.sigma, 2*self.sigma)
        noise = noise + self.mu
        image = image + noise
        return {'slice_name': sample['slice_name'], 'image': image, 'label': label}
class CreateOnehotLabel(object):
    # Adds a float32 one-hot encoding of the 3D label map under
    # 'onehot_label', shape (num_classes, *label.shape).
    def __init__(self, num_classes):
        self.num_classes = num_classes
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        onehot_label = np.zeros((self.num_classes, label.shape[0], label.shape[1], label.shape[2]), dtype=np.float32)
        for i in range(self.num_classes):
            onehot_label[i, :, :, :] = (label == i).astype(np.float32)
        return {'slice_name': sample['slice_name'], 'image': image, 'label': label, 'onehot_label': onehot_label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
image = sample['image']
label = sample['label']
image, label = image.transpose([2, 0, 1]), label.transpose([2, 0, 1])
image, label = image.astype(np.float32), label.astype(np.float32)
# image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2]).astype(np.float32)
if 'onehot_label' in sample:
return {'slice_name': sample['slice_name'], 'image': torch.from_numpy(image),
'label': torch.from_numpy(label), 'onehot_label': torch.from_numpy(sample['onehot_label'])}
else:
return {'slice_name': sample['slice_name'], 'image': torch.from_numpy(image), 'label': torch.from_numpy(label)} | python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/dataloader/ms_fundus/fundus_dataloader.py | code/dataloader/ms_fundus/fundus_dataloader.py | from __future__ import print_function, division
import os
import sys
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from glob import glob
import random
import copy
class FundusSegmentation(Dataset):
"""
Fundus segmentation dataset
including 4 domain dataset
one for test others for training
"""
def __init__(self,
base_dir='/mnt/data1/guran/Data/ms_fundus/',
phase='train',
splitid=[2, 3, 4],
transform=None,
state='train',
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
# super().__init__()
self.state = state
self._base_dir = base_dir
self.image_list = []
self.phase = phase
self.image_pool = {'DGS':[], 'REF':[], 'RIM':[], 'REF_val':[]}
self.label_pool = {'DGS':[], 'REF':[], 'RIM':[], 'REF_val':[]}
self.img_name_pool = {'DGS':[], 'REF':[], 'RIM':[], 'REF_val':[]}
self.flags_DGS = ['gd', 'nd']
self.flags_REF = ['g', 'n']
self.flags_RIM = ['G', 'N', 'S']
self.flags_REF_val = ['V']
self.splitid = splitid
SEED = 1212
random.seed(SEED)
for id in splitid:
self._image_dir = os.path.join(self._base_dir, 'Domain'+str(id), phase, 'image/')
print('==> Loading {} data from: {}'.format(phase, self._image_dir))
imagelist = glob(self._image_dir + '*.png')
for image_path in imagelist:
gt_path = image_path.replace('image', 'mask')
self.image_list.append({'image': image_path, 'label': gt_path})
self.transform = transform
self._read_img_into_memory()
for key in self.image_pool:
if len(self.image_pool[key]) < 1:
del self.image_pool[key]
del self.label_pool[key]
del self.img_name_pool[key]
break
for key in self.image_pool:
if len(self.image_pool[key]) < 1:
del self.image_pool[key]
del self.label_pool[key]
del self.img_name_pool[key]
break
for key in self.image_pool:
if len(self.image_pool[key]) < 1:
del self.image_pool[key]
del self.label_pool[key]
del self.img_name_pool[key]
break
# Display stats
print('-----Total number of images in {}: {:d}'.format(phase, len(self.image_list)))
def __len__(self):
max = -1
for key in self.image_pool:
if len(self.image_pool[key])>max:
max = len(self.image_pool[key])
return max
def __getitem__(self, index):
if self.phase != 'test':
sample = []
for key in self.image_pool:
domain_code = list(self.image_pool.keys()).index(key)
index = np.random.choice(len(self.image_pool[key]), 1)[0]
_img = self.image_pool[key][index]
_target = self.label_pool[key][index]
_img_name = self.img_name_pool[key][index]
anco_sample = {'image': _img, 'label': _target, 'img_name': _img_name, 'dc': domain_code}
if self.transform is not None:
anco_sample = self.transform(anco_sample)
sample.append(anco_sample)
else:
sample = []
for key in self.image_pool:
try:
domain_code = list(self.image_pool.keys()).index(key)
_img = self.image_pool[key][index]
_target = self.label_pool[key][index]
_img_name = self.img_name_pool[key][index]
anco_sample = {'image': _img, 'label': _target, 'img_name': _img_name, 'dc': domain_code}
except:
continue
if self.transform is not None:
anco_sample = self.transform(anco_sample)
sample.append(anco_sample)
return sample
def _read_img_into_memory(self):
img_num = len(self.image_list)
for index in range(img_num):
basename = os.path.basename(self.image_list[index]['image'])
Flag = "NULL"
if basename[0:2] in self.flags_DGS:
Flag = 'DGS'
elif basename[0] in self.flags_REF:
Flag = 'REF'
elif basename[0] in self.flags_RIM:
Flag = 'RIM'
elif basename[0] in self.flags_REF_val:
Flag = 'REF_val'
else:
print("[ERROR:] Unknown dataset!")
return 0
if self.splitid[0] == '4':
# self.image_pool[Flag].append(Image.open(self.image_list[index]['image']).convert('RGB').resize((256, 256), Image.LANCZOS))
self.image_pool[Flag].append(Image.open(self.image_list[index]['image']).convert('RGB').crop((144, 144, 144+512, 144+512)).resize((256, 256), Image.LANCZOS))
_target = np.asarray(Image.open(self.image_list[index]['label']).convert('L'))
_target = _target[144:144+512, 144:144+512]
_target = Image.fromarray(_target)
else:
self.image_pool[Flag].append(
Image.open(self.image_list[index]['image']).convert('RGB').resize((256, 256), Image.LANCZOS))
# self.image_pool[Flag].append(Image.open(self.image_list[index]['image']).convert('RGB'))
_target = Image.open(self.image_list[index]['label'])
if _target.mode is 'RGB':
_target = _target.convert('L')
if self.state != 'prediction':
_target = _target.resize((256, 256))
# print(_target.size)
# print(_target.mode)
self.label_pool[Flag].append(_target)
# if self.split[0:4] in 'test':
_img_name = self.image_list[index]['image'].split('/')[-1]
self.img_name_pool[Flag].append(_img_name)
def __str__(self):
return 'Fundus(phase=' + self.phase+str(args.datasetTest[0]) + ')'
if __name__ == '__main__':
    # Smoke test: load Domain1 training data and visualize a few samples.
    import dataloader.ms_fundus.fundus_transforms as tr
    from dataloader.utils import decode_segmap
    from torch.utils.data import DataLoader
    from torchvision import transforms
    import matplotlib.pyplot as plt
    # Standard training augmentation pipeline.
    composed_transforms_tr = transforms.Compose([
        tr.RandomSizedCrop(512),
        tr.RandomRotate(15),
        tr.Normalize_tf(),
        tr.ToTensor()])
    voc_train = FundusSegmentation(phase='train', splitid=[1],
                                   transform=composed_transforms_tr)
    dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=2)
    for ii, sample in enumerate(dataloader):
        for jj in range(sample[0]["image"].size()[0]):
            img = sample[0]['image'].numpy()
            gt = sample[0]['label'].numpy()
            # CHW -> HWC for plotting; (x + 1) * 128 undoes the [-1, 1]
            # normalization applied by Normalize_tf.
            segmap = np.transpose(gt[jj], axes=[1, 2, 0]).astype(np.uint8)
            img_tmp = np.transpose((img[jj]+1.0)*128, axes=[1, 2, 0]).astype(np.uint8)
            plt.figure()
            plt.title('display')
            plt.subplot(221)
            plt.imshow(img_tmp)
            plt.subplot(222)
            plt.imshow(segmap[..., 0])
            plt.subplot(223)
            plt.imshow(segmap[..., 1])
            # Only the first element of each batch is shown.
            break
        plt.show(block=True)
| python | MIT | 274e83c2ee0a90d06d9d1529d6526f9af6271055 | 2026-01-05T07:14:10.960732Z | false |
HiLab-git/DAG4MIA | https://github.com/HiLab-git/DAG4MIA/blob/274e83c2ee0a90d06d9d1529d6526f9af6271055/code/dataloader/ms_fundus/fundus_transforms.py | code/dataloader/ms_fundus/fundus_transforms.py | import torch
import math
import numbers
import random
import numpy as np
from PIL import Image, ImageOps
from scipy.ndimage.filters import gaussian_filter
from matplotlib.pyplot import imshow, imsave
from scipy.ndimage.interpolation import map_coordinates
import cv2
from scipy import ndimage
def to_multilabel(pre_mask, classes = 2):
    """Expand a {0, 1, 2} fundus mask into overlapping two-channel labels.

    Pixels labelled 1 activate only the second channel, pixels labelled 2
    activate both channels; background pixels stay all-zero.  Returns a
    float array of shape (H, W, classes).
    """
    label_two = (pre_mask == 2)
    label_one_or_two = (pre_mask == 1) | label_two
    out = np.zeros((pre_mask.shape[0], pre_mask.shape[1], classes))
    out[..., 0] = label_two
    out[..., 1] = label_one_or_two
    return out
class add_salt_pepper_noise():
    """Randomly sprinkle salt (value 1) or pepper (value 0) pixels.

    With probability 0.25 salt is added, with probability 0.25 pepper is
    added, otherwise the image is untouched; in every case the sample's
    image is replaced by a numpy copy.
    """

    def __call__(self, sample):
        img = np.asarray(sample['image']).copy()
        salt_vs_pepper = 0.2
        amount = 0.004
        n_salt = np.ceil(amount * img.size * salt_vs_pepper)
        n_pepper = np.ceil(amount * img.size * (1.0 - salt_vs_pepper))
        roll = random.random()
        if roll > 0.75:
            # Salt: set random pixel positions (all channels) to 1.
            rows, cols, _ = [np.random.randint(0, dim - 1, int(n_salt)) for dim in img.shape]
            img[rows, cols, :] = 1
        elif roll > 0.5:
            # Pepper: zero out random pixel positions.
            rows, cols, _ = [np.random.randint(0, dim - 1, int(n_pepper)) for dim in img.shape]
            img[rows, cols, :] = 0
        sample['image'] = img
        return sample
class adjust_light():
    """Random gamma correction (p=0.5) with gamma drawn from [0.5, 3.5)."""

    def __call__(self, sample):
        if random.random() > 0.5:
            gamma = random.random() * 3 + 0.5
            inv_gamma = 1.0 / gamma
            # Pre-compute the per-intensity gamma curve and apply it as a LUT.
            lut = np.array([((level / 255.0) ** inv_gamma) * 255
                            for level in np.arange(0, 256)]).astype(np.uint8)
            sample['image'] = cv2.LUT(np.array(sample['image']).astype(np.uint8), lut).astype(np.uint8)
        return sample
class eraser():
    """Random-erasing augmentation.

    With probability 0.5, blank out one random rectangle of the image with
    a random constant (or per-pixel random values when ``pixel_level``).
    """

    def __call__(self, sample, s_l=0.02, s_h=0.06, r_1=0.3, r_2=0.6, v_l=0, v_h=255, pixel_level=False):
        image = sample['image']
        img_h, img_w, img_c = image.shape
        # Apply the eraser only half of the time.
        if random.random() > 0.5:
            return sample
        # Rejection-sample a rectangle (area fraction in [s_l, s_h], aspect
        # ratio in [r_1, r_2]) until it fits inside the image.
        while True:
            area = np.random.uniform(s_l, s_h) * img_h * img_w
            ratio = np.random.uniform(r_1, r_2)
            rect_w = int(np.sqrt(area / ratio))
            rect_h = int(np.sqrt(area * ratio))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + rect_w <= img_w and top + rect_h <= img_h:
                break
        if pixel_level:
            fill = np.random.uniform(v_l, v_h, (rect_h, rect_w, img_c))
        else:
            fill = np.random.uniform(v_l, v_h)
        image[top:top + rect_h, left:left + rect_w, :] = fill
        sample['image'] = image
        return sample
class elastic_transform():
    """Elastic deformation of images as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
    # def __init__(self):
    def __call__(self, sample):
        # Applied with probability 0.5; displacement strength (alpha) and
        # smoothing (sigma) both scale with the image height.
        image, label = sample['image'], sample['label']
        alpha = image.size[1] * 2
        sigma = image.size[1] * 0.08
        random_state = None
        seed = random.random()
        if seed > 0.5:
            # print(image.size)
            assert len(image.size) == 2
            if random_state is None:
                random_state = np.random.RandomState(None)
            # NOTE(review): PIL's .size is (width, height) while the numpy
            # arrays below are indexed (row, col); the mixing is only
            # consistent for square images — confirm inputs are square.
            shape = image.size[0:2]
            # Smoothed random displacement fields for rows (dx) and cols (dy).
            dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
            dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
            x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
            indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
            transformed_image = np.zeros([image.size[0], image.size[1], 3])
            transformed_label = np.zeros([image.size[0], image.size[1]])
            # Warp each RGB channel independently with bilinear interpolation.
            for i in range(3):
                # print(i)
                transformed_image[:, :, i] = map_coordinates(np.array(image)[:, :, i], indices, order=1).reshape(shape)
                # break
            if label is not None:
                transformed_label[:, :] = map_coordinates(np.array(label)[:, :], indices, order=1, mode='nearest').reshape(shape)
            else:
                transformed_label = None
            transformed_image = transformed_image.astype(np.uint8)
            if label is not None:
                transformed_label = transformed_label.astype(np.uint8)
            # The image goes back as a PIL image; the label stays numpy.
            sample['image'] = Image.fromarray(transformed_image)
            sample['label'] = transformed_label
        return sample
class RandomCrop(object):
    """Randomly crop image and mask to ``size`` (h, w), padding first when
    the input is smaller than the target (mask padded with 255 = ignore)."""

    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            # A single number means a square crop.
            self.size = (int(size), int(size))
        else:
            self.size = size  # (h, w)
        self.padding = padding

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        w, h = img.size
        # Pad when explicitly requested or when the image is smaller than
        # the target crop; the +5 keeps a small safety margin.
        if self.padding > 0 or w < self.size[0] or h < self.size[1]:
            border = np.maximum(self.padding,
                                np.maximum((self.size[0] - w) // 2 + 5,
                                           (self.size[1] - h) // 2 + 5))
            img = ImageOps.expand(img, border=border, fill=0)
            mask = ImageOps.expand(mask, border=border, fill=255)
        assert img.width == mask.width
        assert img.height == mask.height
        w, h = img.size
        th, tw = self.size  # target size
        if w == tw and h == th:
            # Already the target size: nothing to crop.
            return {'image': img,
                    'label': mask,
                    'img_name': sample['img_name'],
                    'dc': sample['dc']}
        left = random.randint(0, w - tw)
        top = random.randint(0, h - th)
        sample['image'] = img.crop((left, top, left + tw, top + th))
        sample['label'] = mask.crop((left, top, left + tw, top + th))
        return sample
class CenterCrop(object):
    """Crop the centre ``size`` (h, w) region out of image and mask."""

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        w, h = img.size
        th, tw = self.size
        # Centre the crop window on both axes.
        left = int(round((w - tw) / 2.))
        top = int(round((h - th) / 2.))
        box = (left, top, left + tw, top + th)
        return {'image': img.crop(box),
                'label': mask.crop(box),
                'img_name': sample['img_name']}
class RandomFlip(object):
    """Independently flip image and mask horizontally and/or vertically,
    each with probability 0.5."""

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        # Two independent coin flips, horizontal first then vertical,
        # matching the original order of RNG draws.
        for flip_op in (Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM):
            if random.random() < 0.5:
                img = img.transpose(flip_op)
                mask = mask.transpose(flip_op)
        sample['image'] = img
        sample['label'] = mask
        return sample
class FixedResize(object):
    """Resize image (bilinear) and mask (nearest) to a fixed (h, w) size."""

    def __init__(self, size):
        # PIL expects (w, h); the supplied size is (h, w).
        self.size = tuple(reversed(size))

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.width == mask.width
        assert img.height == mask.height
        return {'image': img.resize(self.size, Image.BILINEAR),
                'label': mask.resize(self.size, Image.NEAREST),
                'img_name': sample['img_name']}
class Scale(object):
    """Resize image/mask to ``size`` unless the dominant side already
    matches the target length."""

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.width == mask.width
        assert img.height == mask.height
        w, h = img.size
        # Skip the resize when the longer side already has the target length.
        if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):
            return {'image': img,
                    'label': mask,
                    'img_name': sample['img_name']}
        oh, ow = self.size
        return {'image': img.resize((ow, oh), Image.BILINEAR),
                'label': mask.resize((ow, oh), Image.NEAREST),
                'img_name': sample['img_name']}
class RandomSizedCrop(object):
    """Crop a random area (45-100% of the image) with a random aspect
    ratio and resize it to ``size`` x ``size``; falls back to
    scale + centre-crop after 10 failed placement attempts."""

    def __init__(self, size):
        self.size = size

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        name = sample['img_name']
        assert img.width == mask.width
        assert img.height == mask.height
        for _ in range(10):
            full_area = img.size[0] * img.size[1]
            target_area = random.uniform(0.45, 1.0) * full_area
            aspect_ratio = random.uniform(0.5, 2)
            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))
            # Randomly transpose the rectangle.
            if random.random() < 0.5:
                w, h = h, w
            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)
                img = img.crop((x1, y1, x1 + w, y1 + h))
                mask = mask.crop((x1, y1, x1 + w, y1 + h))
                assert (img.size == (w, h))
                return {'image': img.resize((self.size, self.size), Image.BILINEAR),
                        'label': mask.resize((self.size, self.size), Image.NEAREST),
                        'img_name': name}
        # Fallback: deterministic scale followed by a centre crop.
        return CenterCrop(self.size)(Scale(self.size)(sample))
class RandomRotate(object):
    """Rotate image and mask by a multiple of 90 degrees with probability 0.5.

    NOTE(review): the angle is drawn once in ``__init__``, so every call on
    one instance rotates by the same angle, and the ``size`` argument is
    stored but never used — confirm whether per-call angles were intended.
    """
    def __init__(self, size=512):
        self.degree = random.randint(1, 4) * 90
        self.size = size
    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        seed = random.random()
        if seed > 0.5:
            rotate_degree = self.degree
            img = img.rotate(rotate_degree, Image.BILINEAR, expand=0)
            # NOTE(review): ``expand=255`` is a truthy flag, so the mask
            # canvas may expand while the image does not; for non-square
            # inputs the two can end up with different sizes — confirm.
            mask = mask.rotate(rotate_degree, Image.NEAREST, expand=255)
        sample['image'] = img
        sample['label'] = mask
        return sample
class RandomScaleCrop(object):
    """With probability 0.5 upscale image/mask by a random factor in
    [1, 1.5), then take a random crop back to ``size``."""

    def __init__(self, size):
        self.size = size
        self.crop = RandomCrop(self.size)

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.width == mask.width
        assert img.height == mask.height
        if random.random() > 0.5:
            new_w = int(random.uniform(1, 1.5) * img.size[0])
            new_h = int(random.uniform(1, 1.5) * img.size[1])
            img = img.resize((new_w, new_h), Image.BILINEAR)
            mask = mask.resize((new_w, new_h), Image.NEAREST)
        sample['image'] = img
        sample['label'] = mask
        # Delegate the final spatial selection to RandomCrop.
        return self.crop(sample)
class ResizeImg(object):
    """Resize only the image to ``size`` x ``size``; the mask is returned
    at its original resolution."""

    def __init__(self, size):
        self.size = size

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.width == mask.width
        assert img.height == mask.height
        return {'image': img.resize((self.size, self.size)),
                'label': mask,
                'img_name': sample['img_name']}
class Resize(object):
    """Resize both image and mask to ``size`` x ``size`` (default filter)."""

    def __init__(self, size):
        self.size = size

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.width == mask.width
        assert img.height == mask.height
        side = (self.size, self.size)
        return {'image': img.resize(side),
                'label': mask.resize(side),
                'img_name': sample['img_name']}
# class RandomScale(object):
# def __init__(self, limit):
# self.limit = limit
#
# def __call__(self, sample):
# img = sample['image']
# mask = sample['label']
# assert img.width == mask.width
# assert img.height == mask.height
#
# scale = random.uniform(self.limit[0], self.limit[1])
# w = int(scale * img.size[0])
# h = int(scale * img.size[1])
#
# img, mask = img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST)
#
# return {'image': img, 'label': mask, 'img_name': sample['img_name']}
class Normalize(object):
    """Normalize a tensor image with mean and standard deviation.

    Args:
        mean (tuple): means for each channel.
        std (tuple): standard deviations for each channel.
    """

    def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
        self.mean = mean
        self.std = std

    def __call__(self, sample):
        mask = np.array(sample['label']).astype(np.float32)
        # Scale to [0, 1] first, then apply the channel-wise statistics
        # in place so the array stays float32.
        img = np.array(sample['image']).astype(np.float32)
        img /= 255.0
        img -= self.mean
        img /= self.std
        return {'image': img,
                'label': mask,
                'img_name': sample['img_name']}
class GetBoundary(object):
    """Extract a band around the cup and disc contours of a (H, W, 2) mask.

    Channel 0 holds the cup, channel 1 the disc; the result is a uint8
    (H, W) map that is 1 inside either boundary band and 0 elsewhere.
    """

    def __init__(self, width = 5):
        self.width = width

    def _band(self, channel):
        # Pixels covered by the dilation or the erosion but not both form
        # a band straddling the object contour.
        grown = ndimage.binary_dilation(channel, iterations=self.width).astype(channel.dtype)
        shrunk = ndimage.binary_erosion(channel, iterations=self.width).astype(channel.dtype)
        combined = grown + shrunk
        combined[combined == 2] = 0
        return combined

    def __call__(self, mask):
        cup_band = self._band(mask[:, :, 0])
        disc_band = self._band(mask[:, :, 1])
        return ((cup_band + disc_band) > 0).astype(np.uint8)
class Normalize_tf(object):
    """Map the image to [-1, 1] and the raw grayscale mask to overlapping
    cup/disc multi-label channels.

    Raw mask intensities are quantized (>200 -> 255, 51..200 -> 128, rest
    -> 0) and then remapped to class ids (bright 255 -> 0, mid 128 -> 1,
    dark -> 2) before ``to_multilabel`` expands them into two channels.

    Args:
        mean (tuple): means for each channel (kept for API compatibility).
        std (tuple): standard deviations for each channel (unused here).
    """
    def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
        self.mean = mean
        self.std = std
        # Kept although unused in __call__: external code may rely on it.
        self.get_boundary = GetBoundary()

    def __call__(self, sample):
        img = np.array(sample['image']).astype(np.float32)
        __mask = np.array(sample['label']).astype(np.uint8)
        # Scale pixel values from [0, 255] to [-1, 1].
        img /= 127.5
        img -= 1.0
        # Quantize the raw mask into the three canonical intensities.
        # (The original executed the 128-assignment twice; the duplicate
        # statement was removed — it had no effect.)
        _mask = np.zeros([__mask.shape[0], __mask.shape[1]])
        _mask[__mask > 200] = 255
        _mask[(__mask > 50) & (__mask < 201)] = 128
        # Intensity -> class id: dark -> 2, bright -> 0, mid -> 1.
        __mask[_mask == 0] = 2
        __mask[_mask == 255] = 0
        __mask[_mask == 128] = 1
        mask = to_multilabel(__mask)
        sample['image'] = img
        sample['label'] = mask
        return sample
class Normalize_cityscapes(object):
    """Subtract per-channel means, then scale to [0, 1] (Cityscapes style).

    Args:
        mean (tuple): means for each channel.
    """

    def __init__(self, mean=(0., 0., 0.)):
        self.mean = mean

    def __call__(self, sample):
        img = np.array(sample['image']).astype(np.float32)
        # Note the order: mean subtraction happens before the 255 division.
        img -= self.mean
        img /= 255.0
        return {'image': img,
                'label': np.array(sample['label']).astype(np.float32),
                'img_name': sample['img_name']}
def ToMultiLabel(dc):
    """Return a length-3 one-hot float vector with a 1 at index ``dc``
    (all zeros when ``dc`` lies outside 0..2)."""
    return (np.arange(3) == dc).astype(np.float64)
def SoftLable(label):
    """Soften a one-hot label: the hot entry receives a random mass in
    [0.8, 1.0) and the remaining mass is split randomly over the other
    entries, with the final entry absorbing whatever is left.

    NOTE: kept faithful to the original, which only sums to 1 when the hot
    index is not the last position.
    """
    soft = label.copy()
    hot = list(label).index(1)
    soft[hot] = 0.8 + random.random() * 0.2
    consumed = soft[hot]
    last = len(label) - 1
    for pos in range(len(label)):
        if pos == hot:
            continue
        if pos == last:
            soft[pos] = 1 - consumed
        else:
            soft[pos] = random.random() * (1 - consumed)
            consumed += soft[pos]
    return soft
class ToTensor(object):
    """Convert HWC ndarrays in a sample to CHW float torch Tensors."""

    def __call__(self, sample):
        # numpy image: H x W x C  ->  torch image: C x H x W
        image_chw = np.array(sample['image']).astype(np.float32).transpose((2, 0, 1))
        # The label passes through a uint8 cast before becoming float.
        label_chw = np.array(sample['label']).astype(np.uint8).transpose((2, 0, 1))
        sample['image'] = torch.from_numpy(image_chw).float()
        sample['label'] = torch.from_numpy(label_chw).float()
        return sample
bilibili/apk-channelization | https://github.com/bilibili/apk-channelization/blob/9efeb80e5cbdc04c020efc056e92a10ef6f13ef5/updatetime.py | updatetime.py | #!/usr/bin/env python
import os, time
def update_file_time(dir_path):
    """Walk ``dir_path`` and touch every file whose modification year is
    before 1980, since the ZIP format cannot store earlier timestamps."""
    for root, _dirs, names in os.walk(dir_path):
        for name in names:
            path = os.path.join(root, name)
            modified = time.localtime(os.stat(path).st_mtime)
            # Entry 0 of the struct_time tuple is the year.
            if modified[0] < 1980:
                now = int(time.time())
                os.utime(path, (now, now))
bilibili/apk-channelization | https://github.com/bilibili/apk-channelization/blob/9efeb80e5cbdc04c020efc056e92a10ef6f13ef5/signingapks.py | signingapks.py | #!/usr/bin/env python
import sys, getopt, os
# Apk paths to sign; populated by parse_options() (module-level state).
apkfiles = []
# Path to the zipalign tool; None (or the literal string 'null', checked in
# sign_apks) disables the alignment step.
zipalignexe = None
class signingConfig:
    # Jarsigner settings shared across the module (used as a plain namespace).
    verbose = False
    keystore = None   # path to the Java keystore file
    storepass = None  # keystore password
    keyalias = None   # alias of the signing key inside the keystore
    keypass = None    # password of the signing key
def usage():
    """Print the command line help for the signing options (Python 2)."""
    print 'Usage: ./signingapks.py <signingConfig> [--zipalignexe=path/to/zipalign] <apkfile>...'
    print 'signingConfig:\n --keystore=path/to/keystore'
    print ' --storepass=keystore password'
    print ' --keyalias=key alias'
    print ' --keypass=key password'
def parse_options(argv):
    """Parse command line options into ``signingConfig`` and module globals.

    Returns 1 when usage was printed (caller should exit), 0 on success;
    exits the process directly on a getopt error.
    """
    try:
        opts, args = getopt.getopt(argv, 'hv', ['keystore=','storepass=','keyalias=','keypass=','zipalignexe='])
        if len(args) == 0:
            print 'path to apk is missing'
            usage()
            return 1
        global apkfiles
        global zipalignexe
        # Positional arguments are the apk files to sign.
        apkfiles = args[:]
        for opt, arg in opts:
            if opt == '-h':
                usage()
                return 1
            if opt == '-v':
                signingConfig.verbose = True
            elif opt == '--keystore':
                signingConfig.keystore = arg
            elif opt == '--storepass':
                signingConfig.storepass = arg
            elif opt == '--keyalias':
                signingConfig.keyalias = arg
            elif opt == '--keypass':
                signingConfig.keypass = arg
            elif opt == '--zipalignexe':
                zipalignexe = arg
            else:
                print 'invalid option "%s %s"' %(opt, arg)
        return 0
    except getopt.GetoptError as inst:
        print 'invalid options:', inst
        sys.exit(1)
def sign_apks():
if signingConfig.keystore == None:
raise ValueError('where is your keystore file ?')
if signingConfig.keyalias == None:
raise ValueError('missing key alias ?')
for apkfile in apkfiles:
if not os.path.exists(apkfile):
raise RuntimeError('apk file %s is not exists!' %apkfile)
print 'signing ', apkfile
cmd = 'jarsigner'
if signingConfig.verbose:
cmd += ' -verbose'
cmd += ' -sigalg SHA1withRSA -digestalg SHA1 -keystore '+signingConfig.keystore
if signingConfig.storepass != None:
cmd += ' -storepass '+signingConfig.storepass
if signingConfig.keypass != None:
cmd += ' -keypass '+signingConfig.keypass
cmd += ' '+apkfile+' '+signingConfig.keyalias
# print 'run '+cmd
result = os.system(cmd)
if result:
print 'jarsigner exit non-zero: ', result
sys.exit(1)
raw_filename = os.path.splitext(apkfile)[0]
if raw_filename.endswith('-unsigned'):
raw_filename = raw_filename[:-len('-unsigned')]
signedapk = raw_filename
if not signedapk.endswith('-signed-unaligned'):
signedapk+='-signed-unaligned.apk'
if apkfile is not signedapk:
if os.path.exists(signedapk):
os.remove(signedapk)
os.rename(apkfile, signedapk)
alignedapk = os.path.splitext(signedapk)[0][:-len('-signed-unaligned')]+'.apk'
if zipalignexe != None and zipalignexe is not 'null':
print 'zipalign ', signedapk
cmd = zipalignexe+' -f 4 '+signedapk +' '+alignedapk
result = os.system(cmd)
if result:
print 'zipalign exit non-zero: ', result
sys.exit(1)
os.remove(signedapk)
if __name__ == '__main__':
    try:
        # Exit status: 1 for bad arguments, 2 for any signing failure.
        if parse_options(sys.argv[1:]):
            sys.exit(1)
        sign_apks()
    except Exception, e:
        print e
        sys.exit(2)
bilibili/apk-channelization | https://github.com/bilibili/apk-channelization/blob/9efeb80e5cbdc04c020efc056e92a10ef6f13ef5/repackage.py | repackage.py | #!/usr/bin/env python
import getopt, sys, os, zipfile, shutil, traceback, struct, signingapks, updatetime
class options:
    # Plain namespace holding the parsed repackage command line.
    channels = []    # channel names to build, one apk per entry
    apkfile = None   # path to the source apk
    output = None    # output directory (defaults to <apk dir>/channels)
def usage():
    """Print the repackage command line help, including the shared
    signing options from the signingapks module (Python 2)."""
    print 'Usage: %s [options] <path/to/apk>' %(sys.argv[0])
    print 'options:\n -c <a,b,c...>\trepackage channels'
    print ' -o <path>\toutput directory'
    print ' -f <file>\tpath to channel names file'
    signingapks.usage()
def parse_options(argv):
    """Parse repackage flags into ``options`` and the shared signingapks
    config.

    Returns 1 when usage was shown (caller should exit), 0 on success;
    exits the process directly on a getopt error.
    """
    try:
        opts, args = getopt.getopt(argv, "hc:f:o:s:",['keystore=','storepass=','keyalias=','keypass=','zipalignexe='])
        if len(args) == 0:
            print 'path to apk is missing'
            usage()
            return 1
        # The single positional argument is the source apk.
        options.apkfile = args[0]
        for opt, arg in opts:
            if opt == '-h':
                usage()
                return 1
            elif opt == '-c':
                options.channels = arg.split(',')
            elif opt == '-f':
                options.channels = parse_channels_file(arg)
            elif opt == '-o':
                options.output = arg
            elif opt == '--keystore':
                signingapks.signingConfig.keystore = arg
            elif opt == '--storepass':
                signingapks.signingConfig.storepass = arg
            elif opt == '--keyalias':
                signingapks.signingConfig.keyalias = arg
            elif opt == '--keypass':
                signingapks.signingConfig.keypass = arg
            elif opt == '--zipalignexe':
                signingapks.zipalignexe = arg
            else:
                print 'invalid option "%s %s"' %(opt, arg)
        return 0
    except getopt.GetoptError as inst:
        print 'invalid options:', inst
        sys.exit(1)
def parse_channels_file(path):
    """Read channel names from ``path``: one name per line, skipping blank
    lines and '#' comment lines.  Raises ValueError when the file is
    missing."""
    if not os.path.exists(path):
        raise ValueError("path of channels file %s is not exists" %path)
    with open(path) as f:
        raw_lines = f.read().splitlines()
    # Keep only non-empty, non-comment lines.
    return [ln for ln in raw_lines if ln and ln[0] != '#']
def axml_utf16_pack(string):
    """Encode ``string`` as UTF-16 and overwrite the 2-byte BOM slot with
    the character count (little-endian), matching the axml string-pool
    layout used when patching AndroidManifest.xml."""
    encoded = bytearray(string.encode('utf-16'))
    length_le = struct.pack('<I', len(string))
    # Only the low two bytes of the length replace the BOM.
    encoded[ : 2] = struct.unpack('BB', length_le[ : 2])
    return encoded
def find_pack_in_axml(axml_data, pack, start_pos):
    """Return the offset of ``pack`` in ``axml_data`` at or after
    ``start_pos``, or -1 when absent.

    BUG FIX: the original passed ``end=-1`` to ``find``, which excludes the
    final byte of the buffer, so a pack ending exactly at the end of the
    data could never be found.  Searching through to the end fixes that.
    """
    return axml_data.find(pack, start_pos)
def replace_axml_string(axml_data, old_string, new_string):
    """Replace every UTF-16 occurrence of ``old_string`` in ``axml_data``
    (mutated in place) with ``new_string``, zero-filling the leftover bytes
    so the binary layout keeps its original size.

    Raises ValueError when the replacement would not fit.
    """
    new_pack = axml_utf16_pack(new_string)
    old_pack = axml_utf16_pack(old_string)
    new_len = len(new_pack)
    old_len = len(old_pack)
    if old_len < new_len:
        raise ValueError('new_string cannot be larger than old_string! ')
    padding = old_len - new_len
    pos = 0
    while True:
        pos = find_pack_in_axml(axml_data, old_pack, pos)
        if pos < 0:
            break
        axml_data[pos : pos + new_len] = new_pack[ : new_len]
        if padding:
            # Zero out the tail of the old pack so no stale bytes remain.
            axml_data[pos + new_len : pos + old_len] = bytearray(padding)
_ANDROID_MANIFEST_XML = 'AndroidManifest.xml'
# Placeholder baked into the apk's manifest; every channel name written over
# it (see replace_axml_string) must be no longer than this string.
_CHANNEL_PLACE_HOLDER = 'xxxxxxxxxxxxxxxx' #should be larger than every length of channels
_CHANNEL_PLACE_HOLDER_LEN = len(_CHANNEL_PLACE_HOLDER)
def repackage(argv):
    """Unpack the source apk once, stamp each requested channel name over
    the manifest placeholder, rebuild one apk per channel and sign them.

    Exits with status 1 on bad options and 2 on invalid channel/apk input.
    """
    if parse_options(argv):
        sys.exit(1)
    if len(options.channels) == 0:
        print 'you have not defined channels!'
        sys.exit(2)
    # Every channel name must fit inside the fixed-size placeholder.
    for channel in options.channels:
        if len(channel) > _CHANNEL_PLACE_HOLDER_LEN:
            raise ValueError('channel string cannot be larger than place-holder:'+ channel)
    if not os.path.isfile(options.apkfile):
        print 'the path of apk you point to is not a file: ', options.apkfile
        sys.exit(2)
    if options.output is None:
        options.output = os.path.abspath(os.path.join(options.apkfile, os.pardir, 'channels'))
    print 'repackage %s to channels %s \noutput path is %s' %(options.apkfile, options.channels, options.output)
    out = options.output
    if not os.path.exists(out):
        os.mkdir(out)
    temp = os.path.join(out, 'tmp')
    apkfile = options.apkfile
    # unzip apk to temp
    if os.path.exists(temp):
        print 'cleaning temp directory:', temp
        shutil.rmtree(temp)
    os.system('unzip -q '+apkfile+' -d '+temp)
    # delete signing info
    shutil.rmtree(os.path.join(temp, 'META-INF'))
    # update time (ZIP cannot store pre-1980 timestamps)
    updatetime.update_file_time(temp)
    # Read the manifest once; each channel patches its own copy.
    temp_manifest = os.path.join(temp,_ANDROID_MANIFEST_XML)
    with open(temp_manifest, 'rb') as source:
        raw_axml_data = bytearray(source.read())
    raw_filename, ext = os.path.splitext(os.path.basename(apkfile))
    # loop channel conditions
    apkfiles = []
    for channel in options.channels:
        print 'starting package channel %s apk' %channel
        apkfile = package_channel_apk(raw_axml_data, channel, raw_filename, out, temp)
        apkfiles.append(apkfile)
    shutil.rmtree(temp)
    shutil.rmtree(os.path.join(out, 'raw'))
    # Hand the produced apks to the signing module.
    signingapks.apkfiles[:] = apkfiles
    signingapks.sign_apks()
def package_channel_apk(raw_axml_data, channel, raw_filename, out, temp):
newapk_name = raw_filename+'-'+channel+'-unsigned'
newapk = os.path.join(out, newapk_name+'.apk')
if os.path.isfile(newapk):
os.remove(newapk) # remove old apk
print 'creating unsigned apk :', newapk
# clone a new buffer
cloned_buffer = bytearray(len(raw_axml_data))
cloned_buffer[:] = raw_axml_data
replace_axml_string(cloned_buffer, _CHANNEL_PLACE_HOLDER, channel)
temp_manifest = os.path.join(temp, _ANDROID_MANIFEST_XML)
with open(temp_manifest, 'wb') as f:
#print 'writing channel %s to AndroidManifest.xml' %channel
f.write(cloned_buffer)
temp_raw = os.path.join(temp, 'res/raw')
if os.path.exists(temp_raw):
shutil.move(temp_raw, out)
tempzip_name = os.path.join(out, newapk_name)
tempzip = tempzip_name+'.zip'
if os.path.exists(tempzip):
os.remove(tempzip)
#print 'creating channel archive', tempzip
shutil.make_archive(tempzip_name, 'zip', temp)
out_raw = os.path.join(out, 'raw')
mZipFile = zipfile.ZipFile(tempzip, "a")
for file in os.listdir(out_raw):
full_path = os.path.join(out_raw, file);
if os.path.isfile(full_path):
mZipFile.write(full_path, "res\\raw\\" + file, zipfile.ZIP_STORED )
mZipFile.close()
os.rename(tempzip, newapk)
#print 'renamed to ', newapk
return newapk
if __name__ == '__main__':
    try:
        repackage(sys.argv[1:])
    except Exception, e:
        # Print the full traceback so packaging failures are diagnosable.
        print traceback.format_exc()
        sys.exit(2)
ibaiGorordo/HITNET-Stereo-Depth-estimation | https://github.com/ibaiGorordo/HITNET-Stereo-Depth-estimation/blob/a64564954bfa2eb141bda709e942952a9c3be80f/imageDepthEstimation.py | imageDepthEstimation.py | import cv2
import tensorflow as tf
import numpy as np
from hitnet import HitNet, ModelType, draw_disparity, draw_depth, CameraConfig, load_img
# Select which pretrained HITNET variant to run.
model_type = ModelType.eth3d

# Model weights, keyed by variant.
_MODEL_PATHS = {
    ModelType.middlebury: "models/middlebury_d400.pb",
    ModelType.flyingthings: "models/flyingthings_finalpass_xl.pb",
    ModelType.eth3d: "models/eth3d.pb",
}
model_path = _MODEL_PATHS[model_type]

# Initialize model
hitnet_depth = HitNet(model_path, model_type)

# Load the Middlebury "cones" stereo pair.
left_img = load_img("https://vision.middlebury.edu/stereo/data/scenes2003/newdata/cones/im2.png")
right_img = load_img("https://vision.middlebury.edu/stereo/data/scenes2003/newdata/cones/im6.png")

# Estimate the disparity and colorize it for display.
disparity_map = hitnet_depth(left_img, right_img)
color_disparity = draw_disparity(disparity_map)
combined_image = np.hstack((left_img, right_img, color_disparity))

cv2.namedWindow("Estimated disparity", cv2.WINDOW_NORMAL)
cv2.imshow("Estimated disparity", combined_image)
cv2.waitKey(0)
cv2.imwrite("out.jpg", combined_image)
cv2.destroyAllWindows()
ibaiGorordo/HITNET-Stereo-Depth-estimation | https://github.com/ibaiGorordo/HITNET-Stereo-Depth-estimation/blob/a64564954bfa2eb141bda709e942952a9c3be80f/drivingStereoTest.py | drivingStereoTest.py | import cv2
import pafy
import tensorflow as tf
import numpy as np
import glob
from hitnet import HitNet, ModelType, draw_disparity, draw_depth, CameraConfig
# Video writer for the side-by-side visualization (3 x 881 wide, 30 fps).
out = cv2.VideoWriter('outpy2.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (881*3, 400))

# Collect the DrivingStereo frames; sorting keeps left/right/depth aligned.
left_images = sorted(glob.glob('DrivingStereo images/left/*.jpg'))
right_images = sorted(glob.glob('DrivingStereo images/right/*.jpg'))
depth_images = sorted(glob.glob('DrivingStereo images/depth/*.png'))

# Select which pretrained HITNET variant to run.
model_type = ModelType.middlebury
_MODEL_PATHS = {
    ModelType.middlebury: "models/middlebury_d400.pb",
    ModelType.flyingthings: "models/flyingthings_finalpass_xl.pb",
    ModelType.eth3d: "models/eth3d.pb",
}
model_path = _MODEL_PATHS[model_type]

# Stereo rig baseline (m) and focal length (px).
camera_config = CameraConfig(0.546, 1000)
max_distance = 50

# Initialize model
hitnet_depth = HitNet(model_path, model_type, camera_config)

cv2.namedWindow("Estimated depth", cv2.WINDOW_NORMAL)
# Visualize every second frame of the 1500-1700 range.
for left_path, right_path, depth_path in zip(left_images[1500:1700:2],
                                             right_images[1500:1700:2],
                                             depth_images[1500:1700:2]):
    left_img = cv2.imread(left_path)
    right_img = cv2.imread(right_path)
    # Ground-truth png values are divided by 256 — presumably the dataset's
    # fixed-point depth encoding; confirm against DrivingStereo docs.
    depth_img = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED).astype(np.float32)/256

    # Estimate disparity, then derive metric depth.
    disparity_map = hitnet_depth(left_img, right_img)
    depth_map = hitnet_depth.get_depth()

    color_disparity = draw_disparity(disparity_map)
    color_depth = draw_depth(depth_map, max_distance)
    color_real_depth = draw_depth(depth_img, max_distance)

    combined_image = np.hstack((left_img, color_real_depth, color_depth))
    out.write(combined_image)
    cv2.imshow("Estimated depth", combined_image)

    # Press key q to stop
    if cv2.waitKey(1) == ord('q'):
        break

out.release()
cv2.destroyAllWindows()
ibaiGorordo/HITNET-Stereo-Depth-estimation | https://github.com/ibaiGorordo/HITNET-Stereo-Depth-estimation/blob/a64564954bfa2eb141bda709e942952a9c3be80f/videoDepthEstimation.py | videoDepthEstimation.py | import cv2
import pafy
import tensorflow as tf
import numpy as np
from hitnet import HitNet, ModelType, draw_disparity, draw_depth, CameraConfig

# Initialize video
# cap = cv2.VideoCapture("video.mp4")
# The demo video is sliced into thirds below, so it presumably contains
# left | right | true-depth panels side by side — verify with the source clip.
videoUrl = 'https://youtu.be/Yui48w71SG0'
videoPafy = pafy.new(videoUrl)
print(videoPafy.streams)
cap = cv2.VideoCapture(videoPafy.getbestvideo().url)
# Select model type
# model_type = ModelType.middlebury
# model_type = ModelType.flyingthings
model_type = ModelType.eth3d
if model_type == ModelType.middlebury:
    model_path = "models/middlebury_d400.pb"
elif model_type == ModelType.flyingthings:
    model_path = "models/flyingthings_finalpass_xl.pb"
elif model_type == ModelType.eth3d:
    model_path = "models/eth3d.pb"
# Store baseline (m) and focal length (pixel)
camera_config = CameraConfig(0.1, 320)
max_distance = 5  # depth-colormap range in meters
# Initialize model
hitnet_depth = HitNet(model_path, model_type, camera_config)
cv2.namedWindow("Estimated depth", cv2.WINDOW_NORMAL)
while cap.isOpened():
    try:
        # Read frame from the video
        ret, frame = cap.read()
        if not ret:
            break
    except:  # NOTE(review): bare except — retries on ANY read error, hiding bugs
        continue
    # Extract the left and right images (frame laid out in thirds)
    left_img = frame[:,:frame.shape[1]//3]
    right_img = frame[:,frame.shape[1]//3:frame.shape[1]*2//3]
    color_real_depth = frame[:,frame.shape[1]*2//3:]
    # Estimate the depth
    disparity_map = hitnet_depth(left_img, right_img)
    depth_map = hitnet_depth.get_depth()
    color_disparity = draw_disparity(disparity_map)  # NOTE(review): unused
    color_depth = draw_depth(depth_map, max_distance)
    cobined_image = np.hstack((left_img,color_real_depth, color_depth))
    cv2.imshow("Estimated depth", cobined_image)
    # Press key q to stop
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows() | python | MIT | a64564954bfa2eb141bda709e942952a9c3be80f | 2026-01-05T07:14:11.475984Z | false |
ibaiGorordo/HITNET-Stereo-Depth-estimation | https://github.com/ibaiGorordo/HITNET-Stereo-Depth-estimation/blob/a64564954bfa2eb141bda709e942952a9c3be80f/hitnet/__init__.py | hitnet/__init__.py | from hitnet.hitnet import HitNet
from hitnet.utils_hitnet import * | python | MIT | a64564954bfa2eb141bda709e942952a9c3be80f | 2026-01-05T07:14:11.475984Z | false |
ibaiGorordo/HITNET-Stereo-Depth-estimation | https://github.com/ibaiGorordo/HITNET-Stereo-Depth-estimation/blob/a64564954bfa2eb141bda709e942952a9c3be80f/hitnet/hitnet.py | hitnet/hitnet.py | import tensorflow as tf
import numpy as np
import time
import cv2
from hitnet.utils_hitnet import *
drivingStereo_config = CameraConfig(0.546, 1000)
class HitNet():
    """Frozen-graph HITNET stereo model: estimates disparity, derives depth.

    The model is a TF1 ``.pb`` graph wrapped as a TF2 callable via
    ``wrap_frozen_graph``. Input layout depends on the checkpoint family
    (see ``prepare_input``).
    """

    def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config):
        # FPS bookkeeping fields (not updated by the inference path below).
        self.fps = 0
        self.timeLastPrediction = time.time()
        self.frameCounter = 0
        # Stereo rig parameters used by get_depth().
        self.camera_config = camera_config
        # Initialize model
        self.model = self.initialize_model(model_path, model_type)

    def __call__(self, left_img, right_img):
        """Alias for :meth:`estimate_disparity`."""
        return self.estimate_disparity(left_img, right_img)

    def initialize_model(self, model_path, model_type):
        """Load the frozen ``.pb`` graph and wrap it as a callable function."""
        self.model_type = model_type
        with tf.io.gfile.GFile(model_path, "rb") as f:
            graph_def = tf.compat.v1.GraphDef()
            loaded = graph_def.ParseFromString(f.read())  # return value unused
        # Wrap frozen graph to ConcreteFunctions. flyingthings graphs expose
        # one disparity output per view; the others expose a single output.
        if self.model_type == ModelType.flyingthings:
            model = wrap_frozen_graph(graph_def=graph_def,
                                      inputs="input:0",
                                      outputs=["reference_output_disparity:0","secondary_output_disparity:0"])
        else:
            model = wrap_frozen_graph(graph_def=graph_def,
                                      inputs="input:0",
                                      outputs="reference_output_disparity:0")
        return model

    def estimate_disparity(self, left_img, right_img):
        """Estimate the disparity map (in pixels) for a BGR stereo pair."""
        input_tensor = self.prepare_input(left_img, right_img)
        # Perform inference on the image
        if self.model_type == ModelType.flyingthings:
            # Keep only the reference (left-view) disparity.
            left_disparity, right_disparity = self.inference(input_tensor)
            self.disparity_map = left_disparity
        else:
            self.disparity_map = self.inference(input_tensor)
        return self.disparity_map

    def get_depth(self):
        """Convert the last disparity map to depth: f * baseline / disparity.

        NOTE(review): zero-valued disparities divide by zero here (numpy
        yields inf with a warning) — confirm callers tolerate that.
        """
        return self.camera_config.f*self.camera_config.baseline/self.disparity_map

    def prepare_input(self, left_img, right_img):
        """Stack the two views into the channel layout the graph expects."""
        if (self.model_type == ModelType.eth3d):
            # Shape (1, None, None, 2): grayscale left/right stacked on channels.
            left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
            right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
            left_img = np.expand_dims(left_img,2)
            right_img = np.expand_dims(right_img,2)
            combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
        else:
            # Shape (1, None, None, 6): RGB left/right stacked on channels.
            left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB)
            right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB)
            combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
        return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32)

    def inference(self, input_tensor):
        """Run the wrapped graph and strip the batch dimension from the output."""
        output = self.model(input_tensor)
        return np.squeeze(output)
| python | MIT | a64564954bfa2eb141bda709e942952a9c3be80f | 2026-01-05T07:14:11.475984Z | false |
ibaiGorordo/HITNET-Stereo-Depth-estimation | https://github.com/ibaiGorordo/HITNET-Stereo-Depth-estimation/blob/a64564954bfa2eb141bda709e942952a9c3be80f/hitnet/utils_hitnet.py | hitnet/utils_hitnet.py | from enum import Enum
import tensorflow as tf
import numpy as np
import cv2
import urllib
from dataclasses import dataclass
class ModelType(Enum):
    """Which HITNET checkpoint family a frozen graph belongs to."""

    eth3d = 0         # grayscale input (2-channel stacked L/R)
    middlebury = 1    # color input (6-channel stacked L/R)
    flyingthings = 2  # color input; graph emits two disparity outputs
@dataclass
class CameraConfig:
    """Stereo rig parameters used to convert disparity to metric depth."""

    baseline: float  # distance between the two cameras, in meters
    f: float         # focal length, in pixels
def wrap_frozen_graph(graph_def, inputs, outputs):
    """Wrap a TF1 frozen ``GraphDef`` as a callable TF2 ConcreteFunction.

    ``inputs``/``outputs`` are tensor names such as ``"input:0"`` (or nested
    structures of names); the returned callable maps input tensors to the
    requested output tensors.
    """
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")

    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    import_graph = wrapped_import.graph
    # Prune the imported graph down to just the requested input/output tensors.
    return wrapped_import.prune(
        tf.nest.map_structure(import_graph.as_graph_element, inputs),
        tf.nest.map_structure(import_graph.as_graph_element, outputs))
def draw_disparity(disparity_map):
    """Colorize a disparity map with the MAGMA colormap.

    Normalizes the full float range to 0-255 before quantizing. The previous
    implementation cast to uint8 FIRST, which discarded HITNET's sub-pixel
    (float) disparities and wrapped values above 255, and it divided by zero
    for constant maps; it also passed ``1`` positionally to
    ``cv2.convertScaleAbs`` where the ``dst`` argument lives.
    """
    disparity = disparity_map.astype(np.float32)
    lo = float(disparity.min())
    span = float(disparity.max()) - lo
    if span == 0.0:
        # Constant map: avoid 0/0; render uniformly.
        norm = np.zeros_like(disparity)
    else:
        norm = 255.0 * (disparity - lo) / span
    return cv2.applyColorMap(norm.astype(np.uint8), cv2.COLORMAP_MAGMA)
def draw_depth(depth_map, max_dist):
    """Colorize a metric depth map with the MAGMA colormap.

    Depth 0 (invalid) and depths beyond ``max_dist`` map to 0 (black).
    Values are explicitly clipped to 0-255 before the uint8 cast, replacing
    the previous ``cv2.convertScaleAbs(norm, 1)`` call that passed ``1``
    positionally into the ``dst`` parameter while relying on saturation.
    """
    norm_depth_map = 255.0 * (1.0 - depth_map / max_dist)
    # Beyond max_dist -> negative -> clipped to 0; clip also guards the
    # high end against out-of-range (e.g. negative-depth) inputs.
    np.clip(norm_depth_map, 0, 255, out=norm_depth_map)
    norm_depth_map[depth_map == 0] = 0  # keep invalid pixels black
    return cv2.applyColorMap(norm_depth_map.astype(np.uint8), cv2.COLORMAP_MAGMA)
def load_img(url):
    """Download an image from *url* and decode it unchanged.

    Returns the decoded image as loaded by ``cv2.imdecode(..., -1)`` (keeps
    original channels/bit depth), or None if decoding fails.

    FIX: the module only does ``import urllib``, which does NOT bind the
    ``urllib.request`` submodule — the original raised AttributeError in a
    clean interpreter. Also closes the HTTP response via a context manager.
    """
    import urllib.request  # local import: module-level `import urllib` is insufficient

    with urllib.request.urlopen(url) as response:
        arr = np.asarray(bytearray(response.read()), dtype=np.uint8)
    return cv2.imdecode(arr, -1)  # 'Load it as it is'
| python | MIT | a64564954bfa2eb141bda709e942952a9c3be80f | 2026-01-05T07:14:11.475984Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/media_player.py | custom_components/vaca/media_player.py | """Media player entity for VA Wyoming."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from homeassistant.components import media_source
from homeassistant.components.media_player import (
BrowseMedia,
MediaPlayerDeviceClass,
MediaPlayerEnqueue,
MediaPlayerEntity,
MediaPlayerEntityDescription,
MediaPlayerEntityFeature,
MediaPlayerState,
async_process_play_media_url,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .const import DOMAIN
from .custom import CustomActions
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
if TYPE_CHECKING:
from homeassistant.components.wyoming import DomainDataItem
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up media_player entities.

    Looks up the Wyoming domain data stored for this config entry and adds
    one media player per satellite device.
    """
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    device: VASatelliteDevice = item.device  # type: ignore[assignment]
    # Setup is only forwarded for satellites
    # NOTE(review): this assert runs AFTER the assignment above — consider
    # checking before dereferencing.
    assert item.device is not None
    async_add_entities([WyomingMediaPlayer(device)])
class WyomingMediaPlayer(VASatelliteEntity, MediaPlayerEntity):
    """Represents a hassmic media player.

    Playback happens on the satellite device itself: every command is
    forwarded to it as a custom Wyoming action event, and this entity only
    mirrors the resulting state for Home Assistant.
    """

    entity_description = MediaPlayerEntityDescription(
        key="media_player",
        translation_key="media_player",
        device_class=MediaPlayerDeviceClass.SPEAKER,
        name="Media player",
    )
    _attr_state = MediaPlayerState.IDLE
    _attr_volume_level = 0.9
    _attr_supported_features = (
        MediaPlayerEntityFeature(0)
        | MediaPlayerEntityFeature.MEDIA_ANNOUNCE
        | MediaPlayerEntityFeature.PAUSE
        | MediaPlayerEntityFeature.PLAY
        | MediaPlayerEntityFeature.PLAY_MEDIA
        | MediaPlayerEntityFeature.STOP
        | MediaPlayerEntityFeature.VOLUME_SET
        | MediaPlayerEntityFeature.BROWSE_MEDIA
        # | MediaPlayerEntityFeature.MEDIA_ENQUEUE
        # | MediaPlayerEntityFeature.NEXT_TRACK
    )

    async def async_added_to_hass(self) -> None:
        """When entity is added to Home Assistant."""
        await super().async_added_to_hass()

    async def async_play_media(
        self,
        media_type: str,
        media_id: str,
        enqueue: MediaPlayerEnqueue | None = None,
        announce: bool | None = None,
        **kwargs: Any,
    ):
        """Play a piece of media.

        Resolves media-source IDs to playable URLs, forwards the URL plus the
        current volume (as a percentage) to the satellite, and publishes any
        metadata supplied in the service call's ``extra`` data.
        """
        _LOGGER.info(
            "Playing media: type=%s, id=%s, enq=%s, announce=%s, args=%s",
            media_type,
            media_id,
            enqueue,
            announce,
            kwargs,
        )
        # resolve a media_source_id into a URL
        # https://developers.home-assistant.io/docs/core/entity/media-player/#play-media
        if media_source.is_media_source_id(media_id):
            play_item = await media_source.async_resolve_media(
                self.hass, media_id, self.entity_id
            )
            media_id = async_process_play_media_url(self.hass, play_item.url)
        _LOGGER.info("Playing media: '%s'", media_id)
        self._device.send_custom_action(
            command=CustomActions.MEDIA_PLAY_MEDIA,
            payload={"url": media_id, "volume": (self._attr_volume_level or 0) * 100},
        )
        self._attr_state = MediaPlayerState.PLAYING
        # Handle metadata if available
        meta_data = {}
        if "extra" in kwargs:
            extra = kwargs["extra"]
            meta_data = extra.get("metadata", {})
        await self.async_process_metadata(metadata=meta_data)
        self.async_write_ha_state()

    async def async_media_play(self):
        """Send a play command."""
        _LOGGER.info("Playing")
        self._device.send_custom_action(
            command=CustomActions.MEDIA_PLAY,
            payload={"volume": (self._attr_volume_level or 0) * 100},
        )
        self._attr_state = MediaPlayerState.PLAYING
        self.async_write_ha_state()

    async def async_media_pause(self):
        """Send a pause command."""
        _LOGGER.info("Pausing playback")
        self._device.send_custom_action(
            command=CustomActions.MEDIA_PAUSE,
        )
        self._attr_state = MediaPlayerState.PAUSED
        self.async_write_ha_state()

    async def async_media_stop(self):
        """Send a stop command."""
        _LOGGER.info("Stopping playback")
        self._device.send_custom_action(
            command=CustomActions.MEDIA_STOP,
        )
        self._attr_state = MediaPlayerState.IDLE
        # Clear metadata now that nothing is playing.
        await self.async_process_metadata({})
        self.async_write_ha_state()

    async def async_set_volume_level(self, volume: float) -> None:
        """Set the volume level, clamped to the valid 0.0-1.0 range."""
        # FIX: volume_up/volume_down could previously push the level outside
        # 0..1, which is invalid for HA media players and sent out-of-range
        # percentages to the device.
        volume = min(1.0, max(0.0, volume))
        _LOGGER.info("Setting playback volume to %f", volume)
        self._device.send_custom_action(
            command=CustomActions.MEDIA_SET_VOLUME,
            payload={"volume": volume * 100},
        )
        self._attr_volume_level = volume
        self.async_write_ha_state()

    async def async_volume_up(self):
        """Increase the volume level (clamped by async_set_volume_level)."""
        return await self.async_set_volume_level((self._attr_volume_level or 0) + 0.1)

    async def async_volume_down(self):
        """Decrease the volume level (clamped by async_set_volume_level)."""
        return await self.async_set_volume_level((self._attr_volume_level or 0) - 0.1)

    # https://developers.home-assistant.io/docs/core/entity/media-player/#browse-media
    async def async_browse_media(
        self, media_content_type: str | None = None, media_content_id: str | None = None
    ) -> BrowseMedia:
        """Implement the websocket media browsing helper."""
        return await media_source.async_browse_media(
            self.hass,
            media_content_id,
            content_filter=lambda item: item.media_content_type.startswith("audio/"),
        )

    async def async_process_metadata(self, metadata: dict[str, Any]) -> None:
        """Process metadata from the media player.

        Missing keys clear the corresponding attribute (set to None).
        """
        _LOGGER.info("Processing metadata: %s", metadata)
        self._attr_media_title = metadata.get("title")
        self._attr_media_artist = metadata.get("artist")
        self._attr_media_album_name = metadata.get("albumName")
        self._attr_entity_picture = metadata.get("imageURL")
        self.async_write_ha_state()
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/client.py | custom_components/vaca/client.py | """Custom AsyncTCPClient for Wyoming events."""
from wyoming.client import AsyncTcpClient
from wyoming.event import Event
class VAAsyncTcpClient(AsyncTcpClient):
    """TCP client for Wyoming events with send/receive hook callbacks."""

    def __init__(
        self,
        host: str,
        port: int,
        before_send_callback=None,
        after_send_callback=None,
        on_receive_callback=None,
    ) -> None:
        """Initialize the custom TCP client.

        Args:
            host: Satellite host.
            port: Satellite port.
            before_send_callback: Awaited with each event before it is written.
            after_send_callback: Awaited with each event after it is written.
            on_receive_callback: Synchronous callable invoked for each received
                event; returns ``(forward, modified_event)`` where ``forward``
                says whether read_event should return, and ``modified_event``
                optionally replaces the event.
        """
        super().__init__(host, port)
        self._before_send_callback = before_send_callback
        self._after_send_callback = after_send_callback
        self._on_receive_callback = on_receive_callback

    async def write_event(self, event: Event) -> None:
        """Write an event to the server, invoking the send hooks around it."""
        if self._before_send_callback:
            await self._before_send_callback(event)
        if self.can_write_event():
            await super().write_event(event)
        if self._after_send_callback:
            await self._after_send_callback(event)

    async def read_event(self) -> Event | None:
        """Read events until the receive hook forwards one.

        Returns None when the connection is reset or closed.

        FIX: the original looped forever when no receive callback was set
        (``forward_event`` could never become True) and busy-looped on a
        closed connection if the callback kept rejecting ``None`` events.
        """
        while True:
            try:
                event = await super().read_event()
            except ConnectionResetError:
                return None
            if event is None:
                # Connection closed: let the hook observe the None event
                # (it may reset internal state), then report end-of-stream.
                if self._on_receive_callback:
                    self._on_receive_callback(event)
                return None
            if self._on_receive_callback is None:
                return event
            forward_event, modified_event = self._on_receive_callback(event)
            if forward_event:
                return modified_event if modified_event else event

    def can_write_event(self) -> bool:
        """Check if the client can write an event."""
        return self._writer is not None and not self._writer.is_closing()
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/assist_satellite.py | custom_components/vaca/assist_satellite.py | """Assist satellite entity for Wyoming integration."""
from __future__ import annotations
import asyncio
import io
import logging
import time
from typing import Final
import wave
from wyoming.audio import AudioChunk, AudioStart, AudioStop
from wyoming.event import Event
from wyoming.info import Describe
from wyoming.pipeline import PipelineStage, RunPipeline
from wyoming.satellite import RunSatellite
from homeassistant.components import assist_pipeline, ffmpeg, tts
from homeassistant.components.assist_pipeline import PipelineEvent
from homeassistant.components.assist_satellite import (
AssistSatelliteAnnouncement,
AssistSatelliteEntityDescription,
AssistSatelliteEntityFeature,
)
from homeassistant.components.wyoming import DomainDataItem, WyomingService
# pylint: disable-next=hass-component-root-import
from homeassistant.components.wyoming.assist_satellite import WyomingAssistSatellite
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.components import intent
from .client import VAAsyncTcpClient
from .const import DOMAIN, MIN_APK_VERSION, SAMPLE_CHANNELS, SAMPLE_WIDTH
from .custom import (
ACTION_EVENT_TYPE,
CAPABILITIES_EVENT_TYPE,
SETTINGS_EVENT_TYPE,
STATUS_EVENT_TYPE,
CustomEvent,
PipelineEnded,
getIntegrationVersion,
getVADashboardPath,
)
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
_LOGGER = logging.getLogger(__name__)
_SAMPLES_PER_CHUNK: Final = 1024
_RECONNECT_SECONDS: Final = 10
_RESTART_SECONDS: Final = 3
_PING_TIMEOUT: Final = 5
_PING_SEND_DELAY: Final = 2
_PIPELINE_FINISH_TIMEOUT: Final = 1
_TTS_SAMPLE_RATE: Final = 22050
_ANNOUNCE_CHUNK_BYTES: Final = 2048 # 1024 samples
_TTS_TIMEOUT_EXTRA: Final = 1.0
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up Wyoming Assist satellite entity.

    Creates one satellite entity for the device stored in the config entry's
    domain data. Setup is only forwarded for satellites, hence the assert.
    """
    domain_data: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    assert domain_data.device is not None
    device: VASatelliteDevice = domain_data.device  # type: ignore[assignment]
    async_add_entities(
        [ViewAssistSatelliteEntity(hass, domain_data.service, device, config_entry)]
    )
class ViewAssistSatelliteEntity(WyomingAssistSatellite, VASatelliteEntity):
"""View Assist satellite entity for Wyoming devices."""
entity_description = AssistSatelliteEntityDescription(
key="assist_satellite", translation_key="assist_satellite"
)
_attr_name = None
_attr_supported_features = (
AssistSatelliteEntityFeature.ANNOUNCE
| AssistSatelliteEntityFeature.START_CONVERSATION
)
def __init__(
self,
hass: HomeAssistant,
service: WyomingService,
device: VASatelliteDevice,
config_entry: ConfigEntry,
) -> None:
"""Initialize a View Assist satellite."""
WyomingAssistSatellite.__init__(self, hass, service, device, config_entry)
VASatelliteEntity.__init__(self, device)
self._client: VAAsyncTcpClient | None = None
self.device: VASatelliteDevice = device
self.device.set_custom_settings_listener(self._custom_settings_changed)
self.device.set_custom_action_listener(self._send_custom_action)
# Make info accessible from entities
self.device.info = service.info
# Init custom settings
self.device.custom_settings = {}
# stream tts var to allow interupt and cancel remaining response
self.stream_tts = False
async def on_restart(self) -> None:
"""Block until pipeline loop will be restarted."""
_LOGGER.warning(
"Satellite %s has been disconnected. Reconnecting in %s second(s)",
self.entity_id.replace("assist_satellite.", ""),
_RECONNECT_SECONDS,
)
await asyncio.sleep(_RESTART_SECONDS)
async def on_reconnect(self) -> None:
"""Block until a reconnection attempt should be made."""
_LOGGER.debug(
"Failed to connect to %s satellite. Reconnecting in %s second(s)",
self.entity_id.replace("assist_satellite.", ""),
_RECONNECT_SECONDS,
)
await asyncio.sleep(_RECONNECT_SECONDS)
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
try:
await super().async_will_remove_from_hass()
except AssertionError as ex:
_LOGGER.debug("Assertion error while stopping satellite: %s", ex)
async def on_before_send_event_callback(self, event: Event) -> None:
"""Allow injection of events before event sent."""
if RunSatellite().is_type(event.type):
# integration version
if self.device and self.device.custom_settings:
self.device.custom_settings[
"integration_version"
] = await getIntegrationVersion(self.hass)
self.device.custom_settings["min_required_apk_version"] = (
MIN_APK_VERSION
)
# Update url and port
self.device.custom_settings["ha_port"] = (
self.hass.config.api.port if self.hass.config.api else 8123
)
self.device.custom_settings["ha_url"] = (
self.hass.config.internal_url
if self.hass.config.internal_url
else ""
)
home = getVADashboardPath(self.hass, self.device.satellite_id)
self.device.custom_settings["ha_dashboard"] = home.removeprefix("/")
# Send config event
self._custom_settings_changed()
async def on_after_send_event_callback(self, event: Event) -> None:
"""Allow injection of events after event sent."""
if Describe().is_type(event.type) and self._client:
await self._client.write_event(CustomEvent("capabilities").event())
@callback
def on_receive_event_callback(self, event: Event) -> tuple[bool, Event | None]:
"""Handle received custom events."""
if event and AudioStop.is_type(event.type):
self.stream_tts = False
return not self.stream_tts, event
if event and CustomEvent.is_type(event.type):
# Custom event
evt = CustomEvent.from_event(event)
if evt.event_type == CAPABILITIES_EVENT_TYPE and evt.event_data:
self.device.capabilities = evt.event_data.get("capabilities", {})
elif evt.event_type == STATUS_EVENT_TYPE:
_LOGGER.debug(
"Received %s event: %s",
evt.event_type,
evt.event_data,
)
async_dispatcher_send(
self.hass,
f"{DOMAIN}_{self.device.device_id}_{evt.event_type}_update",
evt.event_data,
)
return False, None
return True, event
async def _connect(self) -> None:
"""Connect to satellite over TCP. Uses custom TCP client to allow callbacks on send."""
await self._disconnect()
_LOGGER.debug(
"Connecting VACA to satellite at %s:%s",
self.service.host,
self.service.port,
)
self._client = VAAsyncTcpClient(
self.service.host,
self.service.port,
before_send_callback=self.on_before_send_event_callback,
after_send_callback=self.on_after_send_event_callback,
on_receive_callback=self.on_receive_event_callback,
)
await self._client.connect()
def on_pipeline_event(self, event: PipelineEvent) -> None:
"""Handle pipeline events from the assist pipeline.
To allow additional functionality, this method is overridden to handle
specific events such as STT and TTS updates. This is necessary to ensure
that the satellite can respond to these events appropriately, such as
updating listeners for speech-to-text and text-to-speech outputs.
MSP - Added by MSP1974 2025-07-08
"""
if event.type == assist_pipeline.PipelineEventType.RUN_START:
# Fix for error when running pipeline for ask question
if event.data and not event.data.get("tts_output"):
event.data["tts_output"] = {"token": ""}
elif event.type == assist_pipeline.PipelineEventType.RUN_END:
# Pipeline ended
if self._client is not None:
self.config_entry.async_create_background_task(
self.hass,
self._client.write_event(PipelineEnded().event()),
"send pipeline ended event",
)
elif event.type == assist_pipeline.PipelineEventType.STT_END:
# Speech-to-text transcript
if event.data:
# Inform client of transript
stt_text = event.data["stt_output"]["text"]
if self.device.stt_listener is not None:
self.device.stt_listener(stt_text)
elif event.type == assist_pipeline.PipelineEventType.TTS_START:
# Text-to-speech text
if event.data:
if self.device.tts_listener is not None:
self.device.tts_listener(event.data["tts_input"])
elif event.type == assist_pipeline.PipelineEventType.INTENT_END:
# Intent processing complete - update intent sensor
if event.data:
_LOGGER.debug(
"Intent %s complete: %s",
event.type,
event.data,
)
if (
event.data.get("intent_output", {})
.get("response", {})
.get("speech")
):
async_dispatcher_send(
self.hass,
f"{DOMAIN}_{self.device.device_id}_intent_output",
event.data,
)
super().on_pipeline_event(event)
async def async_announce(self, announcement: AssistSatelliteAnnouncement) -> None:
"""Announce media on the satellite.
Should block until the announcement is done playing.
MSP - Fixes that Wyoming announce does not play preannounce sound
"""
assert self._client is not None
if self._ffmpeg_manager is None:
self._ffmpeg_manager = ffmpeg.get_ffmpeg_manager(self.hass)
if self._played_event_received is None:
self._played_event_received = asyncio.Event()
self._played_event_received.clear()
await self._client.write_event(
AudioStart(
rate=_TTS_SAMPLE_RATE,
width=SAMPLE_WIDTH,
channels=SAMPLE_CHANNELS,
timestamp=0,
).event()
)
timestamp = 0
# Play preannounce sound if set
if announcement.preannounce_media_id:
preannounce_proc = await asyncio.create_subprocess_exec(
self._ffmpeg_manager.binary,
"-i",
announcement.preannounce_media_id,
"-f",
"s16le",
"-ac",
str(SAMPLE_CHANNELS),
"-ar",
str(_TTS_SAMPLE_RATE),
"-nostats",
"pipe:",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
close_fds=False, # use posix_spawn in CPython < 3.13
)
assert preannounce_proc.stdout is not None
while True:
chunk_bytes = await preannounce_proc.stdout.read(_ANNOUNCE_CHUNK_BYTES)
if not chunk_bytes:
break
chunk = AudioChunk(
rate=_TTS_SAMPLE_RATE,
width=SAMPLE_WIDTH,
channels=SAMPLE_CHANNELS,
audio=chunk_bytes,
timestamp=timestamp,
)
await self._client.write_event(chunk.event())
timestamp += chunk.milliseconds
try:
# Use ffmpeg to convert to raw PCM audio with the appropriate format
proc = await asyncio.create_subprocess_exec(
self._ffmpeg_manager.binary,
"-i",
announcement.media_id,
"-f",
"s16le",
"-ac",
str(SAMPLE_CHANNELS),
"-ar",
str(_TTS_SAMPLE_RATE),
"-nostats",
"pipe:",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
close_fds=False, # use posix_spawn in CPython < 3.13
)
assert proc.stdout is not None
while True:
chunk_bytes = await proc.stdout.read(_ANNOUNCE_CHUNK_BYTES)
if not chunk_bytes:
break
chunk = AudioChunk(
rate=_TTS_SAMPLE_RATE,
width=SAMPLE_WIDTH,
channels=SAMPLE_CHANNELS,
audio=chunk_bytes,
timestamp=timestamp,
)
await self._client.write_event(chunk.event())
timestamp += chunk.milliseconds
finally:
await self._client.write_event(AudioStop().event())
if timestamp > 0:
# Wait the length of the audio or until we receive a played event
audio_seconds = timestamp / 1000
try:
async with asyncio.timeout(audio_seconds + 0.5):
await self._played_event_received.wait()
except TimeoutError:
# Older satellite clients will wait longer than necessary
_LOGGER.debug("Did not receive played event for announcement")
async def async_start_conversation(
self, start_announcement: AssistSatelliteAnnouncement
) -> None:
"""Start a conversation from the satellite."""
await self.async_announce(start_announcement)
self._run_pipeline_once(
RunPipeline(
start_stage=PipelineStage.ASR,
end_stage=PipelineStage.ASR,
restart_on_end=False,
)
)
def _custom_settings_changed(self) -> None:
"""Run when device screen settings change."""
if self._client is not None and self._client.can_write_event():
self.config_entry.async_create_background_task(
self.hass,
self._client.write_event(
CustomEvent(
SETTINGS_EVENT_TYPE,
{SETTINGS_EVENT_TYPE: self.device.custom_settings},
).event()
),
"custom settings event",
)
def _send_custom_action(
self, command: str, payload: str | float | None = None
) -> None:
"""Send a media player command to the satellite."""
if self._client is not None and self._client.can_write_event():
self.config_entry.async_create_background_task(
self.hass,
self._client.write_event(
CustomEvent(
ACTION_EVENT_TYPE,
{"action": command, "payload": payload},
).event()
),
"media player command",
)
async def _stream_tts(self, tts_result: tts.ResultStream) -> None:
"""Stream TTS WAV audio to satellite in chunks."""
assert self._client is not None
if tts_result.extension != "wav":
raise ValueError(
f"Cannot stream audio format to satellite: {tts_result.extension}"
)
# Track the total duration of TTS audio for response timeout
total_seconds = 0.0
start_time = time.monotonic()
try:
data = b"".join([chunk async for chunk in tts_result.async_stream_result()])
with io.BytesIO(data) as wav_io, wave.open(wav_io, "rb") as wav_file:
sample_rate = wav_file.getframerate()
sample_width = wav_file.getsampwidth()
sample_channels = wav_file.getnchannels()
_LOGGER.debug("Streaming %s TTS sample(s)", wav_file.getnframes())
# Start audio stream - set flag to allow streaming
self.stream_tts = True
timestamp = 0
await self._client.write_event(
AudioStart(
rate=sample_rate,
width=sample_width,
channels=sample_channels,
timestamp=timestamp,
).event()
)
# Stream audio chunks
while audio_bytes := wav_file.readframes(_SAMPLES_PER_CHUNK):
# If flag set to false, stop streaming
if not self.stream_tts:
_LOGGER.debug("TTS streaming interrupted")
break
chunk = AudioChunk(
rate=sample_rate,
width=sample_width,
channels=sample_channels,
audio=audio_bytes,
timestamp=timestamp,
)
await self._client.write_event(chunk.event())
timestamp += int(chunk.seconds)
total_seconds += chunk.seconds
await self._client.write_event(AudioStop(timestamp=timestamp).event())
_LOGGER.debug("TTS streaming complete")
finally:
send_duration = time.monotonic() - start_time
timeout_seconds = max(0, total_seconds - send_duration + _TTS_TIMEOUT_EXTRA)
if self._played_event_received is None:
self._played_event_received = asyncio.Event()
self._played_event_received.clear()
self.config_entry.async_create_background_task(
self.hass,
self._tts_timeout(timeout_seconds, self._run_loop_id),
name="wyoming TTS timeout",
)
async def _tts_timeout(
self, timeout_seconds: float, run_loop_id: str | None
) -> None:
"""Force state change to IDLE in case TTS played event isn't received."""
await asyncio.sleep(timeout_seconds + _TTS_TIMEOUT_EXTRA)
if (
self._played_event_received is not None
and self._played_event_received.is_set()
):
# Played event already received
return
if run_loop_id != self._run_loop_id:
# On a different pipeline run now
return
self.tts_response_finished()
@callback
def _handle_timer(
self, event_type: intent.TimerEventType, timer: intent.TimerInfo
) -> None:
"""Forward timer events to view assist."""
super()._handle_timer(event_type, timer)
# Send timer event to custom listeners
async_dispatcher_send(
self.hass,
f"{DOMAIN}_{self.device.device_id}_timer_event",
self.device.device_id,
event_type,
timer,
)
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/tts.py | custom_components/vaca/tts.py | """Support for Wyoming text-to-speech services."""
from collections import defaultdict
import io
import logging
import wave
from wyoming.audio import AudioChunk, AudioStop
from wyoming.client import AsyncTcpClient
from wyoming.tts import Synthesize, SynthesizeVoice
from homeassistant.components import tts
from homeassistant.components.wyoming import DomainDataItem, WyomingService
# pylint: disable-next=hass-component-root-import
from homeassistant.components.wyoming.error import WyomingError
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .const import ATTR_SPEAKER, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up Wyoming text-to-speech."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    async_add_entities(
        [
            WyomingTtsProvider(config_entry, item.service),
        ]
    )
class WyomingTtsProvider(tts.TextToSpeechEntity):
    """Wyoming text-to-speech provider.

    Sends a Synthesize request over TCP to the Wyoming service and collects
    the returned audio chunks into a single in-memory WAV file.
    """

    def __init__(
        self,
        config_entry: ConfigEntry,
        service: WyomingService,
    ) -> None:
        """Set up provider."""
        self.service = service
        # First installed TTS program advertised by the service. (The genexp
        # variable deliberately shadows the `tts` module name here.)
        self._tts_service = next(tts for tts in service.info.tts if tts.installed)
        voice_languages: set[str] = set()
        # Map of language -> available voices for that language.
        self._voices: dict[str, list[tts.Voice]] = defaultdict(list)
        for voice in self._tts_service.voices:
            if not voice.installed:
                continue
            voice_languages.update(voice.languages)
            for language in voice.languages:
                self._voices[language].append(
                    tts.Voice(
                        voice_id=voice.name,
                        name=voice.description or voice.name,
                    )
                )
        # Sort voices by name
        for language in self._voices:
            self._voices[language] = sorted(
                self._voices[language], key=lambda v: v.name
            )
        self._supported_languages: list[str] = list(voice_languages)
        self._attr_name = self._tts_service.name
        self._attr_unique_id = f"{config_entry.entry_id}-tts"

    @property
    def default_language(self):
        """Return default language.

        NOTE(review): built from a set, so which language ends up first is
        not deterministic across runs — confirm this is acceptable.
        """
        if not self._supported_languages:
            return None
        return self._supported_languages[0]

    @property
    def supported_languages(self):
        """Return list of supported languages."""
        return self._supported_languages

    @property
    def supported_options(self):
        """Return list of supported options like voice, emotion."""
        return [
            tts.ATTR_AUDIO_OUTPUT,
            tts.ATTR_VOICE,
            ATTR_SPEAKER,
        ]

    @property
    def default_options(self):
        """Return a dict include default options."""
        return {}

    @callback
    def async_get_supported_voices(self, language: str) -> list[tts.Voice] | None:
        """Return a list of supported voices for a language."""
        return self._voices.get(language)

    async def async_get_tts_audio(self, message, language, options):
        """Load TTS from TCP socket.

        Returns ("wav", data) on success, or (None, None) if the connection
        drops or a socket/Wyoming error occurs.
        """
        voice_name: str | None = options.get(tts.ATTR_VOICE)
        voice_speaker: str | None = options.get(ATTR_SPEAKER)
        try:
            async with AsyncTcpClient(self.service.host, self.service.port) as client:
                voice: SynthesizeVoice | None = None
                if voice_name is not None:
                    voice = SynthesizeVoice(name=voice_name, speaker=voice_speaker)
                synthesize = Synthesize(text=message, voice=voice)
                await client.write_event(synthesize.event())
                with io.BytesIO() as wav_io:
                    wav_writer: wave.Wave_write | None = None
                    while True:
                        event = await client.read_event()
                        if event is None:
                            _LOGGER.debug("Connection lost")
                            return (None, None)
                        if AudioStop.is_type(event.type):
                            break
                        if AudioChunk.is_type(event.type):
                            chunk = AudioChunk.from_event(event)
                            # Lazily open the WAV writer using the format of
                            # the first chunk received.
                            if wav_writer is None:
                                wav_writer = wave.open(wav_io, "wb")
                                wav_writer.setframerate(chunk.rate)
                                wav_writer.setsampwidth(chunk.width)
                                wav_writer.setnchannels(chunk.channels)
                            wav_writer.writeframes(chunk.audio)
                    if wav_writer is not None:
                        wav_writer.close()
                    data = wav_io.getvalue()
        except (OSError, WyomingError):
            return (None, None)
        return ("wav", data)
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/switch.py | custom_components/vaca/switch.py | """Wyoming switch entities."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from homeassistant.components.switch import SwitchEntity, SwitchEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_ON, EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers import restore_state
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .const import DOMAIN
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
if TYPE_CHECKING:
from homeassistant.components.wyoming import DomainDataItem
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up switch entities."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    device: VASatelliteDevice = item.device  # type: ignore[assignment]
    # Setup is only forwarded for satellites
    assert device is not None
    # Switches available on every satellite.
    entities = [
        WyomingSatelliteMuteSwitch(device),
        WyomingSatelliteSwipeToRefreshSwitch(device),
        WyomingSatelliteScreenAutoBrightnessSwitch(device),
        WyomingSatelliteScreenAlwaysOnSwitch(device),
        WyomingSatelliteDarkModeSwitch(device),
        WyomingSatelliteDiagnosticsSwitch(device),
        WyomingSatelliteContinueConversationSwitch(device),
        WyomingSatelliteAlarmSwitch(device),
        WyomingSatelliteScreenOnWakeWordSwitch(device),
    ]
    # Capability-dependent switches, gated on what the companion app reports.
    if capabilities := device.capabilities:
        if capabilities.get("has_dnd"):
            entities.append(WyomingSatelliteDNDSwitch(device))
    if device.supportBump():
        entities.append(WyomingSatelliteScreenOnBumpSwitch(device))
    if device.supportProximity():
        entities.append(WyomingSatelliteScreenOnProximitySwitch(device))
    if device.capabilities and device.capabilities.get("has_front_camera"):
        entities.append(WyomingSatelliteEnableMotionDetectionSwitch(device))
        entities.append(WyomingSatelliteScreenOnMotionSwitch(device))
    if entities:
        async_add_entities(entities)
class BaseSwitch(VASatelliteEntity, restore_state.RestoreEntity, SwitchEntity):
    """Common behaviour shared by all VACA switch entities."""

    entity_description: SwitchEntityDescription
    # State used when no previous state can be restored.
    default_on = False

    async def async_added_to_hass(self) -> None:
        """Restore the previous state (or the default) once added to hass."""
        await super().async_added_to_hass()
        last_state = await self.async_get_last_state()
        restored = (
            self.default_on if last_state is None else last_state.state == STATE_ON
        )
        self._attr_is_on = restored
        await self.do_switch(restored)

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on."""
        await self.do_switch(True)

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off."""
        await self.do_switch(False)

    async def do_switch(self, value: bool, send_to_device: bool = True) -> None:
        """Write the new state and optionally push it to the device."""
        self._attr_is_on = value
        self.async_write_ha_state()
        if not send_to_device:
            return
        self._device.set_custom_setting(self.entity_description.key, self._attr_is_on)
class BaseFeedbackSwitch(BaseSwitch):
    """Switch whose state can also be updated by the device itself."""

    # Dispatcher signal suffix this switch listens on.
    _listener_class = "settings_update"

    async def async_added_to_hass(self) -> None:
        """Restore state, then subscribe to device settings updates."""
        await super().async_added_to_hass()
        signal = f"{DOMAIN}_{self._device.device_id}_{self._listener_class}"
        self.async_on_remove(
            async_dispatcher_connect(self.hass, signal, self.status_update)
        )

    async def status_update(self, data: dict[str, Any]) -> None:
        """Mirror a device-reported setting change without echoing it back."""
        settings = data.get("settings") or {}
        if self.entity_description.key not in settings:
            return
        await self.do_switch(
            settings[self.entity_description.key], send_to_device=False
        )
class WyomingSatelliteMuteSwitch(BaseSwitch):
    """Entity to represent if satellite is muted."""

    entity_description = SwitchEntityDescription(key="mute", translation_key="mute")
    default_on = False

    @property
    def icon(self) -> str:
        """Return the icon to use in the frontend."""
        if self._attr_is_on:
            return "mdi:microphone-off"
        return "mdi:microphone"
class WyomingSatelliteSwipeToRefreshSwitch(BaseSwitch):
    """Entity to control swipe to refresh."""

    entity_description = SwitchEntityDescription(
        key="swipe_refresh",
        translation_key="swipe_refresh",
        icon="mdi:web-refresh",
        entity_category=EntityCategory.CONFIG,
    )
    # Swipe-to-refresh is enabled on the device by default.
    default_on = True
class WyomingSatelliteScreenAutoBrightnessSwitch(BaseSwitch):
    """Entity to control screen auto brightness."""

    entity_description = SwitchEntityDescription(
        key="screen_auto_brightness",
        translation_key="screen_auto_brightness",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = True
class WyomingSatelliteScreenAlwaysOnSwitch(BaseSwitch):
    """Entity to control screen always on."""

    entity_description = SwitchEntityDescription(
        key="screen_always_on",
        translation_key="screen_always_on",
        icon="mdi:monitor-screenshot",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = True
class WyomingSatelliteDarkModeSwitch(BaseSwitch):
    """Entity to control dark mode."""

    entity_description = SwitchEntityDescription(
        key="dark_mode",
        translation_key="dark_mode",
        icon="mdi:compare",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = True
class WyomingSatelliteDNDSwitch(BaseFeedbackSwitch):
    """Entity to control do-not-disturb (device can also toggle it)."""

    entity_description = SwitchEntityDescription(
        key="do_not_disturb",
        translation_key="do_not_disturb",
        icon="mdi:do-not-disturb",
    )
    default_on = False
class WyomingSatelliteDiagnosticsSwitch(BaseSwitch):
    """Entity to control diagnostics overlay on/off."""

    entity_description = SwitchEntityDescription(
        key="diagnostics_enabled",
        translation_key="diagnostics_enabled",
        icon="mdi:microphone-question",
        entity_category=EntityCategory.DIAGNOSTIC,
    )
    default_on = False
class WyomingSatelliteContinueConversationSwitch(BaseSwitch):
    """Entity to control continue conversation on/off."""

    entity_description = SwitchEntityDescription(
        key="continue_conversation",
        translation_key="continue_conversation",
        icon="mdi:message-bulleted",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = True
class WyomingSatelliteAlarmSwitch(BaseFeedbackSwitch):
    """Entity to control alarm on/off."""

    entity_description = SwitchEntityDescription(
        key="alarm",
        translation_key="alarm",
        icon="mdi:alarm-bell",
    )
    default_on = False

    async def do_switch(self, value: bool, send_to_device: bool = True) -> None:
        """Write the new state; trigger/cancel the device alarm if requested."""
        self._attr_is_on = value
        self.async_write_ha_state()
        if not send_to_device:
            return
        payload = {"activate": self._attr_is_on, "url": ""}
        self._device.send_custom_action(self.entity_description.key, payload)
class WyomingSatelliteScreenOnWakeWordSwitch(BaseSwitch):
    """Entity to control screen on/off with wake word."""

    entity_description = SwitchEntityDescription(
        key="screen_on_wake_word",
        translation_key="screen_on_wake_word",
        icon="mdi:monitor-eye",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = True
class WyomingSatelliteScreenOnBumpSwitch(BaseSwitch):
    """Entity to control screen on with bump (requires accelerometer)."""

    entity_description = SwitchEntityDescription(
        key="screen_on_bump",
        translation_key="screen_on_bump",
        icon="mdi:gesture-tap",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = False
class WyomingSatelliteScreenOnProximitySwitch(BaseSwitch):
    """Entity to control screen on with proximity (requires proximity sensor)."""

    entity_description = SwitchEntityDescription(
        key="screen_on_proximity",
        translation_key="screen_on_proximity",
        icon="mdi:radar",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = False
class WyomingSatelliteEnableMotionDetectionSwitch(BaseSwitch):
    """Entity to control motion detection (requires front camera)."""

    entity_description = SwitchEntityDescription(
        key="enable_motion_detection",
        translation_key="enable_motion_detection",
        icon="mdi:motion-sensor",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = False
class WyomingSatelliteScreenOnMotionSwitch(BaseSwitch):
    """Entity to control screen on with motion (requires front camera)."""

    entity_description = SwitchEntityDescription(
        key="screen_on_motion",
        translation_key="screen_on_motion",
        icon="mdi:motion-sensor",
        entity_category=EntityCategory.CONFIG,
    )
    default_on = False
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/number.py | custom_components/vaca/number.py | """Number entities for Wyoming integration."""
from __future__ import annotations
from typing import TYPE_CHECKING, Final
from homeassistant.components.number import NumberEntityDescription, RestoreNumber
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .const import DOMAIN
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
if TYPE_CHECKING:
from homeassistant.components.wyoming import DomainDataItem
_MAX_MIC_GAIN: Final = 100
_MIN_SOUND_VOLUME: Final = 0
_MAX_SOUND_VOLUME: Final = 10
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up number entities."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    device: VASatelliteDevice = item.device  # type: ignore[assignment]
    # Setup is only forwarded for satellites
    assert item.device is not None
    entities = []
    # Numbers available on every satellite.
    entities.extend(
        [
            WyomingSatelliteMicGainNumber(device),
            WyomingSatelliteNotificationVolumeNumber(device),
            WyomingSatelliteMusicVolumeNumber(device),
            WyomingSatelliteDuckingVolumeNumber(device),
            WyomingSatelliteScreenBrightnessNumber(device),
            WyomingSatelliteWakeWordThresholdNumber(device),
            WyomingSatelliteZoomLevelNumber(device),
        ]
    )
    # Motion sensitivity only applies to devices with a front camera.
    if device.capabilities and device.capabilities.get("has_front_camera"):
        entities.append(WyomingSatelliteMotionDetectionSensitivityNumber(device))
    async_add_entities(entities)
class WyomingSatelliteMicGainNumber(VASatelliteEntity, RestoreNumber):
    """Entity to represent mic gain amount (-10..10)."""

    entity_description = NumberEntityDescription(
        key="mic_gain",
        translation_key="mic_gain",
        icon="mdi:microphone-plus",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = -10
    _attr_native_max_value = 10
    _attr_native_value = 0

    async def async_added_to_hass(self) -> None:
        """Restore the last mic gain when the entity is added to hass."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None:
            try:
                await self.async_set_native_value(float(state.state))
            except ValueError:
                # Restored state may be "unknown"/"unavailable" — keep default.
                pass

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new mic gain to the device."""
        mic_gain = int(max(-10, min(10, value)))
        self._attr_native_value = mic_gain
        self.async_write_ha_state()
        self._device.set_custom_setting("mic_gain", mic_gain)
class WyomingSatelliteNotificationVolumeNumber(VASatelliteEntity, RestoreNumber):
    """Entity to represent notification volume multiplier (0..10)."""

    entity_description = NumberEntityDescription(
        key="notification_volume",
        translation_key="notification_volume",
        icon="mdi:speaker-message",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = _MIN_SOUND_VOLUME
    _attr_native_max_value = _MAX_SOUND_VOLUME
    _attr_native_step = 1
    _attr_native_value = 5

    async def async_added_to_hass(self) -> None:
        """Restore the last notification volume when added to hass."""
        await super().async_added_to_hass()
        last_number_data = await self.async_get_last_number_data()
        if (last_number_data is not None) and (
            last_number_data.native_value is not None
        ):
            await self.async_set_native_value(last_number_data.native_value)

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new notification volume."""
        # Clamp once so the value sent to the device always matches the
        # value shown in HA (previously the unclamped value was sent).
        volume = int(max(_MIN_SOUND_VOLUME, min(_MAX_SOUND_VOLUME, value)))
        self._attr_native_value = volume
        self.async_write_ha_state()
        # Device expects the volume scaled by 10.
        self._device.set_custom_setting("notification_volume", volume * 10)
class WyomingSatelliteMusicVolumeNumber(VASatelliteEntity, RestoreNumber):
    """Entity to represent media volume multiplier (0..10)."""

    entity_description = NumberEntityDescription(
        key="music_volume",
        translation_key="music_volume",
        icon="mdi:music",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = _MIN_SOUND_VOLUME
    _attr_native_max_value = _MAX_SOUND_VOLUME
    _attr_native_step = 1
    _attr_native_value = 5

    async def async_added_to_hass(self) -> None:
        """Restore the last music volume when added to hass."""
        await super().async_added_to_hass()
        last_number_data = await self.async_get_last_number_data()
        if (last_number_data is not None) and (
            last_number_data.native_value is not None
        ):
            await self.async_set_native_value(last_number_data.native_value)

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new music volume."""
        # Clamp once so the device and HA agree on the value
        # (previously the unclamped value was sent to the device).
        volume = int(max(_MIN_SOUND_VOLUME, min(_MAX_SOUND_VOLUME, value)))
        self._attr_native_value = volume
        self.async_write_ha_state()
        # Device expects the volume scaled by 10.
        self._device.set_custom_setting("music_volume", volume * 10)
class WyomingSatelliteDuckingVolumeNumber(VASatelliteEntity, RestoreNumber):
    """Entity to represent the volume-ducking level (0..10, 0.1 steps)."""

    entity_description = NumberEntityDescription(
        key="ducking_volume",
        translation_key="ducking_volume",
        icon="mdi:volume-low",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = 0
    _attr_native_max_value = 10
    _attr_native_step = 0.1
    _attr_native_value = 1

    async def async_added_to_hass(self) -> None:
        """Restore the last ducking volume when added to hass."""
        await super().async_added_to_hass()
        last_number_data = await self.async_get_last_number_data()
        if (last_number_data is not None) and (
            last_number_data.native_value is not None
        ):
            await self.async_set_native_value(last_number_data.native_value)

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new ducking volume."""
        # Keep the float: the step is 0.1, so the previous int() cast
        # truncated e.g. 1.5 down to 1 in the stored HA state while the
        # device received the unclamped 1.5 * 10.
        volume = max(0.0, min(10.0, value))
        self._attr_native_value = volume
        self.async_write_ha_state()
        # Device expects the value scaled by 10.
        self._device.set_custom_setting("ducking_volume", volume * 10)
class WyomingSatelliteScreenBrightnessNumber(VASatelliteEntity, RestoreNumber):
    """Entity to represent screen brightness (0..100)."""

    entity_description = NumberEntityDescription(
        key="screen_brightness",
        translation_key="screen_brightness",
        icon="mdi:brightness-4",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = 0
    _attr_native_max_value = 100
    _attr_native_value = 50

    async def async_added_to_hass(self) -> None:
        """Restore the last screen brightness when added to hass."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None:
            try:
                await self.async_set_native_value(float(state.state))
            except ValueError:
                # Restored state may be "unknown"/"unavailable" — keep default.
                pass

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new screen brightness."""
        screen_brightness = int(max(0, min(100, value)))
        self._attr_native_value = screen_brightness
        self.async_write_ha_state()
        self._device.set_custom_setting("screen_brightness", screen_brightness)
class WyomingSatelliteWakeWordThresholdNumber(VASatelliteEntity, RestoreNumber):
    """Entity to represent wake word trigger threshold (0..10)."""

    entity_description = NumberEntityDescription(
        key="wake_word_threshold",
        translation_key="wake_word_threshold",
        icon="mdi:account-voice",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = 0
    _attr_native_max_value = 10
    _attr_native_value = 6

    async def async_added_to_hass(self) -> None:
        """Restore the last threshold when added to hass."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None:
            try:
                await self.async_set_native_value(float(state.state))
            except ValueError:
                # Restored state may be "unknown"/"unavailable" — keep default.
                pass

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new wake word threshold."""
        threshold = int(max(0, min(10, value)))
        self._attr_native_value = threshold
        self.async_write_ha_state()
        self._device.set_custom_setting(self.entity_description.key, threshold)
class WyomingSatelliteZoomLevelNumber(VASatelliteEntity, RestoreNumber):
    """Entity to represent camera zoom level (0..2.5, 0.1 steps)."""

    entity_description = NumberEntityDescription(
        key="zoom_level",
        translation_key="zoom_level",
        icon="mdi:magnify-plus",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = 0
    _attr_native_max_value = 2.5
    _attr_native_step = 0.1
    _attr_native_value = 0

    async def async_added_to_hass(self) -> None:
        """Restore the last zoom level when added to hass."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None:
            try:
                await self.async_set_native_value(float(state.state))
            except ValueError:
                # Restored state may be "unknown"/"unavailable" — keep default.
                pass

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new zoom level."""
        value = max(0, min(self._attr_native_max_value, value))
        self._attr_native_value = value
        self.async_write_ha_state()
        # Device zoom scale: 0 means off, otherwise value*100 offset by 60.
        self._device.set_custom_setting(
            self.entity_description.key, int(value * 100) + 60 if value > 0 else 0
        )
class WyomingSatelliteMotionDetectionSensitivityNumber(
    VASatelliteEntity, RestoreNumber
):
    """Entity to represent motion detection sensitivity (0..100)."""

    entity_description = NumberEntityDescription(
        key="motion_detection_sensitivity",
        translation_key="motion_detection_sensitivity",
        icon="mdi:tune-variant",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_native_min_value = 0
    _attr_native_max_value = 100
    _attr_native_step = 1
    _attr_native_value = 70

    async def async_added_to_hass(self) -> None:
        """Restore the last sensitivity when added to hass."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None:
            try:
                await self.async_set_native_value(float(state.state))
            except ValueError:
                # Restored state may be "unknown"/"unavailable" — keep default.
                pass

    async def async_set_native_value(self, value: float) -> None:
        """Clamp, store and push the new sensitivity."""
        value = max(0, min(self._attr_native_max_value, value))
        self._attr_native_value = value
        self.async_write_ha_state()
        # Sensitivity is sent as 0-50 scale
        self._device.set_custom_setting(self.entity_description.key, int(value / 2))
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/binary_sensor.py | custom_components/vaca/binary_sensor.py | """Binary Sensor for Wyoming."""
from __future__ import annotations
import asyncio
from asyncio import Task
import logging
from typing import TYPE_CHECKING, Any
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from .const import DOMAIN
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
if TYPE_CHECKING:
from homeassistant.components.wyoming import DomainDataItem
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up sensor entities."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    device: VASatelliteDevice = item.device  # type: ignore[assignment]
    # Setup is only forwarded for satellites
    assert item.device is not None
    entities = []
    entities.append(WyomingSatelliteScreenOnBinarySensor(device))
    # Capability-dependent sensors, gated on what the companion app reports.
    if capabilities := device.capabilities:
        if capabilities.get("has_battery"):
            entities.append(WyomingSatelliteBatteryChargingBinarySensor(device))
        if capabilities.get("has_front_camera"):
            entities.append(WyomingSatelliteMotionDetectedSensor(device))
    if entities:
        async_add_entities(entities)
class _WyomingSatelliteDeviceBinarySensorBase(
    VASatelliteEntity, BinarySensorEntity, RestoreEntity
):
    """Base class for device binary sensors fed by device status updates."""

    _attr_is_on = False
    # Dispatcher signal suffix this sensor listens on.
    _listener_class = "status_update"
    # Subclasses may opt out of state restoration (e.g. momentary sensors).
    _dont_restore_state = False

    async def async_added_to_hass(self) -> None:
        """Restore the previous state and subscribe to device updates."""
        await super().async_added_to_hass()
        if not self._dont_restore_state:
            state = await self.async_get_last_state()
            if state is not None:
                # Compare against the "on" state string (STATE_ON).
                # bool(state.state) was wrong: any non-empty string —
                # including "off" — is truthy.
                self._attr_is_on = state.state == "on"
                self.async_write_ha_state()
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"{DOMAIN}_{self._device.device_id}_{self._listener_class}",
                self.status_update,
            )
        )

    def _get_binary_value(self, value: Any) -> Any:
        """Coerce a device-reported string value to bool; pass others through."""
        if isinstance(value, str):
            return value.lower() in ("true", "1", "yes")
        return value

    @callback
    def status_update(self, data: dict[str, Any]) -> None:
        """Handle a status update pushed from the device."""
        if sensors := data.get("sensors"):
            if self.entity_description.key in sensors:
                self._attr_is_on = self._get_binary_value(
                    sensors[self.entity_description.key]
                )
                self.async_write_ha_state()
class WyomingSatelliteBatteryChargingBinarySensor(
    _WyomingSatelliteDeviceBinarySensorBase
):
    """Entity to represent battery charging sensor for satellite."""

    entity_description = BinarySensorEntityDescription(
        key="battery_charging",
        translation_key="battery_charging",
        device_class=BinarySensorDeviceClass.BATTERY_CHARGING,
    )
class WyomingSatelliteScreenOnBinarySensor(_WyomingSatelliteDeviceBinarySensorBase):
    """Entity to represent screen on status sensor for satellite."""

    entity_description = BinarySensorEntityDescription(
        key="screen_on", translation_key="screen_on", icon="mdi:monitor"
    )
class WyomingSatelliteMotionDetectedSensor(_WyomingSatelliteDeviceBinarySensorBase):
    """Motion-detected sensor; auto-clears 20s after the last detection."""

    detection_reset_task: Task | None = None
    # Momentary sensor: never restore a stale "motion" state after restart.
    _dont_restore_state = True
    entity_description = BinarySensorEntityDescription(
        key="motion_detected",
        translation_key="motion_detected",
        icon="mdi:monitor",
        device_class=BinarySensorDeviceClass.MOTION,
    )

    @callback
    def status_update(self, data: dict[str, Any]) -> None:
        """Handle a motion status update and (re)arm the auto-clear timer."""
        super().status_update(data)
        # Cancel any pending reset so repeated motion keeps the state on.
        if (
            self.detection_reset_task is not None
            and not self.detection_reset_task.done()
        ):
            self.detection_reset_task.cancel()
        self.detection_reset_task = self.hass.async_create_background_task(
            self.reset_detection(), name="VACA Motion Detection Reset"
        )

    async def reset_detection(self) -> None:
        """Clear the motion state after a 20 second hold-off."""
        await asyncio.sleep(20)
        self._attr_is_on = False
        # Running inside the event loop: write the state directly instead of
        # scheduling a sync update from a coroutine.
        self.async_write_ha_state()
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/const.py | custom_components/vaca/const.py | """Constants for the Wyoming integration."""
# Integration domain; used for hass.data keys and dispatcher signal names.
DOMAIN = "vaca"
# Minimum companion APK version this integration supports.
MIN_APK_VERSION = "0.8.2"
# Audio format exchanged with the satellite: 16 kHz, 16-bit, mono PCM.
SAMPLE_RATE = 16000
SAMPLE_WIDTH = 2
SAMPLE_CHANNELS = 1
# For multi-speaker voices, this is the name of the selected speaker.
ATTR_SPEAKER = "speaker"
# Event fired when an intent is received.
INTENT_EVENT = f"{DOMAIN}_intent_event"
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/devices.py | custom_components/vaca/devices.py | """Class to manage satellite devices."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
from wyoming.info import Info
from homeassistant.components.wyoming import SatelliteDevice
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import entity_registry as er
from .const import DOMAIN
@dataclass
class VASatelliteDevice(SatelliteDevice):
    """VACA Class to store device."""

    # Wyoming server info for this satellite.
    info: Info | None = None
    # Settings pushed to / reported by the companion app, keyed by setting name.
    custom_settings: dict[str, Any] | None = None
    # Capability flags and sensor list reported by the companion app.
    capabilities: dict[str, Any] | None = None
    _custom_settings_listener: Callable[[], None] | None = None
    _custom_action_listener: Callable[[Any, Any], None] | None = None
    stt_listener: Callable[[str], None] | None = None
    tts_listener: Callable[[str], None] | None = None

    def get_pipeline_entity_id(self, hass: HomeAssistant) -> str | None:
        """Return entity id for pipeline select."""
        ent_reg = er.async_get(hass)
        return ent_reg.async_get_entity_id(
            "select", DOMAIN, f"{self.satellite_id}-pipeline"
        )

    def get_noise_suppression_level_entity_id(self, hass: HomeAssistant) -> str | None:
        """Return entity id for noise suppression select."""
        ent_reg = er.async_get(hass)
        return ent_reg.async_get_entity_id(
            "select", DOMAIN, f"{self.satellite_id}-noise_suppression_level"
        )

    def get_vad_sensitivity_entity_id(self, hass: HomeAssistant) -> str | None:
        """Return entity id for VAD sensitivity."""
        ent_reg = er.async_get(hass)
        return ent_reg.async_get_entity_id(
            "select", DOMAIN, f"{self.satellite_id}-vad_sensitivity"
        )

    @callback
    def set_custom_setting(self, setting: str, value: str | float) -> None:
        """Set custom setting and notify the listener on change."""
        if self.custom_settings is None:
            self.custom_settings = {}
        if setting not in self.custom_settings:
            self.custom_settings[setting] = value
        elif self.custom_settings[setting] == value:
            # Unchanged value: skip the listener notification below.
            return
        else:
            self.custom_settings[setting] = value
        if self._custom_settings_listener is not None:
            self._custom_settings_listener()

    @callback
    def send_custom_action(
        self, command: str, payload: dict[str, Any] | None = None
    ) -> None:
        """Send a custom action/command to the device via the listener."""
        if self._custom_action_listener is not None:
            self._custom_action_listener(command, payload)

    @callback
    def set_custom_settings_listener(
        self, custom_settings_listener: Callable[[], None]
    ) -> None:
        """Listen for updates to custom settings."""
        self._custom_settings_listener = custom_settings_listener

    @callback
    def set_custom_action_listener(
        self, custom_action_listener: Callable[[Any, Any], None]
    ) -> None:
        """Listen for custom action requests."""
        self._custom_action_listener = custom_action_listener

    @callback
    def set_stt_listener(self, stt_listener: Callable[[str], None]) -> None:
        """Listen for stt updates."""
        self.stt_listener = stt_listener

    @callback
    def set_tts_listener(self, tts_listener: Callable[[str], None]) -> None:
        """Listen for tts updates."""
        self.tts_listener = tts_listener

    # The numeric sensor "type" codes below appear to follow the Android
    # Sensor.TYPE_* constants (1=accelerometer, 5=light, 8=proximity) —
    # NOTE(review): confirm against the companion app.
    def has_light_sensor(self) -> bool:
        """Check if the device has a light sensor."""
        if self.capabilities and (sensors := self.capabilities.get("sensors")):
            for sensor in sensors:
                if sensor.get("type") == 5:  # Light sensor type
                    return True
        return False

    def supportBump(self) -> bool:
        """Check if the device supports bump proximity feature."""
        if self.capabilities and (sensors := self.capabilities.get("sensors")):
            for sensor in sensors:
                if sensor.get("type") == 1:  # Accelerometer type
                    return True
        return False

    def supportProximity(self) -> bool:
        """Check if the device supports the proximity feature."""
        if self.capabilities and (sensors := self.capabilities.get("sensors")):
            for sensor in sensors:
                if sensor.get("type") == 8:  # Proximity type
                    return True
        return False
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/wake_word.py | custom_components/vaca/wake_word.py | """Support for Wyoming wake-word-detection services."""
import asyncio
from collections.abc import AsyncIterable
import logging
from wyoming.audio import AudioChunk, AudioStart
from wyoming.client import AsyncTcpClient
from wyoming.wake import Detect, Detection
from homeassistant.components import wake_word
from homeassistant.components.wyoming import DomainDataItem, WyomingService
# pylint: disable-next=hass-component-root-import
from homeassistant.components.wyoming.data import load_wyoming_info
# pylint: disable-next=hass-component-root-import
from homeassistant.components.wyoming.error import WyomingError
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up Wyoming wake-word-detection."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    # One wake-word provider per config entry, backed by its Wyoming service.
    async_add_entities(
        [
            WyomingWakeWordProvider(hass, config_entry, item.service),
        ]
    )
class WyomingWakeWordProvider(wake_word.WakeWordDetectionEntity):
"""Wyoming wake-word-detection provider."""
    def __init__(
        self,
        hass: HomeAssistant,
        config_entry: ConfigEntry,
        service: WyomingService,
    ) -> None:
        """Set up provider."""
        self.hass = hass
        self.service = service
        # NOTE(review): assumes the Wyoming server advertises at least one
        # wake service — info.wake[0] raises IndexError otherwise; presumably
        # guaranteed by the config flow, confirm.
        wake_service = service.info.wake[0]
        self._supported_wake_words = [
            wake_word.WakeWord(
                id=ww.name, name=ww.description or ww.name, phrase=ww.phrase
            )
            for ww in wake_service.models
        ]
        self._attr_name = wake_service.name
        self._attr_unique_id = f"{config_entry.entry_id}-wake_word"
    async def get_supported_wake_words(self) -> list[wake_word.WakeWord]:
        """Return a list of supported wake words."""
        # Re-query the server so newly installed models show up; on failure
        # (info is None) fall back to the cached list from __init__.
        info = await load_wyoming_info(
            self.service.host, self.service.port, retries=0, timeout=1
        )
        if info is not None:
            wake_service = info.wake[0]
            self._supported_wake_words = [
                wake_word.WakeWord(
                    id=ww.name,
                    name=ww.description or ww.name,
                    phrase=ww.phrase,
                )
                for ww in wake_service.models
            ]
        return self._supported_wake_words
async def _async_process_audio_stream(
self, stream: AsyncIterable[tuple[bytes, int]], wake_word_id: str | None
) -> wake_word.DetectionResult | None:
"""Try to detect one or more wake words in an audio stream.
Audio must be 16Khz sample rate with 16-bit mono PCM samples.
"""
async def next_chunk():
"""Get the next chunk from audio stream."""
async for chunk_bytes in stream:
return chunk_bytes
return None
try:
async with AsyncTcpClient(self.service.host, self.service.port) as client:
# Inform client which wake word we want to detect (None = default)
await client.write_event(
Detect(names=[wake_word_id] if wake_word_id else None).event()
)
await client.write_event(
AudioStart(
rate=16000,
width=2,
channels=1,
).event(),
)
# Read audio and wake events in "parallel"
audio_task = asyncio.create_task(next_chunk())
wake_task = asyncio.create_task(client.read_event())
pending = {audio_task, wake_task}
try:
while True:
done, pending = await asyncio.wait(
pending, return_when=asyncio.FIRST_COMPLETED
)
if wake_task in done:
event = wake_task.result()
if event is None:
_LOGGER.debug("Connection lost")
break
if Detection.is_type(event.type):
# Possible detection
detection = Detection.from_event(event)
_LOGGER.info(detection)
if wake_word_id and (detection.name != wake_word_id):
_LOGGER.warning(
"Expected wake word %s but got %s, skipping",
wake_word_id,
detection.name,
)
wake_task = asyncio.create_task(client.read_event())
pending.add(wake_task)
continue
# Retrieve queued audio
queued_audio: list[tuple[bytes, int]] | None = None
if audio_task in pending:
# Save queued audio
await audio_task
pending.remove(audio_task)
queued_audio = [audio_task.result()] # pyright: ignore[reportAssignmentType]
return wake_word.DetectionResult(
wake_word_id=detection.name or "",
wake_word_phrase=self._get_phrase(
detection.name or ""
),
timestamp=detection.timestamp,
queued_audio=queued_audio,
)
# Next event
wake_task = asyncio.create_task(client.read_event())
pending.add(wake_task)
if audio_task in done:
# Forward audio to wake service
chunk_info = audio_task.result()
if chunk_info is None:
break
chunk_bytes, chunk_timestamp = chunk_info
chunk = AudioChunk(
rate=16000,
width=2,
channels=1,
audio=chunk_bytes,
timestamp=chunk_timestamp,
)
await client.write_event(chunk.event())
# Next chunk
audio_task = asyncio.create_task(next_chunk())
pending.add(audio_task)
finally:
# Clean up
if audio_task in pending:
# It's critical that we don't cancel the audio task or
# leave it hanging. This would mess up the pipeline STT
# by stopping the audio stream.
await audio_task
pending.remove(audio_task)
for task in pending:
task.cancel()
except (OSError, WyomingError):
_LOGGER.exception("Error processing audio stream")
return None
def _get_phrase(self, model_id: str) -> str:
"""Get wake word phrase for model id."""
for ww_model in self._supported_wake_words:
if not ww_model.phrase:
continue
if ww_model.id == model_id:
return ww_model.phrase
return model_id
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/sensor.py | custom_components/vaca/sensor.py | """Sensor for Wyoming."""
from __future__ import annotations
from functools import reduce
import logging
from typing import TYPE_CHECKING, Any
from homeassistant.components.sensor import (
RestoreSensor,
SensorDeviceClass,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import LIGHT_LUX, PERCENTAGE, EntityCategory
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.util.dt import parse_datetime
from .const import DOMAIN
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
if TYPE_CHECKING:
from homeassistant.components.wyoming import DomainDataItem
UNKNOWN: str = "unknown"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up sensor entities."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    # Setup is only forwarded for satellites
    assert item.device is not None
    device: VASatelliteDevice = item.device  # type: ignore[assignment]

    # Sensors every satellite gets unconditionally.
    sensors: list = [
        WyomingSatelliteSTTSensor(device),
        WyomingSatelliteTTSSensor(device),
        WyomingSatelliteIntentSensor(device),
        WyomingSatelliteOrientationSensor(device),
        WyomingSatelliteBrowserPathSensor(device),
    ]
    # Hardware-dependent sensors, gated on reported capabilities.
    capabilities = device.capabilities
    if capabilities:
        if capabilities.get("app_version"):
            sensors.append(WyomingSatelliteAppVersionSensor(device))
        if capabilities.get("has_battery"):
            sensors.append(WyomingSatelliteBatteryLevelSensor(device))
        if device.has_light_sensor():
            sensors.append(WyomingSatelliteLightSensor(device))
        if capabilities.get("has_front_camera"):
            sensors.append(WyomingSatelliteLastMotionSensor(device))
    async_add_entities(sensors)
class WyomingSatelliteSTTSensor(VASatelliteEntity, RestoreSensor):
    """Sensor showing the latest speech-to-text result from the satellite."""

    entity_description = SensorEntityDescription(
        key="stt",
        translation_key="stt",
        icon="mdi:microphone-message",
    )
    _attr_native_value = UNKNOWN

    async def async_added_to_hass(self) -> None:
        """Call when entity about to be added to hass."""
        await super().async_added_to_hass()
        last_state = await self.async_get_last_state()
        if last_state is not None:
            self._value_changed(last_state.state)
        # Receive future STT results from the device.
        self._device.set_stt_listener(self._value_changed)

    @callback
    def _value_changed(self, value: str) -> None:
        """Store a non-empty value (length-limited) and publish the state."""
        if not value:
            return
        # HA state values are capped at 255 chars; keep room for the ellipsis.
        if len(value) > 254:
            value = value[:252] + ".."
        self._attr_native_value = value
        self.async_write_ha_state()
class WyomingSatelliteTTSSensor(VASatelliteEntity, RestoreSensor):
    """Sensor showing the latest text-to-speech output for the satellite."""

    entity_description = SensorEntityDescription(
        key="tts", translation_key="tts", icon="mdi:speaker-message"
    )
    _attr_native_value = UNKNOWN

    async def async_added_to_hass(self) -> None:
        """Call when entity about to be added to hass."""
        await super().async_added_to_hass()
        last_state = await self.async_get_last_state()
        if last_state is not None:
            self._value_changed(last_state.state)
        # Receive future TTS output from the device.
        self._device.set_tts_listener(self._value_changed)

    @callback
    def _value_changed(self, value: str) -> None:
        """Store a non-empty value (length-limited) and publish the state."""
        if not value:
            return
        # HA state values are capped at 255 chars; keep room for the ellipsis.
        if len(value) > 254:
            value = value[:252] + ".."
        self._attr_native_value = value
        self.async_write_ha_state()
class WyomingSatelliteIntentSensor(VASatelliteEntity, RestoreSensor):
    """Entity to represent intent sensor for satellite."""

    entity_description = SensorEntityDescription(
        key="intent", translation_key="intent", icon="mdi:message-bulleted"
    )
    _attr_native_value = UNKNOWN

    async def async_added_to_hass(self) -> None:
        """Restore the last value and subscribe to intent-output dispatches."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None:
            self._attr_native_value = state.state
            self.async_write_ha_state()
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"{DOMAIN}_{self._device.device_id}_intent_output",
                self.status_update,
            )
        )

    @callback
    def status_update(self, data: dict[str, Any]) -> None:
        """Update entity from a pipeline intent-output payload."""
        if not (data and data.get("intent_output")):
            return
        raw = self.get_key("intent_output.response.speech.plain.speech", data)
        # BUGFIX: previously str(raw) turned a missing speech path (None)
        # into the literal state "None"; skip the update instead.
        if raw is None:
            return
        value = str(raw)
        if value:
            if len(value) > 254:
                # Limit the length of the value to avoid issues with Home Assistant
                value = value[:252] + ".."
            self._attr_native_value = value
            self._attr_extra_state_attributes = data
            self.async_write_ha_state()

    def get_key(
        self, dot_notation_path: str, data: dict
    ) -> dict[str, dict | str | int] | str | int | None:
        """Try to get a deep value from a dict based on a dot-notation.

        Returns None when any segment of the path is missing.
        """
        # str.split(".") always yields at least one element, so the previous
        # special case for paths without a dot was redundant.
        try:
            return reduce(dict.get, dot_notation_path.split("."), data)  # type: ignore[return-value]
        except (TypeError, KeyError):
            return None
class _WyomingSatelliteDeviceSensorBase(VASatelliteEntity, RestoreSensor):
    """Base class for device sensors.

    Subclasses set an entity_description whose key matches a field in the
    satellite's dispatched payload, and may override _listener_class with
    "capabilities_update" to source the value from device capabilities.
    """
    # Default value until the first update arrives.
    _attr_native_value = 0
    # Dispatcher signal suffix this sensor subscribes to.
    _listener_class = "status_update"
    async def async_added_to_hass(self) -> None:
        """Call when entity about to be added to hass."""
        await super().async_added_to_hass()
        # Restore last known value; timestamps are re-parsed so the restored
        # value is a datetime rather than its string form.
        state = await self.async_get_last_state()
        if state is not None:
            if self.entity_description.device_class == SensorDeviceClass.TIMESTAMP:
                self._attr_native_value = self._get_timestamp_from_string(state.state)
            else:
                self._attr_native_value = state.state
            self.async_write_ha_state()
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"{DOMAIN}_{self._device.device_id}_{self._listener_class}",
                self.status_update,
            )
        )
    def _get_native_value(self, value: Any) -> Any:
        """Get the native value from the data."""
        if isinstance(value, (int, float)):
            return value
        if isinstance(value, str):
            # Purely numeric strings become ints; other strings pass through.
            if value.isdigit():
                return int(value)
            return value
        return value
    def _get_timestamp_from_string(self, timestamp_str: str) -> Any:
        """Convert timestamp string to datetime object."""
        parsed_time = parse_datetime(timestamp_str)
        if parsed_time:
            return parsed_time
        # Fall back to the Unix epoch when the stored value is unparseable.
        return parse_datetime("1970-01-01T00:00:00Z")
    @callback
    def status_update(self, data: dict[str, Any]) -> None:
        """Update entity.

        For "status_update" sensors the value comes from the payload's
        "sensors" mapping; for "capabilities_update" sensors it comes from
        the device's capabilities dict.
        """
        if self._listener_class == "status_update":
            if sensors := data.get("sensors"):
                if self.entity_description.key in sensors:
                    if (
                        self.entity_description.device_class
                        == SensorDeviceClass.TIMESTAMP
                    ):
                        # Handle timestamp conversion
                        timestamp_str = sensors[self.entity_description.key]
                        self._attr_native_value = self._get_timestamp_from_string(
                            timestamp_str
                        )
                    else:
                        self._attr_native_value = self._get_native_value(
                            sensors[self.entity_description.key]
                        )
                    self.async_write_ha_state()
        elif self._listener_class == "capabilities_update":
            # Only update when the capability is present and truthy.
            if self._device.capabilities and self._device.capabilities.get(
                self.entity_description.key
            ):
                self._attr_native_value = self._get_native_value(
                    self._device.capabilities[self.entity_description.key]
                )
                self.async_write_ha_state()
class WyomingSatelliteLightSensor(_WyomingSatelliteDeviceSensorBase):
    """Entity to represent light sensor for satellite.

    Value arrives via the base class's "status_update" signal under the
    "light" key of the sensors payload.
    """
    entity_description = SensorEntityDescription(
        key="light",
        translation_key="light_level",
        device_class=SensorDeviceClass.ILLUMINANCE,
        native_unit_of_measurement=LIGHT_LUX,
        suggested_display_precision=0,
    )
class WyomingSatelliteOrientationSensor(_WyomingSatelliteDeviceSensorBase):
    """Entity to represent orientation sensor for satellite.

    Value arrives via the base class's "status_update" signal under the
    "orientation" key of the sensors payload.
    """
    # Orientation is textual, so start as "unknown" rather than 0.
    _attr_native_value = UNKNOWN
    entity_description = SensorEntityDescription(
        key="orientation", translation_key="orientation", icon="mdi:screen-rotation"
    )
class WyomingSatelliteBatteryLevelSensor(_WyomingSatelliteDeviceSensorBase):
    """Entity to represent battery level sensor for satellite.

    Only added when the device reports the "has_battery" capability.
    """
    entity_description = SensorEntityDescription(
        key="battery_level",
        translation_key="battery_level",
        device_class=SensorDeviceClass.BATTERY,
        native_unit_of_measurement=PERCENTAGE,
    )
class WyomingSatelliteBrowserPathSensor(_WyomingSatelliteDeviceSensorBase):
    """Entity to represent browser path sensor for satellite.

    Tracks the "current_path" field of the satellite's status payload.
    """
    # Path is textual, so start as "unknown" rather than 0.
    _attr_native_value = UNKNOWN
    entity_description = SensorEntityDescription(
        key="current_path", translation_key="current_path", icon="mdi:web"
    )
class WyomingSatelliteLastMotionSensor(_WyomingSatelliteDeviceSensorBase):
    """Entity to represent last motion for satellite.

    Only added when the device reports the "has_front_camera" capability;
    the TIMESTAMP device class makes the base class parse values to datetimes.
    """
    _attr_native_value = UNKNOWN
    entity_description = SensorEntityDescription(
        key="last_motion",
        translation_key="last_motion",
        icon="mdi:motion-sensor",
        device_class=SensorDeviceClass.TIMESTAMP,
    )
class WyomingSatelliteAppVersionSensor(_WyomingSatelliteDeviceSensorBase):
    """Diagnostic sensor reporting the companion app version."""

    # This sensor's value comes from capabilities, not the status payload.
    _listener_class = "capabilities_update"
    _attr_native_value = UNKNOWN
    entity_description = SensorEntityDescription(
        key="app_version",
        translation_key="app_version",
        icon="mdi:application",
        entity_category=EntityCategory.DIAGNOSTIC,
    )

    def _get_native_value(self, value: Any) -> Any:
        """Return *value*, or UNKNOWN when it is missing."""
        if value is None:
            return UNKNOWN
        return value

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return entity attributes."""
        attrs: dict[str, Any] = {
            attr_name: self.get_capability(cap_name)
            for attr_name, cap_name in (
                ("device_signature", "device_signature"),
                ("android_version", "release"),
                ("webview_version", "webview_version"),
                ("has_battery", "has_battery"),
                ("has_front_camera", "has_front_camera"),
            )
        }
        attrs["has_light_sensor"] = self._device.has_light_sensor()
        attrs["sensors"] = self.get_sensor_names()
        return attrs

    def get_capability(self, capability: str) -> Any:
        """Get a specific capability from the device."""
        caps = self._device.capabilities
        return UNKNOWN if caps is None else caps.get(capability, UNKNOWN)

    def get_sensor_names(self) -> list[str] | None:
        """Get the names of all sensors."""
        caps = self._device.capabilities
        if not caps:
            return None
        sensors = caps.get("sensors")
        if not sensors:
            return None
        return [sensor.get("name") for sensor in sensors]
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/__init__.py | custom_components/vaca/__init__.py | """The Wyoming integration."""
from __future__ import annotations
import asyncio
import logging
from typing import Any
from homeassistant.components.wyoming import (
DomainDataItem,
WyomingService,
async_register_websocket_api,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.typing import ConfigType
from .client import AsyncTcpClient
from .const import ATTR_SPEAKER, DOMAIN
from .custom import CustomEvent
from .devices import VASatelliteDevice
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = cv.empty_config_schema(DOMAIN)
SATELLITE_PLATFORMS = [
Platform.ASSIST_SATELLITE,
Platform.BINARY_SENSOR,
Platform.BUTTON,
Platform.SELECT,
Platform.SWITCH,
Platform.MEDIA_PLAYER,
Platform.NUMBER,
Platform.SENSOR,
]
__all__ = [
"ATTR_SPEAKER",
"DOMAIN",
"async_setup",
"async_setup_entry",
"async_unload_entry",
]
class WyomingError(HomeAssistantError):
    """Base class for Wyoming errors."""
    # Root of this integration's exception hierarchy; raised and caught by
    # the connection helpers below (e.g. get_device_capabilities).
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the Wyoming integration."""
    # Register the websocket API once per Home Assistant instance
    # (integration setup), not per config entry.
    async_register_websocket_api(hass)
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Load Wyoming.

    Connects to the configured server, stores per-entry state, and — when
    the server advertises a satellite — registers a device and forwards
    setup to the satellite platforms.
    """
    # Probe the server; raising ConfigEntryNotReady lets HA retry later.
    service = await WyomingService.create(entry.data["host"], entry.data["port"])
    if service is None:
        raise ConfigEntryNotReady("Unable to connect")
    item = DomainDataItem(service=service)
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = item
    # Forward to the platforms implied by the services the server exposes.
    await hass.config_entries.async_forward_entry_setups(entry, service.platforms)
    entry.async_on_unload(entry.add_update_listener(update_listener))
    if (satellite_info := service.info.satellite) is not None:
        # Create satellite device
        dev_reg = dr.async_get(hass)
        # Use config entry id since only one satellite per entry is supported
        satellite_id = entry.entry_id
        device = dev_reg.async_get_or_create(
            config_entry_id=entry.entry_id,
            identifiers={(DOMAIN, satellite_id)},
            name=satellite_info.name,
            suggested_area=satellite_info.area,
        )
        item.device = VASatelliteDevice(
            satellite_id=satellite_id,
            device_id=device.id,
        )
        # Query capabilities before platform setup so entity creation can
        # be gated on them (see sensor/switch platform setup).
        item.device.capabilities = await get_device_capabilities(item)
        # Set up satellite entity, sensors, switches, etc.
        await hass.config_entries.async_forward_entry_setups(entry, SATELLITE_PLATFORMS)
    return True
async def update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Handle options update."""
    # Reload the whole entry so changed options take effect everywhere.
    await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload Wyoming."""
    item: DomainDataItem = hass.data[DOMAIN][entry.entry_id]
    # Satellite entries also loaded the satellite platform set.
    extra_platforms = SATELLITE_PLATFORMS if item.device is not None else []
    platforms = [*item.service.platforms, *extra_platforms]
    unloaded = await hass.config_entries.async_unload_platforms(entry, platforms)
    if unloaded:
        # Drop per-entry state only after every platform unloaded cleanly.
        del hass.data[DOMAIN][entry.entry_id]
    return unloaded
async def get_device_capabilities(item: DomainDataItem) -> dict[str, Any] | None:
    """Get device capabilities.

    Sends a "capabilities" custom event to the satellite and waits (1 s per
    attempt, up to 4 attempts with a 2 s back-off) for the reply. Returns
    None when no capabilities could be obtained.
    """
    capabilities: dict[str, Any] | None = None
    for _ in range(4):
        try:
            async with (
                AsyncTcpClient(item.service.host, item.service.port) as client,
                asyncio.timeout(1),
            ):
                # Describe -> Info
                await client.write_event(CustomEvent("capabilities").event())
                while True:
                    event = await client.read_event()
                    if event is None:
                        # Raised (not returned) so the retry loop handles it.
                        raise WyomingError(  # noqa: TRY301
                            "Connection closed unexpectedly",
                        )
                    # Ignore unrelated events until a custom event with a
                    # payload arrives.
                    if CustomEvent.is_type(event.type) and (
                        event_data := CustomEvent.from_event(event).event_data
                    ):
                        capabilities = event_data.get("capabilities")
                        break  # while
            if capabilities is not None:
                break  # for
        except (TimeoutError, OSError, WyomingError) as ex:
            _LOGGER.warning(
                "Error getting device capabilities: %s, %s", ex, capabilities
            )
            # Sleep and try again
            await asyncio.sleep(2)
    return capabilities
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/entity.py | custom_components/vaca/entity.py | """Wyoming entities."""
from __future__ import annotations
from homeassistant.helpers import entity
from homeassistant.helpers.device_registry import DeviceEntryType, DeviceInfo
from .const import DOMAIN
from .devices import VASatelliteDevice
class VASatelliteEntity(entity.Entity):
    """Wyoming satellite entity."""

    _attr_has_entity_name = True
    _attr_should_poll = False

    def __init__(self, device: VASatelliteDevice) -> None:
        """Initialize entity."""
        self._device = device
        # Each subclass supplies an entity_description; its key makes the
        # unique id distinct for every entity on the same satellite.
        satellite_id = device.satellite_id
        self._attr_unique_id = f"{satellite_id}-{self.entity_description.key}"
        self._attr_device_info = DeviceInfo(
            entry_type=DeviceEntryType.SERVICE,
            identifiers={(DOMAIN, satellite_id)},
        )
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/button.py | custom_components/vaca/button.py | """Wyoming button entities."""
from __future__ import annotations
from typing import TYPE_CHECKING
from homeassistant.components.button import ButtonEntity, ButtonEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .const import DOMAIN
from .custom import CustomActions
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
if TYPE_CHECKING:
from homeassistant.components.wyoming import DomainDataItem
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up button entities."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    # Setup is only forwarded for satellites
    assert item.device is not None
    device: VASatelliteDevice = item.device  # type: ignore[assignment]
    buttons = [
        WyomingSatelliteWakeButton(device),
        WyomingSatelliteRefreshButton(device),
        WyomingScreenSleepButton(device),
        WyomingScreenWakeButton(device),
    ]
    async_add_entities(buttons)
class WyomingSatelliteWakeButton(VASatelliteEntity, ButtonEntity):
    """Button that sends a wake action to the satellite."""
    entity_description = ButtonEntityDescription(
        key="wake", translation_key="wake", icon="mdi:account-voice"
    )
    async def async_press(self) -> None:
        """Press the button."""
        # Fire-and-forget: no acknowledgement is awaited from the device.
        self._device.send_custom_action(CustomActions.WAKE)
class WyomingSatelliteRefreshButton(VASatelliteEntity, ButtonEntity):
    """Button that sends a refresh action to the satellite."""
    entity_description = ButtonEntityDescription(
        key="refresh", translation_key="refresh", icon="mdi:web-refresh"
    )
    async def async_press(self) -> None:
        """Press the button."""
        # Fire-and-forget: no acknowledgement is awaited from the device.
        self._device.send_custom_action(CustomActions.REFRESH)
class WyomingScreenSleepButton(VASatelliteEntity, ButtonEntity):
    """Button that puts the satellite screen to sleep."""
    entity_description = ButtonEntityDescription(
        key="screen_sleep", translation_key="screen_sleep", icon="mdi:monitor-off"
    )
    async def async_press(self) -> None:
        """Press the button."""
        self._device.send_custom_action(CustomActions.SCREEN_SLEEP)
class WyomingScreenWakeButton(VASatelliteEntity, ButtonEntity):
    """Button that wakes the satellite screen."""
    entity_description = ButtonEntityDescription(
        key="screen_wake", translation_key="screen_wake", icon="mdi:monitor-shimmer"
    )
    async def async_press(self) -> None:
        """Press the button."""
        self._device.send_custom_action(CustomActions.SCREEN_WAKE)
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/select.py | custom_components/vaca/select.py | """Select entities for Wyoming integration."""
from __future__ import annotations
from typing import TYPE_CHECKING, Final
from homeassistant.components.assist_pipeline import (
AssistPipelineSelect,
VadSensitivity,
VadSensitivitySelect,
)
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EntityCategory
from homeassistant.core import HomeAssistant
from homeassistant.helpers import restore_state
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from .const import DOMAIN
from .devices import VASatelliteDevice
from .entity import VASatelliteEntity
if TYPE_CHECKING:
from homeassistant.components.wyoming import DomainDataItem
_NOISE_SUPPRESSION_LEVEL: Final = {
"off": 0,
"low": 1,
"medium": 2,
"high": 3,
"max": 4,
}
_DEFAULT_NOISE_SUPPRESSION_LEVEL: Final = "off"
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddConfigEntryEntitiesCallback,
) -> None:
    """Set up select entities."""
    item: DomainDataItem = hass.data[DOMAIN][config_entry.entry_id]
    # Setup is only forwarded for satellites
    assert item.device is not None
    device: VASatelliteDevice = item.device  # type: ignore[assignment]
    selects = [
        WyomingSatellitePipelineSelect(hass, device),
        WyomingSatelliteNoiseSuppressionLevelSelect(device),
        WyomingSatelliteVadSensitivitySelect(hass, device),
        WyomingSatelliteWakeWordSelect(device),
        WyomingSatelliteWakeWordSoundSelect(device),
        WyomingSatelliteScreenTimeoutSelect(device),
    ]
    async_add_entities(selects)
class WyomingSatellitePipelineSelect(VASatelliteEntity, AssistPipelineSelect):
    """Pipeline selector for Wyoming satellites."""
    def __init__(self, hass: HomeAssistant, device: VASatelliteDevice) -> None:
        """Initialize a pipeline selector."""
        # NOTE(review): stored as both self.device (here) and self._device
        # (by VASatelliteEntity.__init__) — confirm the duplication is wanted.
        self.device = device
        # Initialise both bases explicitly; each sets different attributes.
        VASatelliteEntity.__init__(self, device)
        AssistPipelineSelect.__init__(self, hass, DOMAIN, device.satellite_id)
    async def async_select_option(self, option: str) -> None:
        """Select an option."""
        await super().async_select_option(option)
        # Mirror the chosen pipeline name onto the satellite device state.
        self.device.set_pipeline_name(option)
class WyomingSatelliteNoiseSuppressionLevelSelect(
    VASatelliteEntity, SelectEntity, restore_state.RestoreEntity
):
    """Entity to represent noise suppression level setting."""

    entity_description = SelectEntityDescription(
        key="noise_suppression_level",
        translation_key="noise_suppression_level",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_current_option = _DEFAULT_NOISE_SUPPRESSION_LEVEL
    # Iterating the mapping yields its keys in insertion order.
    _attr_options = list(_NOISE_SUPPRESSION_LEVEL)

    async def async_added_to_hass(self) -> None:
        """Restore the previously selected level, if still valid."""
        await super().async_added_to_hass()
        last = await self.async_get_last_state()
        if last is not None and last.state in self.options:
            self._attr_current_option = last.state

    async def async_select_option(self, option: str) -> None:
        """Select an option."""
        self._attr_current_option = option
        self.async_write_ha_state()
        # Map the textual level onto the numeric value the satellite expects.
        self._device.set_noise_suppression_level(_NOISE_SUPPRESSION_LEVEL[option])
class WyomingSatelliteVadSensitivitySelect(VASatelliteEntity, VadSensitivitySelect):
    """VAD sensitivity selector for Wyoming satellites."""
    def __init__(self, hass: HomeAssistant, device: VASatelliteDevice) -> None:
        """Initialize a VAD sensitivity selector."""
        # NOTE(review): stored as both self.device (here) and self._device
        # (by VASatelliteEntity.__init__) — confirm the duplication is wanted.
        self.device = device
        # Initialise both bases explicitly; each sets different attributes.
        VASatelliteEntity.__init__(self, device)
        VadSensitivitySelect.__init__(self, hass, device.satellite_id)
    async def async_select_option(self, option: str) -> None:
        """Select an option."""
        await super().async_select_option(option)
        # Forward the new sensitivity to the satellite device state.
        self.device.set_vad_sensitivity(VadSensitivity(option))
class WyomingSatelliteWakeWordSelect(
    VASatelliteEntity, SelectEntity, restore_state.RestoreEntity
):
    """Entity to represent wake word setting."""
    entity_description = SelectEntityDescription(
        key="wake_word",
        translation_key="wake_word",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    # NOTE(review): default is the raw id form ("hey_jarvis") while the
    # options list is title-cased ("Hey Jarvis"); async_added_to_hass
    # replaces it, but confirm nothing reads current_option before then.
    _attr_current_option = "hey_jarvis"
    @property
    def options(self) -> list[str]:
        """Return the list of available wake word options.

        "None" is always first, so it becomes the fallback default.
        """
        options = ["None"]
        options.extend(self.get_wake_word_options())
        return options
    def get_wake_word_options(self) -> list[str]:
        """Return the list of available wake word options."""
        wake_options: list[str] = []
        if self._device.info and self._device.info.wake:
            # The satellite exposes a pseudo wake program listing every
            # installed model; display names derive from the model names.
            for wake_program in self._device.info.wake:
                if wake_program.name == "available_wake_words":
                    wake_options = [
                        model.name.replace("_", " ").title()
                        for model in wake_program.models
                    ]
        return wake_options
    async def async_added_to_hass(self) -> None:
        """When entity is added to Home Assistant."""
        await super().async_added_to_hass()
        state = await self.async_get_last_state()
        if state is not None and state.state in self.options:
            await self.async_select_option(state.state)
        # Default to the first available option if no state is found
        elif self.options:
            await self.async_select_option(self.options[0])
    async def async_select_option(self, option: str) -> None:
        """Select an option."""
        self._attr_current_option = option
        self.async_write_ha_state()
        # Convert the display form back to the id form
        # ("Hey Jarvis" -> "hey_jarvis") before sending to the device.
        self._device.set_custom_setting("wake_word", option.lower().replace(" ", "_"))
class WyomingSatelliteWakeWordSoundSelect(
    VASatelliteEntity, SelectEntity, restore_state.RestoreEntity
):
    """Entity to represent wake word sound setting."""

    entity_description = SelectEntityDescription(
        key="wake_word_sound",
        translation_key="wake_word_sound",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_current_option = "havpe"
    _attr_options = ["none", "alexa", "havpe", "ding", "bubble"]

    async def async_added_to_hass(self) -> None:
        """Restore the previously selected sound, if still valid."""
        await super().async_added_to_hass()
        last = await self.async_get_last_state()
        if last is not None and last.state in self.options:
            await self.async_select_option(last.state)

    async def async_select_option(self, option: str) -> None:
        """Select an option."""
        self._attr_current_option = option
        self.async_write_ha_state()
        # Push the new sound choice down to the satellite app.
        self._device.set_custom_setting("wake_word_sound", option)
class WyomingSatelliteScreenTimeoutSelect(
    VASatelliteEntity, SelectEntity, restore_state.RestoreEntity
):
    """Entity to represent screen timeout setting."""

    entity_description = SelectEntityDescription(
        key="screen_timeout",
        translation_key="screen_timeout",
        entity_category=EntityCategory.CONFIG,
    )
    _attr_should_poll = False
    _attr_current_option = "60"
    # Timeout choices in seconds, kept as strings for the select UI.
    _attr_options = ["15", "30", "60", "120", "300", "600", "1800"]

    async def async_added_to_hass(self) -> None:
        """Restore the previously selected timeout, if still valid."""
        await super().async_added_to_hass()
        last = await self.async_get_last_state()
        if last is not None and last.state in self.options:
            await self.async_select_option(last.state)

    async def async_select_option(self, option: str) -> None:
        """Select an option."""
        self._attr_current_option = option
        self.async_write_ha_state()
        # The satellite expects the timeout as an integer number of seconds.
        self._device.set_custom_setting(self.entity_description.key, int(option))
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
msp1974/ViewAssist_Companion_App | https://github.com/msp1974/ViewAssist_Companion_App/blob/5f4533c753120a8b59087b19fda2b26c42284aa7/custom_components/vaca/custom.py | custom_components/vaca/custom.py | """# Custom components for View Assist satellite integration with Wyoming events."""
from dataclasses import dataclass
from enum import StrEnum
import logging
from typing import Any
from awesomeversion import AwesomeVersion
from wyoming.event import Event, Eventable
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from homeassistant.loader import async_get_integration
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
_CUSTOM_EVENT_TYPE = "custom-event"
_PIPELINE_ENDED_EVENT_TYPE = "pipeline-ended"
ACTION_EVENT_TYPE = "action"
CAPABILITIES_EVENT_TYPE = "capabilities"
SETTINGS_EVENT_TYPE = "settings"
STATUS_EVENT_TYPE = "status"
class CustomActions(StrEnum):
    """Custom actions that can be sent to the satellite app.

    Values are the wire strings used in the custom-event protocol.
    """
    # Media control actions.
    MEDIA_PLAY_MEDIA = "play-media"
    MEDIA_PLAY = "play"
    MEDIA_PAUSE = "pause"
    MEDIA_STOP = "stop"
    MEDIA_SET_VOLUME = "set-volume"
    # Browser / screen / interaction actions.
    REFRESH = "refresh"
    SCREEN_SLEEP = "screen-sleep"
    SCREEN_WAKE = "screen-wake"
    TOAST_MESSAGE = "toast-message"
    WAKE = "wake"
@dataclass
class PipelineEnded(Eventable):
    """Event triggered when a pipeline ends.

    Carries no payload; it is purely a type marker on the wire.
    """
    @staticmethod
    def is_type(event_type: str) -> bool:
        """Check if the event type matches."""
        return event_type == _PIPELINE_ENDED_EVENT_TYPE
    def event(self) -> Event:
        """Create an event for the pipeline ended."""
        return Event(type=_PIPELINE_ENDED_EVENT_TYPE)
    @staticmethod
    def from_event(event: Event) -> "PipelineEnded":
        """Create a PipelineEnded instance from an event."""
        # The event argument is intentionally unused: there is no payload.
        return PipelineEnded()
@dataclass
class CustomEvent(Eventable):
    """Custom event class."""
    event_type: str
    """Type of the event."""
    event_data: dict[str, Any] | None = None
    """Data associated with the event."""
    @staticmethod
    def is_type(event_type: str) -> bool:
        """Check if the event type matches."""
        return event_type == _CUSTOM_EVENT_TYPE
    def event(self) -> Event:
        """Create an event for the custom event."""
        # Outgoing payload: event_data is flattened into the top-level data.
        data = {"event_type": self.event_type}
        if self.event_data is not None:
            data.update(self.event_data)
        return Event(
            type=_CUSTOM_EVENT_TYPE,
            data=data,
        )
    @staticmethod
    def from_event(event: Event) -> "CustomEvent":
        """Create a CustomEvent instance from an event."""
        # NOTE(review): event() flattens event_data into the top-level
        # payload, while this reads it from a nested "data" key — confirm
        # the satellite nests incoming payloads under "data".
        return CustomEvent(
            event_type=event.data.get("event_type", "unknown"),
            event_data=event.data.get("data"),
        )
async def getIntegrationVersion(hass: HomeAssistant) -> str | AwesomeVersion | None:
    """Get the integration version.

    Falls back to "0.0.0" when no integration object is returned.
    """
    integration = await async_get_integration(hass, DOMAIN)
    return integration.version if integration else "0.0.0"
def getVADashboardPath(hass: HomeAssistant, uuid: str) -> str:
    """Get the dashboard path.

    Walks the View Assist config entries for a "vaca" entry whose
    mic-device entity belongs to the config entry identified by *uuid*,
    returning that entry's "home" option, else the master entry's, else
    the "view-assist" default. Returns "" when no match is found.
    """
    # Look for VA and a config entry that uses this uuid for display. Then get the dashboard path
    # from it or the master entry. If not set, return empty string
    if entries := hass.config_entries.async_entries(
        "view_assist", include_disabled=False
    ):
        entity_reg = er.async_get(hass)
        for entry in entries:
            try:
                if entry.data["type"] == "vaca":
                    if mic_device := entry.data.get("mic_device", {}):
                        # Get device id for this entity
                        if mic_device_entity := entity_reg.async_get(mic_device):
                            entry_id = mic_device_entity.config_entry_id
                            if entry_id == uuid:
                                # Entry-specific path wins over the master's.
                                if home := entry.options.get("home"):
                                    return home
                                # Look for master entry
                                for master_entry in entries:
                                    if master_entry.data["type"] == "master_config":
                                        if home := master_entry.options.get("home"):
                                            return home
                                # Matched entry but no path configured anywhere.
                                return "view-assist"
            except Exception as e:  # noqa: BLE001
                # Broad catch is deliberate: a malformed entry must not stop
                # the scan of the remaining entries.
                _LOGGER.error("Error getting dashboard path: %s", e)
                continue
    return ""
| python | Apache-2.0 | 5f4533c753120a8b59087b19fda2b26c42284aa7 | 2026-01-05T07:13:44.720990Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.