| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
NMTGMinor | NMTGMinor-master/onmt/legacy/UniversalTransformer/Layers.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.static_dropout import StaticDropout
Linear=XavierLinear
def contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class UniversalEncoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
pos_encoder: module that adds position-based embeddings
time_encoder: module that adds embeddings based on the time step (the recurrence loop)
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
def __init__(self, h, d_model, p, d_ff, pos_encoder, time_encoder, attn_p=0.1, version=1.0):
super(UniversalEncoderLayer, self).__init__()
self.version = version
# position and time embeddings are added to the input before the layer
self.pos_encoder = pos_encoder
self.time_encoder = time_encoder
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, attn_mask, t, pad_mask=None):
# apply layer normalization
query = self.preprocess_attn(input)
# add position encoding and time encoding
query = self.pos_encoder(query) + self.time_encoder(t)
out, _ = self.multihead(query, query, query, attn_mask,
query_mask=pad_mask, value_mask=pad_mask)
input = self.postprocess_attn(out, input, mask=pad_mask)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input),
mask=pad_mask)
input = self.postprocess_ffn(out, input)
return input
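# Illustrative usage sketch (names are placeholders; see UniversalTransformerEncoder.forward
# in Models.py below): the Universal Transformer applies this same layer repeatedly and
# feeds the step index t to the time encoder, roughly
#   context = emb
#   for t in range(n_steps):
#       context = encoder_layer(context, mask_src, t, pad_mask)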
class UniversalDecoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead_tgt: multi-head self attentions layer
multihead_src: multi-head encoder-decoder attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
context: batch_size x len_src x d_model
mask_tgt: batch_size x len_query x len_key or broadcastable
mask_src: batch_size x len_query x len_src or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, p, d_ff, position_encoder, time_encoder, attn_p=0.1, version=1.0):
super(UniversalDecoderLayer, self).__init__()
self.version = version
self.position_encoder = position_encoder
self.time_encoder = time_encoder
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead_tgt = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, static=onmt.constants.static)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, context, t, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
#~ print(input.size())
#~ print(context.size())
#~ print(pad_mask_tgt.size())
query = self.preprocess_attn(input)
# add position encoding and time encoding
query = self.position_encoder(query) + self.time_encoder(t)
self_context = query
out, _ = self.multihead_tgt(query, self_context, self_context, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, context, mask_src,
query_mask=pad_mask_tgt, value_mask=pad_mask_src)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, coverage
def step(self, input, context, pos_step, t, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input, mask=pad_mask_tgt)
# add position encoding and time encoding (before the buffer because the previous steps are already added)
query = self.position_encoder(query, t=pos_step) + self.time_encoder(t)
if buffer is not None:
buffer = torch.cat([buffer, query], dim=1)
else:
buffer = query
out, _ = self.multihead_tgt(query, buffer, buffer, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, context, mask_src,
query_mask=pad_mask_tgt, value_mask=None)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, coverage, buffer
class TimeEncoding(nn.Module):
"""Adds positional embeddings to standard word embeddings
This matches the original TensorFlow implementation at https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py.
Args:
d_model: dimension of model
p: dropout probability
len_max: maximum number of recurrence steps with pre-calculated time embeddings
Inputs:
t: index of the current recurrence step
Outputs Shapes:
out: 1 x d_model (broadcastable over batch_size x len_seq x d_model)
"""
def __init__(self, d_model, p=0, len_max=64):
# save a fixed time embedding matrix up to len_max,
# so that it does not need to be recreated every time
super(TimeEncoding , self).__init__()
self.len_max=len_max
self.d_model = d_model
self.renew(len_max)
self.p = p
def renew(self, new_max_len):
# delete the old buffer to avoid PyTorch's error when registering a new one
if hasattr(self, 'time_emb'):
del self.time_emb
times = torch.arange(0,new_max_len).float()
num_timescales = self.d_model // 2
log_timescale_increment = math.log(10000) / (num_timescales-1)
inv_timescales = torch.exp(torch.arange(0, num_timescales).float() * -log_timescale_increment)
scaled_time = times.unsqueeze(1) * inv_timescales.unsqueeze(0)
time_emb = torch.cat((torch.sin(scaled_time), torch.cos(scaled_time)), 1)
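# Resulting timing signal (same construction as the tensor2tensor reference above):
#   time_emb[t, i]                  = sin(t / 10000^(i / (num_timescales - 1)))
#   time_emb[t, num_timescales + i] = cos(t / 10000^(i / (num_timescales - 1)))
# i.e. the first half of each vector holds sines and the second half cosines.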
# wrap in a buffer so that model can be moved to GPU
self.register_buffer('time_emb', time_emb)
def forward(self, t):
# print('hello')
# out = word_emb + Variable(self.pos_emb[:len_seq, :][-1, :], requires_grad=False)
time_emb = Variable(self.time_emb[t, :], requires_grad=False) # 1 x dim
# out should have size 1 x 1 x dim
# all positions share the time embedding
# all batch elements share the time embedding
out = time_emb.unsqueeze(0)
return out
| 11,195 | 37.740484 | 156 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/UniversalTransformer/Models.py | import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.legacy.UniversalTransformer.Layers import UniversalDecoderLayer, UniversalEncoderLayer
#~ from onmt.modules.ParallelTransformer.Layers import ParallelEncoderLayer
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.modules.Checkpoint import checkpoint
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from torch.autograd import Variable
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
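# custom_layer wraps a module in a plain function so it can be handed to the activation
# checkpointing utility, which re-runs the forward pass during backprop to save memory.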
class UniversalTransformerEncoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, dicts, positional_encoder, time_encoder):
super(UniversalTransformerEncoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.time_encoder = time_encoder
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.positional_encoder = positional_encoder
self.recurrent_layer = UniversalEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.positional_encoder, self.time_encoder, self.attn_dropout)
#~ self.layer_modules = nn.ModuleList([ParallelEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
def forward(self, input, **kwargs):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
#~ emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = list()
for t in range(self.layers):
context = self.recurrent_layer(context, mask_src, t, pad_mask) # batch_size x len_src x d_model
#~ for i, layer in enumerate(self.layer_modules):
#~
#~
#~ if len(self.layer_modules) - i <= onmt.Constants.checkpointing and self.training:
#~ context, norm_input = checkpoint(custom_layer(layer), context, mask_src, pad_mask)
#~
#~ print(type(context))
#~ else:
#~ context, norm_input = layer(context, mask_src, pad_mask) # batch_size x len_src x d_model
#~
#~ if i > 0: # don't keep the norm input of the first layer (a.k.a embedding)
#~ memory_bank.append(norm_input)
#~
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
return context, mask_src
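# Minimal usage sketch (the surrounding training code builds opt, dicts and the two
# encoder modules; names here are illustrative only):
#   encoder = UniversalTransformerEncoder(opt, dicts, positional_encoder, time_encoder)
#   context, mask_src = encoder(src)   # src: batch_size x len_src LongTensor of word ids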
class UniversalTransformerDecoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder, time_encoder):
super(UniversalTransformerDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.positional_encoder = positional_encoder
self.time_encoder = time_encoder
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.recurrent_layer = UniversalDecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.positional_encoder, self.time_encoder, self.attn_dropout)
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
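# Upper-triangular (diagonal offset 1) byte mask: entry (i, j) is 1 when j > i, marking
# the future target positions that decoder self-attention must never look at.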
self.register_buffer('mask', mask)
def renew_buffer(self, new_len):
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def mark_pretrained(self):
self.pretrained_point = self.layers
def forward(self, input, context, src, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
#~ if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
#~ """ Adding positional encoding """
#~ emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
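# mask_tgt combines the padding mask (PAD positions) with the causal mask above, so a
# target position can attend neither to padded tokens nor to positions after itself.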
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
#~ memory_bank = None
for t in range(self.layers):
output, coverage = self.recurrent_layer(output, context, t, mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
#~ for i, layer in enumerate(self.layer_modules):
#~ if len(self.layer_modules) - i <= onmt.Constants.checkpointing and self.training:
#~
#~ output, coverage = checkpoint(custom_layer(layer), output, context[i], mask_tgt, mask_src,
#~ pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
#~
#~ else:
#~ output, coverage = layer(output, context[i], mask_tgt, mask_src,
#~ pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
def step(self, input, decoder_state):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
context = decoder_state.context.transpose(0, 1)
buffer = decoder_state.buffer
src = decoder_state.src.transpose(0, 1)
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
output_buffer = list()
batch_size = input_.size(0)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
#~ if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
#~ if self.time == 'positional_encoding':
#~ emb = self.time_transformer(emb, t=input.size(1))
pos_step = input.size(1)
# emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
# batch_size x 1 x len_src
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
# mask_tgt = self.mask[:len_tgt, :len_tgt].unsqueeze(0).repeat(batch_size, 1, 1)
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for t in range(self.layers):
buffer_ = buffer[t] if buffer is not None else None
assert(output.size(1) == 1)
output, coverage, buffer_ = self.recurrent_layer.step(output, context, pos_step, t, mask_tgt, mask_src,
pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
output_buffer.append(buffer_)
#~ for i, layer in enumerate(self.layer_modules):
#~
#~ buffer_ = buffer[i] if buffer is not None else None
#~ assert(output.size(1) == 1)
#~ output, coverage, buffer_ = layer.step(output, context[i], mask_tgt, mask_src,
#~ pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
#~
#~ output_buffer.append(buffer_)
buffer = torch.stack(output_buffer)
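# buffer now holds one cached tensor per recurrent step: the normalized queries of all
# previously decoded positions, so the next decoding step can attend to them without
# re-running the full target prefix through the layer.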
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
decoder_state._update_state(buffer)
return output, coverage
| 13,602 | 38.428986 | 178 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/ParallelTransformer/Layers.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.static_dropout import StaticDropout
Linear=XavierLinear
def contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class ParallelEncoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
p: dropout probability
d_ff: dimension of feed forward
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0):
super(ParallelEncoderLayer, self).__init__()
self.version = version
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, attn_mask, pad_mask=None, residual_dropout=0.0):
query = self.preprocess_attn(input)
out, _ = self.multihead(query, query, query, attn_mask,
query_mask=pad_mask, value_mask=pad_mask)
if residual_dropout > 0:
input_ = F.dropout(input, residual_dropout, self.training, False)
input = self.postprocess_attn(out, input_, mask=pad_mask)
#~ input = self.postprocess_attn(out) + input
else:
input = self.postprocess_attn(out, input, mask=pad_mask)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input),
mask=pad_mask)
input = self.postprocess_ffn(out, input)
# return the query which is the normalized input
return input, query
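# The normalized query is returned together with the layer output so that the parallel
# encoder (see Models.py) can stack these pre-attention inputs into a memory bank that
# each decoder layer attends to separately.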
#~
#~ class ParallelDecoderLayer(nn.Module):
#~ """Wraps multi-head attentions and position-wise feed forward into one layer of decoder
#~
#~ Args:
#~ h: number of heads
#~ d_model: dimension of model
#~ p: dropout probability
#~ d_ff: dimension of feed forward
#~
#~ Params:
#~ multihead_tgt: multi-head self attentions layer
#~ multihead_src: multi-head encoder-decoder attentions layer
#~ feedforward: feed forward layer
#~
#~ Input Shapes:
#~ query: batch_size x len_query x d_model
#~ key: batch_size x len_key x d_model
#~ value: batch_size x len_key x d_model
#~ context: batch_size x len_src x d_model
#~ mask_tgt: batch_size x len_query x len_key or broadcastable
#~ mask_src: batch_size x len_query x len_src or broadcastable
#~
#~ Output Shapes:
#~ out: batch_size x len_query x d_model
#~ coverage: batch_size x len_query x len_key
#~
#~ """
#~
#~ def __init__(self, h, d_model, p, d_ff, attn_p=0.1):
#~ super(FCTDecoderLayer, self).__init__()
#~
#~ self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
#~ self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~
#~ self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
#~ self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~
#~ self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
#~ self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~
#~
#~ self.multihead_tgt = HierarchicalMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_tgt = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_tgt = FlatSumMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = FlatSumMultiHeadAttention(h, d_model, attn_p=attn_p)
#~
#~ if onmt.Constants.activation_layer == 'linear_relu_linear':
#~ ff_p = p
#~ feedforward = FeedForward(d_model, d_ff, ff_p)
#~ elif onmt.Constants.activation_layer == 'maxout':
#~ k = int(math.ceil(d_ff / d_model))
#~ feedforward = MaxOut(d_model, d_model, k)
#~ self.feedforward = Bottle(feedforward)
#~
#~
#~ def forward(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
#~
#~ """ Self attention layer
#~ layernorm > attn > dropout > residual
#~ """
#~
#~ query = self.preprocess_attn(input, mask=pad_mask_tgt)
#~
#~ if memory_bank is None:
#~ memory_bank = query.unsqueeze(0)
#~
#~ else:
#~ memory_bank = query.unsqueeze(0)
#~ memory_bank = torch.cat([memory_bank, query.unsqueeze(0)], dim=0) # n_layer x batch_size x len_src x hidden
#~
#~
#~ out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
#~ query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
#~
#~ input = self.postprocess_attn(out, input)
#~
#~ """ Context Attention layer
#~ layernorm > attn > dropout > residual
#~ """
#~
#~ query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
#~ out, coverage = self.multihead_src(query, context, mask_src,
#~ query_mask=pad_mask_tgt, value_mask=pad_mask_src)
#~ input = self.postprocess_src_attn(out, input)
#~
#~ """ Feed forward layer
#~ layernorm > ffn > dropout > residual
#~ """
#~ out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
#~ mask=pad_mask_tgt)
#~ input = self.postprocess_ffn(out, input)
#~
#~ return input, memory_bank, coverage
#~
#~
#~ def step(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
#~
#~ query = self.preprocess_attn(input, mask=pad_mask_tgt)
#~
#~ if buffer is not None:
#~ buffer = torch.cat([buffer, query], dim=1)
#~ else:
#~ buffer = query
#~
#~ if memory_bank is None:
#~ memory_bank = buffer.unsqueeze(0)
#~
#~ else:
#~ memory_bank = torch.cat([memory_bank, buffer.unsqueeze(0)], dim=0) # batch_size x n_layer x len_src x hidden
#~
#~
#~ out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
#~ query_mask=None, value_mask=None)
#~
#~ input = self.postprocess_attn(out, input)
#~
#~ """ Context Attention layer
#~ layernorm > attn > dropout > residual
#~ """
#~
#~ query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
#~ out, coverage = self.multihead_src(query, context, mask_src,
#~ query_mask=None, value_mask=None)
#~ input = self.postprocess_src_attn(out, input)
#~
#~ """ Feed forward layer
#~ layernorm > ffn > dropout > residual
#~ """
#~ out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
#~ mask=pad_mask_tgt)
#~ input = self.postprocess_ffn(out, input)
#~
#~ return input, memory_bank, coverage, buffer
| 9,252 | 40.124444 | 123 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/ParallelTransformer/Models.py | import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.legacy.ParallelTransformer.Layers import ParallelEncoderLayer
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.modules.Checkpoint import checkpoint
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from torch.autograd import Variable
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class ParallelTransformerEncoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, dicts, positional_encoder):
super(ParallelTransformerEncoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
if hasattr(opt, 'grow_dropout'):
self.grow_dropout = opt.grow_dropout
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
#~ self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([ParallelEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
def add_layers(self, n_new_layer):
self.new_modules = list()
self.layers += n_new_layer
for i in range(n_new_layer):
layer = ParallelEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout)
# the first layer will use the preprocessing which is the last postprocessing
if i == 0:
layer.preprocess_attn.load_state_dict(self.postprocess_layer.state_dict())
#~ layer.preprocess_attn.layer_norm.function.weight.requires_grad = False
#~ layer.preprocess_attn.layer_norm.function.bias.requires_grad = False
#~ if hasattr(layer.postprocess_attn, 'k'):
#~ layer.postprocess_attn.k.data.fill_(0.01)
# replace the last postprocessing layer with a new one
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.layer_modules.append(layer)
def mark_pretrained(self):
self.pretrained_point = self.layers
def forward(self, input, grow=False):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
if grow:
return self.forward_grow(input)
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = list()
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
context, norm_input = checkpoint(custom_layer(layer), context, mask_src, pad_mask)
#~ print(type(context))
else:
context, norm_input = layer(context, mask_src, pad_mask) # batch_size x len_src x d_model
if i > 0: # don't keep the norm input of the first layer (a.k.a embedding)
memory_bank.append(norm_input)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
# make a huge memory bank on the encoder side
memory_bank.append(context)
memory_bank = torch.stack(memory_bank)
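# memory_bank has shape n_layers x batch_size x len_src x d_model; decoder layer i
# attends to memory_bank[i] instead of a single shared encoder output.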
return memory_bank, mask_src
def forward_grow(self, input):
"""
Inputs Shapes:
input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
with torch.no_grad():
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x len_src x 1 for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = list()
for i in range(self.pretrained_point):
layer = self.layer_modules[i]
context, norm_input = layer(context, mask_src, pad_mask) # batch_size x len_src x d_model
if i > 0: # don't keep the norm input of the first layer (a.k.a embedding)
memory_bank.append(norm_input)
for i in range(self.layers - self.pretrained_point):
res_drop_rate = 0.0
if i == 0:
res_drop_rate = self.grow_dropout
layer = self.layer_modules[self.pretrained_point + i]
context, norm_input = layer(context, mask_src, pad_mask, residual_dropout=res_drop_rate) # batch_size x len_src x d_model
memory_bank.append(norm_input)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
# make a huge memory bank on the encoder side
memory_bank.append(context)
memory_bank = torch.stack(memory_bank)
return memory_bank, mask_src
class ParallelTransformerDecoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder):
super(ParallelTransformerDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
if hasattr(opt, 'grow_dropout'):
self.grow_dropout = opt.grow_dropout
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
#~ self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=onmt.constants.static)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([DecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def renew_buffer(self, new_len):
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def mark_pretrained(self):
self.pretrained_point = self.layers
def add_layers(self, n_new_layer):
self.new_modules = list()
self.layers += n_new_layer
for i in range(n_new_layer):
layer = DecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout)
# the first layer will use the preprocessing which is the last postprocessing
if i == 0:
# layer.preprocess_attn = self.postprocess_layer
layer.preprocess_attn.load_state_dict(self.postprocess_layer.state_dict())
#~ layer.preprocess_attn.layer_norm.function.weight.requires_grad = False
#~ layer.preprocess_attn.layer_norm.function.bias.requires_grad = False
# replace the last postprocessing layer with a new one
#~ if hasattr(layer.postprocess_attn, 'k'):
#~ layer.postprocess_attn.k.data.fill_(0.01)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.layer_modules.append(layer)
def forward(self, input, context, src, grow=False):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
if grow:
return self.forward_grow(input, context, src)
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
#~ memory_bank = None
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
output, coverage = checkpoint(custom_layer(layer), output, context[i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
else:
output, coverage = layer(output, context[i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
def forward_grow(self, input, context, src):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
with torch.no_grad():
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
for i in range(self.pretrained_point):
layer = self.layer_modules[i]
output, coverage = layer(output, context[i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
for i in range(self.layers - self.pretrained_point):
res_drop_rate = 0.0
if i == 0:
res_drop_rate = self.grow_dropout
layer = self.layer_modules[self.pretrained_point + i]
output, coverage = layer(output, context[self.pretrained_point + i], mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src, residual_dropout=res_drop_rate) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
#~ def step(self, input, context, src, buffer=None):
def step(self, input, decoder_state):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
# note: transpose 1-2 because the first dimension (0) is the number of layer
context = decoder_state.context.transpose(1, 2)
buffer = decoder_state.buffer
src = decoder_state.src.transpose(0, 1)
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
output_buffer = list()
batch_size = input.size(0)
input_ = input[:,-1].unsqueeze(1)
# print(input_.size())
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
if self.time == 'positional_encoding':
emb = self.time_transformer(emb, t=input.size(1))
else:
prev_h = buffer[0] if buffer is not None else None  # reuse the stored recurrent state when available
emb = self.time_transformer(emb, prev_h)
buffer[0] = emb[1]
if isinstance(emb, tuple):
emb = emb[0] # emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
# batch_size x 1 x len_src
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
# mask_tgt = self.mask[:len_tgt, :len_tgt].unsqueeze(0).repeat(batch_size, 1, 1)
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for i, layer in enumerate(self.layer_modules):
buffer_ = buffer[i] if buffer is not None else None
assert(output.size(1) == 1)
output, coverage, buffer_ = layer.step(output, context[i], mask_tgt, mask_src,
pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
output_buffer.append(buffer_)
buffer = torch.stack(output_buffer)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
decoder_state._update_state(buffer)
return output, coverage
class ParallelTransformerDecodingState(DecoderState):
def __init__(self, src, context, beamSize=1):
self.src = src
self.context = context
self.beamSize = beamSize
self.buffer = None
self.input_seq = None
self.context = context.transpose(1, 2)
self.context = Variable(self.context.data.repeat(1, 1, beamSize, 1))
def _update_state(self, buffer):
self.buffer = buffer
def _update_beam(self, beam, b, remainingSents, idx):
for tensor in [self.src, self.input_seq] :
t_, br = tensor.size()
sent_states = tensor.view(t_, self.beamSize, remainingSents)[:, :, idx]
if isinstance(tensor, Variable):
sent_states.data.copy_(sent_states.data.index_select(
1, beam[b].getCurrentOrigin()))
else:
sent_states.copy_(sent_states.index_select(
1, beam[b].getCurrentOrigin()))
nl, br_, t_, d_ = self.buffer.size()
sent_states = self.buffer.view(nl, self.beamSize, remainingSents, t_, d_)[:, :, idx, :, :]
sent_states.data.copy_(sent_states.data.index_select(
1, beam[b].getCurrentOrigin()))
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def _prune_complete_beam(self, activeIdx, remainingSents):
model_size = self.context.size(-1)
def updateActive4D_time_first(t):
# select only the remaining active sentences
nl, t_, br_, d_ = t.size()
view = t.data.view(nl, t_, -1, remainingSents, model_size)
newSize = list(t.size())
newSize[2] = newSize[2] * len(activeIdx) // remainingSents
return Variable(view.index_select(3, activeIdx)
.view(*newSize))
def updateActive2D(t):
if isinstance(t, Variable):
# select only the remaining active sentences
view = t.data.view(-1, remainingSents)
newSize = list(t.size())
newSize[-1] = newSize[-1] * len(activeIdx) // remainingSents
return Variable(view.index_select(1, activeIdx)
.view(*newSize))
else:
view = t.view(-1, remainingSents)
newSize = list(t.size())
newSize[-1] = newSize[-1] * len(activeIdx) // remainingSents
new_t = view.index_select(1, activeIdx).view(*newSize)
return new_t
def updateActive4D(t):
# select only the remaining active sentences
nl, br_, t_, d_ = t.size()
view = t.data.view(nl, -1, remainingSents, t_, model_size)
newSize = list(t.size())
newSize[1] = newSize[1] * len(activeIdx) // remainingSents
return Variable(view.index_select(2, activeIdx)
.view(*newSize))
self.context = updateActive4D_time_first(self.context)
self.input_seq = updateActive2D(self.input_seq)
self.src = updateActive2D(self.src)
self.buffer = updateActive4D(self.buffer)
| 25,098 | 39.417069 | 175 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/distance_transformer_layers.py | import math
import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.utils import flip
from onmt.modules.bottle import Bottle
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish, FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.relative_attention import LearnableRelMultiHeadAttn
class DistanceTransformerEncoderLayer(nn.Module):
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, variational=False, death_rate=0.0,
max_len=64, **kwargs):
super(DistanceTransformerEncoderLayer, self).__init__()
self.variational = variational
self.death_rate = death_rate
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
# self.multihead = MultiHeadAttention(h, d_model, attn_p=attn_p, share=2)
d_head = d_model // h
self.multihead = LearnableRelMultiHeadAttn(h, d_model, d_head, dropatt=attn_p, max_len=max_len)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
elif onmt.constants.activation_layer == 'linear_swish_linear':
ff_p = p
feedforward = FeedForwardSwish(d_model, d_ff, ff_p, variational=self.variational)
else:
raise NotImplementedError
self.feedforward = Bottle(feedforward)
def forward(self, input, attn_mask, incremental=False, incremental_cache=None, mems=None):
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
out, _, incremental_cache = self.multihead(query, attn_mask=attn_mask, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
if incremental:
return input, incremental_cache
return input
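# Stochastic depth: during training the whole block is skipped with probability
# death_rate (the coin flip above); when it does run, each residual branch is rescaled by
# 1 / (1 - death_rate) so the expected output matches evaluation-time behaviour.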
class DistanceTransformerDecoderLayer(nn.Module):
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
variational=False, death_rate=0.0, max_len=64):
super(DistanceTransformerDecoderLayer, self).__init__()
self.version = version
self.ignore_source = ignore_source
self.variational = variational
self.death_rate = death_rate
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
if not self.ignore_source:
self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p, share=2)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
d_head = d_model // h
self.multihead_tgt = LearnableRelMultiHeadAttn(h, d_model, d_head, dropatt=attn_p, max_len=max_len)
# self.multihead_tgt = MultiHeadAttention(h, d_model, attn_p=attn_p, share=1)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
elif onmt.constants.activation_layer == 'linear_swish_linear':
ff_p = p
feedforward = FeedForwardSwish(d_model, d_ff, ff_p)
else:
raise NotImplementedError
self.feedforward = Bottle(feedforward)
# def forward(self, input, context, pos_emb, r_w_bias, r_r_bias, mask_tgt, mask_src):
def forward(self, input, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True, mems=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be time first ?
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
# out, _ = self.multihead_tgt(query, pos_emb, r_w_bias, r_r_bias, attn_mask=mask_tgt)
# print(query.size(), pos_emb.size(), mask_tgt.size(), mems.size() if mems is not None else 0)
out, _, = self.multihead_tgt(query, attn_mask=mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
incremental_source = incremental and reuse_source
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
else:
coverage = None
return input, coverage, incremental_cache
def step(self, input, context, mask_tgt, mask_src, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input)
out, _, buffer = self.multihead_tgt.step(query, attn_mask=mask_tgt, buffer=buffer)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
out, coverage, buffer = self.multihead_src.step(query, context, context, mask_src, buffer=buffer)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
return input, coverage, buffer
| 9,073 | 40.43379 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/relative_unified_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.legacy.old_models.unified_transformer import UnifiedTransformer
from onmt.models.relative_transformer import SinusoidalPositionalEmbedding, LearnablePostionEmbedding, \
StreamState, StreamDecodingState
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
torch.set_printoptions(profile="full")
def seperate_tensor(input, lengths):
bsz, tgt_len = input.size(1), input.size(0)
assert (bsz == 1)
outputs = list()
# starting from the first position of the tensor
offset = 0
for length in lengths:
segment = input.narrow(0, offset, length)
offset += length
outputs.append(segment)
return outputs
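# Example: with lengths = [3, 2] and an input of shape (5, 1), this returns two views of
# shapes (3, 1) and (2, 1), i.e. the concatenated stream is cut back into consecutive
# per-sentence segments along the time dimension.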
class RelativeUnifiedTransformer(UnifiedTransformer):
"""
This class combines the encoder and the decoder into one single sequence
Joined attention between encoder and decoder parts
"""
def __init__(self, opt, src_embedding, tgt_embedding, generator, positional_encoder,
language_embeddings=None, encoder_type='text', **kwargs):
self.death_rate = opt.death_rate
self.bidirectional = opt.bidirectional
self.layer_modules = []
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_memory_size = opt.max_memory_size
# build_modules will be called from the inherited constructor
super(RelativeUnifiedTransformer, self).__init__(opt, tgt_embedding, src_embedding,
generator, positional_encoder,
language_embeddings=language_embeddings,
encoder_type=encoder_type)
self.src_embedding = src_embedding
self.tgt_embedding = tgt_embedding
# self.language_embedding = nn.Embedding(3, self.model_size, padding_idx=0)
self.generator = generator
self.ignore_source = True
self.encoder_type = opt.encoder_type
# learnable position encoding
if self.learnable_position_encoding:
self.max_pos_length = opt.max_pos_length
# pos_emb = self.model_size // self.n_heads
pos_emb = self.model_size
self.positional_encoder = LearnablePostionEmbedding(self.max_pos_length, pos_emb)
print("* Learnable position encoding with max %d positions" % self.max_pos_length)
else:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
def gen_mask(self, src, tgt):
# generate the mask for the mini-batch data
# both src and tgt are T x B
input_seq = torch.cat([src, tgt], dim=0)
seq_len = input_seq.size(0)
if self.bidirectional:
bsz, src_len = src.size(1), src.size(0)
tgt_len = tgt.size(0)
tgt_tgt_mask = torch.triu(src.new_ones(tgt_len, tgt_len), diagonal=1)
tgt_src_mask = src.new_zeros(tgt_len, src_len)
tgt_mask = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
src_src_mask = src.new_zeros(src_len, src_len)
src_tgt_mask = src.new_ones(src_len, tgt_len)
src_mask = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
attn_mask = torch.cat([src_mask, tgt_mask], dim=0)
attn_mask = attn_mask.bool().unsqueeze(-1)
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0)
attn_mask = attn_mask | pad_mask
else:
attn_mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1).bool().unsqueeze(-1) # T x T x -1
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0) # 1 x T x B
# attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
attn_mask = attn_mask | pad_mask
return attn_mask
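    # Hedged illustration (added, not in the original source): with src_len=2,
    # tgt_len=3 and bidirectional=True, gen_mask builds a 5 x 5 x 1 boolean
    # mask (before OR-ing in the padding mask) equivalent to
    #   [[0, 0, 1, 1, 1],   # source queries attend to the source only
    #    [0, 0, 1, 1, 1],
    #    [0, 0, 0, 1, 1],   # target queries see all of the source and
    #    [0, 0, 0, 0, 1],   # attend causally over the target
    #    [0, 0, 0, 0, 0]]
    # where 1 marks a blocked position.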
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = RelativeTransformerDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
ignore_source=True,
variational=self.variational_dropout, death_rate=death_r)
self.layer_modules.append(block)
def create_mask_stream(self, src, tgt, src_lengths, tgt_lengths, mem_length=0):
if self.bidirectional:
mask = None
prev_length = 0
# go through the src and tgt lengths to create mask
for i, (src_len, tgt_len) in enumerate(zip(src_lengths, tgt_lengths)):
# print("Step ", i, src_len, tgt_len)
# first, the source sentence should have full bidirectional attention to the end of itself
src_mask = src.new_zeros(src_len, src_len + prev_length)
if prev_length == 0:
mask = src_mask
else:
# everything in the past doesn't look at the future
prev_mask = src.new_ones(prev_length, src_len)
if mask is not None:
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (src_len + prev_length)
else:
mask = prev_mask
mask = torch.cat([mask, src_mask], dim=0) # (src_len + prev_length) x (src_len + prev_length)
prev_length += src_len
# the target sentence
# everything in the past doesn't look at the future
prev_mask = tgt.new_ones(prev_length, tgt_len)
# the target has unidirectional attention towards everything in the past
mlen = prev_length
qlen = tgt_len
klen = qlen + mlen
tgt_mask = torch.triu(tgt.new_ones(qlen, klen), diagonal=1 + mlen)
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (prev_len + tgt_len)
mask = torch.cat([mask, tgt_mask], dim=0) #
prev_length += tgt_len
if mem_length > 0:
past_mask = src.new_zeros(prev_length, mem_length)
mask = torch.cat([past_mask, mask], dim=1)
attn_mask = mask.bool().unsqueeze(-1)
else:
seq_len = sum(src_lengths) + sum(tgt_lengths)
mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1)
if mem_length > 0:
past_mask = src.new_zeros(seq_len, mem_length)
mask = torch.cat([past_mask, mask], dim=1)
attn_mask = mask.bool().unsqueeze(-1)
return attn_mask
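    # Hedged note (added): the streaming mask is built sentence pair by
    # sentence pair, and `mem_length` all-zero columns are prepended at the
    # end, so every current position may still attend to the pruned memory of
    # previous segments; a (qlen, qlen) mask thus becomes (qlen, qlen + mem_length).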
def forward_stream(self, batch, **kwargs):
streaming_state = kwargs.get('streaming_state', None)
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # (len_tgt x batch_size) x 1
bsz = src.size(1)
assert bsz == 1
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
# First: separate the input tensor into segments
src_segments = seperate_tensor(src, src_lengths)
tgt_segments = seperate_tensor(tgt, tgt_lengths)
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
# Second: Embedding
src_embeddings = []
for src_segment in src_segments:
src_emb = F.embedding(
src_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
src_embeddings.append(src_emb)
tgt_embeddings = []
for tgt_segment in tgt_segments:
tgt_emb = F.embedding(
tgt_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
tgt_embeddings.append(tgt_emb)
# add src1, tgt1, src2, tgt2 .... srcn, tgtn
all_embeddings = []
for (src_emb, tgt_emb) in zip(src_embeddings, tgt_embeddings):
all_embeddings.append(src_emb)
all_embeddings.append(tgt_emb)
emb = torch.cat(all_embeddings, dim=0)
# prepare attention mask
mem_length = streaming_state.prev_tgt_mem_size
attn_mask = self.create_mask_stream(src, tgt, src_lengths, tgt_lengths, mem_length=mem_length)
klen = emb.size(0) + mem_length
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
buffer = streaming_state.tgt_buffer[i]
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True, incremental_cache=buffer)
# context and context_mask are None
streaming_state.tgt_buffer[i] = buffer
# final layer norm
output = self.postprocess_layer(output)
# update the memory and then prune
streaming_state.prev_tgt_mem_size += klen
streaming_state.prune_target_memory(self.max_memory_size)
# now we have to separate the target states from the "output" to generate translations
target_outputs = []
contexts = []
offset = 0
for (src_len, tgt_len) in zip(src_lengths, tgt_lengths):
source_output = output.narrow(0, offset, src_len)
offset += src_len
target_output = output.narrow(0, offset, tgt_len)
offset += tgt_len
target_outputs.append(target_output)
contexts.append(source_output)
context = torch.cat(contexts, dim=0)
output = torch.cat(target_outputs, dim=0)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': None}
output_dict = defaultdict(lambda: None, output_dict)
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
output_dict['streaming_state'] = streaming_state
return output_dict
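    # Hedged note (added): with bidirectional attention the relative-position
    # vector used above (and in forward below) spans 2*klen - 1 offsets, e.g.
    # klen=3 gives torch.arange(2, -3, -1.0) == [2, 1, 0, -1, -2]; the
    # unidirectional case keeps only the klen non-negative offsets [klen-1, ..., 0].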
def forward(self, batch, target_mask=None, streaming=False, **kwargs):
if streaming:
return self.forward_stream(batch, **kwargs)
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # len_tgt x batch_size
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
src_emb = F.embedding(
src, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
tgt_emb = F.embedding(
tgt, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
emb = torch.cat([src_emb, tgt_emb], dim=0) # L x batch_size x H
# prepare self-attention mask
attn_mask = self.gen_mask(src, tgt)
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
klen = src_len + tgt_len
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage, _ = layer(output, None, pos_emb, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
# extract the "source" and "target" parts of the output
context = output[:src_len, :, :]
output = output[-tgt_len:, :, :]
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': target_mask}
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
return output_dict
def encode(self, input, decoder_state, input_pos=None, input_lang=None):
buffers = decoder_state.attention_buffers
src_lang = input_lang
input = input.transpose(0, 1)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, input, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
emb = src_emb
src_len = input.size(0)
bsz = input.size(1)
        mask_src_src = input.eq(onmt.constants.PAD).byte()  # src_len x batch_size
        mask_src = mask_src_src.unsqueeze(0)  # 1 x src_len x batch_size, broadcast over query positions
        attn_mask = mask_src.bool()
output = emb
        # Applying dropout (the input is already time-first: T x B x H)
output = self.preprocess_layer(output)
klen = src_len
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
# context and context_mask are None
buffer = buffers[i] if i in buffers else None
# output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer)
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
return output, decoder_state
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
# raise NotImplementedError
tgt_output = batch.get('target_output')
output_dict = self.forward(batch, target_mask=None)
context = output_dict['context']
logprobs = output_dict['logprobs']
batch_size = logprobs.size(1)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
for gen_t, tgt_t in zip(logprobs, tgt_output):
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
# This model uses pre-allocated position encoding
self.positional_encoder.renew(new_len)
        mask = torch.triu(torch.ones(new_len + 1, new_len + 1, dtype=torch.uint8), diagonal=1)
self.register_buffer('mask', mask)
return
def reset_states(self):
return
def step(self, input, decoder_state):
src = decoder_state.src if decoder_state.src is not None else None
tgt = input.transpose(0, 1)
tgt_lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffers = decoder_state.attention_buffers
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
# src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
# * math.sqrt(self.model_size)
input_ = tgt[-1:]
tgt_emb = embedded_dropout(self.tgt_embedding, input_, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
# src_lang_emb = self.language_embeddings(src_lang)
# src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
# emb = torch.cat([src_emb, tgt_emb], dim=0) # L x batch_size x H
emb = tgt_emb
# prepare self-attention mask
attn_mask = self.gen_mask(src, tgt)
# last attn_mask step
attn_mask = attn_mask[-1:, :, :]
klen = src_len + tgt_len
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True,
incremental_cache=buffer) # context and context_mask are None
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
# output = output[-1:, :, :]
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
logprobs = self.generator[0](output_dict).squeeze(0)
output_dict['src'] = decoder_state.src.transpose(0, 1)
output_dict['log_prob'] = logprobs
output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1):
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # B x T
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size, type=type)
# forward pass through the input to get the buffer
# src_transposed = src_transposed.repeat(beam_size, 1)
encoder_output, decoder_state = self.encode(src_transposed, decoder_state, input_pos=src_pos,
input_lang=src_lang)
decoder_state.src_lang = src_lang
buffers = decoder_state.attention_buffers
bsz = src.size(1)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src.device)
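        # Hedged illustration (added): e.g. bsz=2, beam_size=3 gives
        # new_order = [0, 0, 0, 1, 1, 1], so the index_select below repeats
        # every cached key/value row once per beam hypothesis.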
for l in buffers:
buffer_ = buffers[l]
if buffer_ is not None:
for k in buffer_.keys():
t_, br_, d_ = buffer_[k].size()
buffer_[k] = buffer_[k].index_select(1, new_order) # 1 for time first
return decoder_state
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
self.generator[0].linear.weight = self.tgt_embedding.weight
def share_enc_dec_embedding(self):
self.src_embedding.weight = self.tgt_embedding.weight
def init_stream(self):
param = next(self.parameters())
layers = self.layers
streaming_state = StreamState(layers, self.max_memory_size, param.device, param.dtype)
return streaming_state
| 24,270 | 36.982786 | 116 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/memory_transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.bottle import Bottle
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.linear import FeedForwardSwish
from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.legacy.old_models.unified_transformer import UnifiedTransformer
from onmt.models.relative_transformer import SinusoidalPositionalEmbedding, LearnablePostionEmbedding, \
StreamState, StreamDecodingState
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
def seperate_tensor(input, lengths):
bsz, tgt_len = input.size(1), input.size(0)
assert (bsz == 1)
outputs = list()
# starting from the first position of the tensor
offset = 0
for length in lengths:
segment = input.narrow(0, offset, length)
offset += length
outputs.append(segment)
return outputs
class MemoryTransformerDecoderLayer(nn.Module):
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, version=1.0, ignore_source=False,
variational=False, death_rate=0.0):
super(MemoryTransformerDecoderLayer, self).__init__()
self.version = version
self.ignore_source = ignore_source
self.variational = variational
self.death_rate = death_rate
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
d_head = d_model // h
self.multihead_tgt = RelPartialLearnableMultiHeadAttn(h, d_model, d_head, dropatt=attn_p)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, variational=self.variational)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
elif onmt.constants.activation_layer == 'linear_swish_linear':
ff_p = p
feedforward = FeedForwardSwish(d_model, d_ff, ff_p)
else:
raise NotImplementedError
self.feedforward = Bottle(feedforward)
def forward(self, input_, context, pos_emb, mask_tgt, mask_src, mems=None,
incremental=False, incremental_cache=None):
# incremental=False, incremental_cache=None, reuse_source=True):
""" Self attention layer with memory
layernorm > attn > dropout > residual
"""
assert context is None, "This model does not have an context encoder"
coin = True
if self.training and self.death_rate > 0:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
# input and context should be time first ?
query = self.preprocess_attn(input_)
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
# out, _ = self.multihead_tgt(query, pos_emb, r_w_bias, r_r_bias, attn_mask=mask_tgt)
out, _, incremental_cache = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input_ = self.postprocess_attn(out, input_)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input_))
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input_ = self.postprocess_ffn(out, input_)
else:
coverage = None
if incremental:
return input_, coverage, incremental_cache
return input_, coverage
def step(self, input, context, pos_emb, mask_tgt, mask_src, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input)
out, _, buffer = self.multihead_tgt(query, pos_emb, attn_mask=mask_tgt, buffer=buffer)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
        coverage = None
        return input, coverage, buffer
class MemoryTransformer(UnifiedTransformer):
"""
    This class combines the encoder and the decoder into one single sequence,
    with joint attention between the encoder and decoder parts.
"""
def __init__(self, opt, src_embedding, tgt_embedding, generator, positional_encoder,
language_embeddings=None, encoder_type='text', **kwargs):
self.death_rate = opt.death_rate
self.bidirectional = opt.bidirectional
self.layer_modules = []
self.learnable_position_encoding = opt.learnable_position_encoding
self.max_memory_size = opt.max_memory_size
self.mem_len = self.max_memory_size
self.dictionary = kwargs.get('dictionary', None)
# build_modules will be called from the inherited constructor
super(MemoryTransformer, self).__init__(opt, tgt_embedding, src_embedding,
generator, positional_encoder,
language_embeddings=language_embeddings,
encoder_type=encoder_type)
self.src_embedding = src_embedding
self.tgt_embedding = tgt_embedding
# self.language_embedding = nn.Embedding(3, self.model_size, padding_idx=0)
self.generator = generator
self.ignore_source = True
self.encoder_type = opt.encoder_type
# learnable position encoding
if self.learnable_position_encoding:
self.max_pos_length = opt.max_pos_length
# pos_emb = self.model_size // self.n_heads
pos_emb = self.model_size
self.positional_encoder = LearnablePostionEmbedding(self.max_pos_length, pos_emb)
print("* Learnable position encoding with max %d positions" % self.max_pos_length)
else:
# or using pre-set sinusoidal
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
def gen_mask(self, src, tgt):
# generate the mask for the mini-batch data
# both src and tgt are T x B
input_seq = torch.cat([src, tgt], dim=0)
seq_len = input_seq.size(0)
if self.bidirectional:
bsz, src_len = src.size(1), src.size(0)
tgt_len = tgt.size(0)
tgt_tgt_mask = torch.triu(src.new_ones(tgt_len, tgt_len), diagonal=1)
tgt_src_mask = src.new_zeros(tgt_len, src_len)
tgt_mask = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
src_src_mask = src.new_zeros(src_len, src_len)
src_tgt_mask = src.new_ones(src_len, tgt_len)
src_mask = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
attn_mask = torch.cat([src_mask, tgt_mask], dim=0)
attn_mask = attn_mask.bool().unsqueeze(-1)
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0)
attn_mask = attn_mask | pad_mask
else:
attn_mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1).bool().unsqueeze(-1) # T x T x -1
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(0) # 1 x T x B
# attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
attn_mask = attn_mask | pad_mask
return attn_mask
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = MemoryTransformerDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
ignore_source=True,
variational=self.variational_dropout, death_rate=death_r)
self.layer_modules.append(block)
def create_mask_stream(self, src, tgt, src_lengths, tgt_lengths, mem_length=0):
if self.bidirectional:
mask = None
prev_length = 0
# go through the src and tgt lengths to create mask
for i, (src_len, tgt_len) in enumerate(zip(src_lengths, tgt_lengths)):
# print("Step ", i, src_len, tgt_len)
# first, the source sentence should have full bidirectional attention to the end of itself
src_mask = src.new_zeros(src_len, src_len + prev_length)
if prev_length == 0:
mask = src_mask
else:
# everything in the past doesn't look at the future
prev_mask = src.new_ones(prev_length, src_len)
if mask is not None:
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (src_len + prev_length)
else:
mask = prev_mask
mask = torch.cat([mask, src_mask], dim=0) # (src_len + prev_length) x (src_len + prev_length)
prev_length += src_len
# the target sentence
# everything in the past doesn't look at the future
prev_mask = tgt.new_ones(prev_length, tgt_len)
# the target has unidirectional attention towards everything in the past
mlen = prev_length
qlen = tgt_len
klen = qlen + mlen
tgt_mask = torch.triu(tgt.new_ones(qlen, klen), diagonal=1 + mlen)
mask = torch.cat([mask, prev_mask], dim=1) # prev_len x (prev_len + tgt_len)
mask = torch.cat([mask, tgt_mask], dim=0) #
prev_length += tgt_len
if mem_length > 0:
past_mask = src.new_zeros(prev_length, mem_length)
mask = torch.cat([past_mask, mask], dim=1)
attn_mask = mask.bool().unsqueeze(-1)
else:
seq_len = sum(src_lengths) + sum(tgt_lengths)
# mask = torch.triu(src.new_ones(seq_len, seq_len), diagonal=1)
# if mem_length > 0:
# past_mask = src.new_zeros(seq_len, mem_length)
# mask = torch.cat([past_mask, mask], dim=1)
mask = torch.triu(src.new_ones(seq_len, seq_len + mem_length), diagonal=1 + mem_length)
attn_mask = mask.bool().unsqueeze(-1)
return attn_mask
def forward_stream(self, batch, **kwargs):
streaming_state = kwargs.get('streaming_state', None)
mems = streaming_state.mems
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # (len_tgt x batch_size) x 1
bsz = src.size(1)
assert bsz == 1
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_lengths = batch.src_lengths
tgt_lengths = batch.tgt_lengths
# First: separate the input tensor into segments
src_segments = seperate_tensor(src, src_lengths)
tgt_segments = seperate_tensor(tgt, tgt_lengths)
# if self.dictionary is not None:
# for src_, tgt_ in zip(src_segments, tgt_segments):
# src_ = src_.squeeze(1)
# tgt_ = tgt_.squeeze(1)
#
# src_words = " ".join(self.dictionary.convertToLabels(src_, onmt.constants.EOS))
# tgt_words = " ".join(self.dictionary.convertToLabels(tgt_, onmt.constants.EOS))
# print(src_words, tgt_words)
# input("Press any key to continue...")
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
# Second: Embedding
src_embeddings = []
for src_segment in src_segments:
src_emb = F.embedding(
src_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
src_embeddings.append(src_emb)
tgt_embeddings = []
for tgt_segment in tgt_segments:
tgt_emb = F.embedding(
tgt_segment, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
tgt_embeddings.append(tgt_emb)
# add src1, tgt1, src2, tgt2 .... srcn, tgtn
all_embeddings = []
for (src_emb, tgt_emb) in zip(src_embeddings, tgt_embeddings):
all_embeddings.append(src_emb)
all_embeddings.append(tgt_emb)
emb = torch.cat(all_embeddings, dim=0)
# prepare attention mask
mem_length = streaming_state.mems[0].size(0) if mems is not None else 0
attn_mask = self.create_mask_stream(src, tgt, src_lengths, tgt_lengths, mem_length=mem_length)
qlen = emb.size(0)
klen = emb.size(0) + mem_length
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
hids = [output]
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
mems_i = None if mems is None else mems[i]
output, coverage = layer(output, None, pos_emb, attn_mask, None, mems=mems_i)
# context and context_mask are None
hids.append(output)
# final layer norm
output = self.postprocess_layer(output)
# update the memory and then prune
streaming_state.update_mems(hids, qlen)
# now we have to separate the target states from the "output" to generate translations
target_outputs = []
contexts = []
offset = 0
for (src_len, tgt_len) in zip(src_lengths, tgt_lengths):
source_output = output.narrow(0, offset, src_len)
offset += src_len
target_output = output.narrow(0, offset, tgt_len)
offset += tgt_len
target_outputs.append(target_output)
contexts.append(source_output)
context = torch.cat(contexts, dim=0)
output = torch.cat(target_outputs, dim=0)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': None}
output_dict = defaultdict(lambda: None, output_dict)
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
output_dict['streaming_state'] = streaming_state
return output_dict
def forward(self, batch, target_mask=None, streaming=False, **kwargs):
if streaming:
return self.forward_stream(batch, **kwargs)
src = batch.get('source') # src_len x batch_size
tgt = batch.get('target_input') # len_tgt x batch_size
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
embed = self.src_embedding
if self.word_dropout > 0 and self.training:
mask = embed.weight.new().resize_((embed.weight.size(0), 1)). \
bernoulli_(1 - self.word_dropout).expand_as(embed.weight) / (1 - self.word_dropout)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
padding_idx = embed.padding_idx
if padding_idx is None:
padding_idx = -1
src_emb = F.embedding(
src, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
src_emb.mul_(math.sqrt(self.model_size))
tgt_emb = F.embedding(
tgt, masked_embed_weight, padding_idx, embed.max_norm,
embed.norm_type, embed.scale_grad_by_freq, embed.sparse)
tgt_emb.mul_(math.sqrt(self.model_size))
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
emb = torch.cat([src_emb, tgt_emb], dim=0) # L x batch_size x H
# prepare self-attention mask
attn_mask = self.gen_mask(src, tgt)
# pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
klen = src_len + tgt_len
if self.bidirectional:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
pos_emb = self.preprocess_layer(pos_emb)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage, _ = layer(output, None, pos_emb, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
# extract the "source" and "target" parts of the output
context = output[:src_len, :, :]
output = output[-tgt_len:, :, :]
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': target_mask}
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
return output_dict
def encode(self, input, decoder_state, input_pos=None, input_lang=None):
buffers = decoder_state.attention_buffers
src_lang = input_lang
input = input.transpose(0, 1)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, input, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb
emb = src_emb
src_len = input.size(0)
bsz = input.size(1)
mask_src_src = input.eq(onmt.constants.PAD).expand(src_len, src_len, bsz)
buffer = buffers[0] if 0 in buffers else None
if buffer is not None:
mem_len = buffer['k'].size(0)
else:
mem_len = 0
if mem_len > 0:
# print(mask_src_src.size())
past_mask = input.new_zeros(src_len, mem_len).bool().unsqueeze(-1).expand(src_len, mem_len, bsz)
mask_src_src = torch.cat([past_mask, mask_src_src], dim=1)
mask_src = mask_src_src
attn_mask = mask_src.bool() # L x L x batch_size
output = emb
klen = src_len + mem_len
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
# context and context_mask are None
buffer = buffers[i] if i in buffers else None
# if i == 0 and buffer is not None:
# key = next(iter(buffer))
# print(buffer[key].size())
# output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer)
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
return output, decoder_state
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
# raise NotImplementedError
tgt_output = batch.get('target_output')
output_dict = self.forward(batch, target_mask=None)
context = output_dict['context']
logprobs = output_dict['logprobs']
batch_size = logprobs.size(1)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
for gen_t, tgt_t in zip(logprobs, tgt_output):
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def renew_buffer(self, new_len):
# This model uses pre-allocated position encoding
self.positional_encoder.renew(new_len)
        mask = torch.triu(torch.ones(new_len + 1, new_len + 1, dtype=torch.uint8), diagonal=1)
self.register_buffer('mask', mask)
return
def reset_states(self):
return
def step(self, input, decoder_state, **kwargs):
src = decoder_state.src if decoder_state.src is not None else None
tgt = input.transpose(0, 1)
tgt_lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
buffers = decoder_state.attention_buffers
tgt_len = tgt.size(0)
src_len = src.size(0)
bsz = tgt.size(1)
# Embedding stage (and scale the embedding)
# src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
# * math.sqrt(self.model_size)
input_ = tgt[-1:]
tgt_emb = embedded_dropout(self.tgt_embedding, input_, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
# src_lang_emb = self.language_embeddings(src_lang)
# src_emb += src_lang_emb
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb
else:
raise NotImplementedError
# concatenate embedding
emb = tgt_emb
# prepare self-attention mask
# attn_mask = self.gen_mask(src, tgt)
buffer = buffers[0] if 0 in buffers else None
if buffer is not None:
mem_len = buffer['k'].size(0)
else:
mem_len = 0
qlen = tgt_len
klen = qlen + mem_len
attn_mask = torch.triu(emb.new_ones(qlen, klen), diagonal=1+mem_len).bool().unsqueeze(-1)
# last attn_mask step
attn_mask = attn_mask[-1:, :, :]
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
pos_emb = self.positional_encoder(pos)
output = emb
# Applying dropout
output = self.preprocess_layer(output)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
output, coverage, buffer = layer(output, None, pos_emb, attn_mask, None,
incremental=True,
incremental_cache=buffer) # context and context_mask are None
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
# output = output[-1:, :, :]
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
logprobs = self.generator[0](output_dict).squeeze(0)
output_dict['src'] = decoder_state.src.transpose(0, 1)
output_dict['log_prob'] = logprobs
output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
# pruning
max_mem_size = self.max_memory_size + tgt_len + 1
for i in range(self.layers):
            buffer = buffers[i] if i in buffers else None
            if buffer is None:
                continue
            for k in buffer:
v = buffer[k]
buffer[k] = v[-max_mem_size:, :, :]
decoder_state.update_attention_buffer(buffer, i)
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=2, streaming=False, previous_decoding_state=None):
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # B x T
if previous_decoding_state is None:
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size, type=type,
cloning=True)
else:
src = src.repeat(1, beam_size)
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size,
type=type, cloning=False)
decoder_state.attention_buffers = previous_decoding_state.attention_buffers
# forward pass through the input to get the buffer
src_transposed = src_transposed.repeat(beam_size, 1)
encoder_output, decoder_state = self.encode(src_transposed, decoder_state, input_pos=src_pos,
input_lang=src_lang)
decoder_state.src_lang = src_lang
# buffers = decoder_state.attention_buffers
# bsz = src.size(1)
# new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
# new_order = new_order.to(src.device)
#
# for l in buffers:
# buffer_ = buffers[l]
# if buffer_ is not None:
# for k in buffer_.keys():
# t_, br_, d_ = buffer_[k].size()
# buffer_[k] = buffer_[k].index_select(1, new_order) # 1 for time first
return decoder_state
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
self.generator[0].linear.weight = self.tgt_embedding.weight
def share_enc_dec_embedding(self):
self.src_embedding.weight = self.tgt_embedding.weight
def init_stream(self):
param = next(self.parameters())
layers = self.layers
streaming_state = MemoryState(layers, self.max_memory_size, param.device, param.dtype)
return streaming_state
def set_memory_size(self, src_memory_size, tgt_memory_size):
self.max_memory_size = src_memory_size + tgt_memory_size
class MemoryState(object):
def __init__(self, nlayers, mem_len, device, dtype):
self.mem_len = mem_len
self.mems = []
self.nlayers = nlayers
# n+1 memory slots (embeddings and n layers)
# but maybe we don't need to store the upper layer?
for i in range(self.nlayers + 1):
empty = torch.empty(0, dtype=dtype, device=device)
self.mems.append(empty)
def update_mems(self, hids, qlen):
# does not deal with None
if self.mems is None:
return None
mlen = self.mems[0].size(0) if self.mems is not None else 0
# mems is not None
assert len(hids) == len(self.mems), 'len(hids) != len(mems)'
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
with torch.no_grad():
new_mems = []
end_idx = mlen + qlen
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([self.mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
# Important:
self.mems = new_mems
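        # Hedged illustration (added): with mem_len=4, an existing memory of
        # mlen=3 steps and a new segment of qlen=2 steps, end_idx=5 and
        # beg_idx=1, so the concatenated 5 steps are truncated to the most
        # recent 4 before being detached and stored.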
# self.src_buffer = defaultdict(lambda: None)
# self.prev_src_mem_size = 0
# self.src_lengths = []
# self.tgt_buffer = defaultdict(lambda: None)
# self.prev_tgt_mem_size = 0
# self.tgt_lengths = []
#
# self.context_memory = None
# def init_mems(self):
# if self.mem_len > 0:
# mems = []
# param = next(self.parameters())
# for i in range(self.n_layer + 1):
# empty = torch.empty(0, dtype=param.dtype, device=param.device)
# mems.append(empty)
#
# return mems
# else:
# return None
| 32,849 | 37.06489 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/reformer.py | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch REFORMER model. """
import inspect
import numpy as np
import torch
from torch import nn
from torch.autograd.function import Function
from onmt.modules.lsh_attention import LSHSelfAttention
from onmt.models.transformers import PrePostProcessing
from onmt.modules.linear import FeedForward
from typing import Callable, Dict, List, Optional, Tuple
def apply_chunking_to_forward(
chunk_size: int, chunk_dim: int, forward_fn: Callable[..., torch.Tensor], *input_tensors
) -> torch.Tensor:
"""
This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size`
over the dimension `chunk_dim`.
It then applies a layer `forward_fn` to each chunk independently to save memory.
If the `forward_fn` is independent across the `chunk_dim` this function will yield the
same result as not applying it.
Args:
chunk_size: int - the chunk size of a chunked tensor. `num_chunks` = `len(input_tensors[0]) / chunk_size`
chunk_dim: int - the dimension over which the input_tensors should be chunked
forward_fn: fn - the forward fn of the model
input_tensors: tuple(torch.Tensor) - the input tensors of `forward_fn` which are chunked
Returns:
        a Tensor with the same shape that `forward_fn` would have produced if applied directly
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.chunk_size_lm_head, self.seq_len_dim, self.forward_chunk, hidden_states)
"""
assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
tensor_shape = input_tensors[0].shape
assert all(
input_tensor.shape == tensor_shape for input_tensor in input_tensors
), "All input tenors have to be of the same shape"
    # inspect.signature has existed since Python 3.5 and is a plain Python function -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
assert num_args_in_forward_chunk_fn == len(
input_tensors
), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
num_args_in_forward_chunk_fn, len(input_tensors)
)
if chunk_size > 0:
assert (
input_tensors[0].shape[chunk_dim] % chunk_size == 0
), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
input_tensors[0].shape[chunk_dim], chunk_size
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn to every tuple
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
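# Hedged usage sketch (added, not part of the original file): chunking a
# position-wise function over the sequence dimension. `position_wise` and the
# shapes below are illustrative assumptions, not values from this repo.
def _example_apply_chunking():
    linear = nn.Linear(8, 8)

    def position_wise(hidden_states):
        # treats every position independently, so chunking is lossless
        return linear(hidden_states)

    hidden = torch.randn(4, 16, 8)  # bsz x seq_len x dim
    out = apply_chunking_to_forward(4, 1, position_wise, hidden)  # 4 chunks along dim 1
    assert out.shape == hidden.shape
    return out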
class ReformerEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
self.variational = opt.variational_dropout
self.death_rate = death_rate
d_model = opt.model_size
p = opt.dropout
super(ReformerEncoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', variational=self.variational)
self.self_attention = LSHSelfAttention(opt)
self.feedforward = FeedForward(opt.model_size, opt.inner_size, opt.dropout, opt.variational_dropout)
def forward(self, input, attn_mask):
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
out, _, _ = self.self_attention(query, attn_mask)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
        return input
| 5,517 | 41.446154 | 122 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/relative_universal_transformer_layers.py | import torch
import torch.nn as nn
import onmt
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Linear
from onmt.modules.relative_attention import RelPartialLearnableMultiHeadAttn
from onmt.modules.optimized.relative_self_attention import RelativeSelfMultiheadAttn
from onmt.utils import flip
from onmt.modules.bottle import Bottle
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish, FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.feed_forward import PositionWiseFeedForward
from onmt.modules.adaptive.relative_self_attention import AdaptiveRelativeAttn
from onmt.modules.adaptive.encdec_attention import AdaptiveEncDecAttn
from onmt.modules.adaptive.feed_forward import AdaptiveFeedForward
class RelativeUniversalEncoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0, **kwargs):
super().__init__()
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
d_head = opt.model_size // opt.n_heads
self.adaptive_type = opt.adaptive
self.factor_size = opt.layers
# this model defaults as fast relative self attention
if self.adaptive_type == 'universal':
self.multihead = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
else:
self.multihead = AdaptiveRelativeAttn(opt.model_size, opt.n_heads, self.factor_size, opt.attn_dropout)
self.feedforward = AdaptiveFeedForward(opt.model_size, opt.inner_size, self.factor_size,
opt.dropout, variational=self.variational)
def forward(self, input, pos_emb, layer_vector, attn_mask, incremental=False, incremental_cache=None, mems=None):
if self.adaptive_type == 'universal':
input = input + layer_vector
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
# if self.training and self.death_rate > 0:
# coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
if self.adaptive_type == 'universal':
out, _ = self.multihead(query, pos_emb, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead(query, pos_emb, layer_vector, attn_mask, None, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
if self.adaptive_type == 'universal':
out = self.feedforward(self.preprocess_ffn(input))
else:
out = self.feedforward(self.preprocess_ffn(input), layer_vector)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
if incremental:
return input, incremental_cache
return input
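# Hedged note (added): the encoder and decoder layers in this file support two
# ways of conditioning on the layer index. In the 'universal' setting the
# shared layer embedding is simply added to the input before self-attention,
# while in the adaptive setting the layer vector is instead passed into the
# Adaptive* attention and feed-forward modules as an extra factor argument.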
class RelativeUniversalDecoderLayer(nn.Module):
def __init__(self, opt, death_rate=0.0):
super().__init__()
self.ignore_source = opt.ignore_source
self.variational = opt.variational_dropout
self.death_rate = death_rate
self.fast_self_attention = opt.fast_self_attention
self.factor_size = opt.layers
self.adaptive_type = opt.adaptive
self.preprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if not self.ignore_source:
self.preprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_src_attn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if self.adaptive_type == 'universal':
self.multihead_src = EncdecMultiheadAttn(opt.n_heads, opt.model_size, opt.attn_dropout)
else:
self.multihead_src = AdaptiveEncDecAttn(opt.n_heads, opt.model_size, self.factor_size, opt.attn_dropout)
self.preprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='n')
self.postprocess_ffn = PrePostProcessing(opt.model_size, opt.dropout, sequence='da',
variational=self.variational)
if self.adaptive_type == 'universal':
self.multihead_tgt = RelativeSelfMultiheadAttn(opt.model_size, opt.n_heads, opt.attn_dropout)
self.feedforward = PositionWiseFeedForward(opt.model_size, opt.inner_size, opt.dropout,
variational=self.variational)
else:
self.multihead_tgt = AdaptiveRelativeAttn(opt.model_size, opt.n_heads, self.factor_size,
opt.attn_dropout)
self.feedforward = AdaptiveFeedForward(opt.model_size, opt.inner_size, self.factor_size,
opt.dropout, variational=self.variational)
# def forward(self, input, context, pos_emb, r_w_bias, r_r_bias, mask_tgt, mask_src):
def forward(self, input, context, pos_emb, layer_vector, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True, mems=None):
# sum up input with the layer embedding
if self.adaptive_type == 'universal':
input = input + layer_vector
if incremental and incremental_cache is None:
incremental_cache = dict()
coin = True
if coin:
# input and context should be time first ?
if mems is not None and mems.size(0) > 0:
mems = self.preprocess_attn(mems)
else:
mems = None
query = self.preprocess_attn(input)
if self.adaptive_type == 'universal':
out, _ = self.multihead_tgt(query, pos_emb, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
else:
out, _ = self.multihead_tgt(query, pos_emb, layer_vector, None, mask_tgt, mems=mems,
incremental=incremental, incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
incremental_source = incremental and reuse_source
if self.adaptive_type == 'universal':
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
else:
out, coverage = self.multihead_src(query, context, context, layer_vector, mask_src,
incremental=incremental_source,
incremental_cache=incremental_cache)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
if self.adaptive_type == 'universal':
out = self.feedforward(self.preprocess_ffn(input))
else:
out = self.feedforward(self.preprocess_ffn(input), layer_vector)
# rescaling before residual
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
else:
coverage = None
return input, coverage, incremental_cache
| 10,170 | 45.231818 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/distance_transformer.py | import torch
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.legacy.old_models.distance_transformer_layers import DistanceTransformerEncoderLayer, DistanceTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
class DistanceTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.max_pos_length = opt.max_pos_length
# build_modules will be called from the inherited constructor
super(DistanceTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
# learnable position encoding
self.positional_encoder = None
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Encoder with Distance Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for _l in range(self.layers):
# linearly decay the death rate
death_r = (_l + 1.0) / self.layers * self.death_rate
block = DistanceTransformerEncoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
variational=self.varitional_dropout, death_rate=death_r)
self.layer_modules.append(block)
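        # Hedged illustration (added): the death rate grows linearly with depth,
        # e.g. with 6 layers and opt.death_rate = 0.3 the first layer is dropped
        # with probability 0.05 and the top layer with the full 0.3.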
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: batch_size x src_len (will be transposed to time-first)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
bsz_first_input = input
input = input.transpose(0, 1)
# mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # batch_size x src_len x 1 for broadcasting
dec_attn_mask = bsz_first_input.eq(onmt.constants.PAD).unsqueeze(1)
if streaming:
raise NotImplementedError
streaming_state = kwargs.get('streaming_state', None)
mems = streaming_state.src_mems
# mem_len = streaming_state.src_mems[0].size(0)
mem_len = streaming_state.prev_src_mem_size
input_length = kwargs.get('src_lengths', None)
streaming_state = kwargs.get('streaming_state', None)
mask_src = self.create_stream_mask(input, input_length, mem_len)
mask_src = mask_src.unsqueeze(2)
else:
mem_len = 0
mask_src = input.eq(onmt.constants.PAD).unsqueeze(0) # batch_size x src_len x 1 for broadcasting
mems = None
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.double_position:
assert input_pos is not None
# flatten
src_len, bsz = input_pos.size(0), input_pos.size(1)
input_pos_ = input_pos.contiguous().view(-1).type_as(emb)
abs_pos = self.positional_encoder(input_pos_)
abs_pos = abs_pos.squeeze(1).view(src_len, bsz, -1)
else:
abs_pos = None
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
# There is no "unsqueeze" here because the input is T x B x H and lang_emb is B x H
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(1)
else:
if streaming:
raise NotImplementedError
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).transpose(0, 1).eq(onmt.constants.PAD).unsqueeze(0)
dec_attn_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
                mask_src = long_mask[:, 0:input.size(1) * 4:4].transpose(0, 1).unsqueeze(0)
dec_attn_mask = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
# the size seems to be B x T ?
emb = input
emb = emb.transpose(0, 1)
input = input.transpose(0, 1)
abs_pos = None
mem_len = 0
if onmt.constants.torch_version >= 1.2:
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
if self.double_position and abs_pos is not None:
# adding position encoding
emb = emb + abs_pos
""" Adding positional encoding """
qlen = input.size(0)
klen = qlen + mem_len
# Asynchronous positions: 2K+1 positions instead of K+1
# because the batch dimension is lacking
# B x T x H -> T x B x H
context = emb
# Apply dropout to both context and pos_emb
context = self.preprocess_layer(context)
for i, layer in enumerate(self.layer_modules):
# src_len x batch_size x d_model
if streaming:
buffer = streaming_state.src_buffer[i]
context, buffer = layer(context, mask_src, incremental=True, incremental_cache=buffer)
streaming_state.src_buffer[i] = buffer
else:
context = layer(context, mask_src)
# last layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': dec_attn_mask, 'src': input})
if streaming:
streaming_state.prev_src_mem_size += sum(input_length.tolist())
streaming_state.prune_source_memory(self.max_memory_size)
# streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
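# Hedged illustration (added, not part of the original model code): the audio front-end
# above downsamples the time axis by a factor of 4, so the frame-level padding mask is
# subsampled with the same stride to line up with the CNN output frames.
def _demo_subsample_pad_mask(long_mask, out_len, factor=4):
    """Hypothetical helper: long_mask is a (batch, time) padding mask; returns (batch, out_len)."""
    return long_mask[:, 0:out_len * factor:factor]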
class DistanceTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
# build_modules will be called from the inherited constructor
super(DistanceTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source,
allocate_positions=False)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
# Parameters for the position biases
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_head))
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Distance Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
block = DistanceTransformerDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size, self.attn_dropout,
variational=self.variational_dropout, death_rate=death_r)
self.layer_modules.append(block)
def process_embedding(self, input, input_lang=None):
return input
def create_context_mask(self, input, src, src_lengths, tgt_lengths, extra_context_length=0):
"""
Generate the mask so that part of the target attends to a part of the source
:param extra_context_length:
:param input:
:param src:
:param src_lengths:
:param tgt_lengths:
:return:
"""
mask = None
if self.stream_context == 'global':
# Global context: one target attends to everything in the source
for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
if mask is None:
prev_src_length = 0
prev_tgt_length = 0
else:
prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)
# current sent attend to current src sent and all src in the past
current_mask = input.new_zeros(tgt_length, src_length + prev_src_length)
# the previous target cannot attend to the current source
if prev_tgt_length > 0:
prev_mask = input.new_ones(prev_tgt_length, src_length)
prev_mask = torch.cat([mask, prev_mask], dim=-1)
else:
prev_mask = None
# the output mask has two parts: the prev and the current
if prev_mask is not None:
mask = torch.cat([prev_mask, current_mask], dim=0)
else:
mask = current_mask
elif self.stream_context in ['local', 'limited']:
# Local context: only attends to the aligned context
for (src_length, tgt_length) in zip(src_lengths, tgt_lengths):
if mask is None:
prev_src_length = 0
prev_tgt_length = 0
else:
prev_src_length, prev_tgt_length = mask.size(1), mask.size(0)
# current tgt sent attend to only current src sent
if prev_src_length > 0:
current_mask = torch.cat([input.new_ones(tgt_length, prev_src_length - extra_context_length),
input.new_zeros(tgt_length, src_length + extra_context_length)], dim=-1)
else:
current_mask = input.new_zeros(tgt_length, src_length + extra_context_length)
# the previous target cannot attend to the current source
if prev_tgt_length > 0:
prev_mask = input.new_ones(prev_tgt_length, src_length)
prev_mask = torch.cat([mask, prev_mask], dim=-1)
else:
prev_mask = None
# the output mask has two parts: the prev and the current
if prev_mask is not None:
mask = torch.cat([prev_mask, current_mask], dim=0)
else:
mask = current_mask
mask = mask.bool()
return mask
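        # Illustrative example (added for clarity, not part of the original code):
        # with stream_context='global', src_lengths=[2, 3] and tgt_lengths=[2, 2], the
        # returned mask (1 = blocked) is
        #   [[0, 0, 1, 1, 1],
        #    [0, 0, 1, 1, 1],
        #    [0, 0, 0, 0, 0],
        #    [0, 0, 0, 0, 0]]
        # i.e. the first target segment only sees its own source segment, while the second
        # segment also sees every earlier source segment.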
def create_self_attn_mask(self, input, tgt_lengths, prev_tgt_mem_size):
"""
Create a mask for the target words attending to the past
:param input:
:param tgt_lengths:
:param prev_tgt_mem_size:
:return:
"""
if self.stream_context in ['local', 'global']:
qlen = sum(tgt_lengths.tolist())
mlen = prev_tgt_mem_size
klen = qlen + mlen
mask = torch.triu(input.new_ones(qlen, klen), diagonal=1 + mlen).bool()[:, :, None]
elif self.stream_context in ['limited']:
# past_length = prev_tgt_mem_size
mask = None
# assert prev_tgt_mem_size == 0, "This model is limited and doesn't accept memory"
for length in tgt_lengths:
past_length = mask.size(0) if mask is not None else 0
if past_length > 0:
# don't look at the past
past_mask = input.new_ones(length, past_length)
else:
past_mask = None
# pay attention to the past words in the current sentence
current_mask = torch.triu(input.new_ones(length, length), diagonal=1)
if past_mask is not None:
current_mask = torch.cat([past_mask, current_mask], dim=1)
if mask is None:
mask = current_mask
else:
no_future_mask = input.new_ones(past_length, length)
mask = torch.cat([mask, no_future_mask], dim=1)
mask = torch.cat([mask, current_mask], dim=0)
mask = mask.bool().unsqueeze(-1)
return mask
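        # Illustrative example (added for clarity, not part of the original code):
        # for stream_context in ['local', 'global'], with qlen=3 current target positions and
        # mlen=2 memorised positions, the causal mask (1 = blocked, before the unsqueeze) is
        #   [[0, 0, 0, 1, 1],
        #    [0, 0, 0, 0, 1],
        #    [0, 0, 0, 0, 0]]
        # every query sees the whole memory plus itself and the earlier current positions.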
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (will be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
input = input.transpose(0, 1) # T x B
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
emb = emb * math.sqrt(self.model_size)
if streaming:
src_lengths = kwargs.get("src_lengths", None)
tgt_lengths = kwargs.get("tgt_lengths", None)
streaming_state = kwargs.get("streaming_state")
# mems = streaming_state.tgt_mems
mem_len = streaming_state.prev_tgt_mem_size
extra_context = streaming_state.extra_context
extra_context_length = extra_context.size(0) if extra_context is not None else 0
# mem_len = mems[0].size(0) if mems is not None else 0
else:
mem_len = 0
mems = None
extra_context = None
if self.double_position:
assert input_pos is not None
tgt_len, bsz = input_pos.size(0), input_pos.size(1)
input_pos_ = input_pos.view(-1).type_as(emb)
abs_pos = self.positional_encoder(input_pos_).squeeze(1).view(tgt_len, bsz, -1)
emb = emb + abs_pos
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[0])
emb[0] = bos_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
if streaming:
context_attn_mask = self.create_context_mask(input, src,
src_lengths, tgt_lengths,
extra_context_length)
mask_src = context_attn_mask.unsqueeze(0)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
qlen = input.size(0)
klen = qlen + mem_len
# preparing self-attention mask. The input is either left or right aligned
if streaming:
dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, mem_len)
else:
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mem_len).byte()[:, :, None]
pad_mask = input.eq(onmt.constants.PAD).byte() # L x B
dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
dec_attn_mask = dec_attn_mask.gt(0)
if onmt.constants.torch_version >= 1.2:
dec_attn_mask = dec_attn_mask.bool()
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
output = self.preprocess_layer(emb.contiguous())
if streaming:
hids = [output]
if extra_context is not None:
context = torch.cat([extra_context, context], dim=0)
# print(context.size(), context_attn_mask.size())
for i, layer in enumerate(self.layer_modules):
# batch_size x src_len x d_model output, coverage = layer(output, context, pos_emb, self.r_w_bias,
# self.r_r_bias, dec_attn_mask, mask_src)
# mems_i = mems[i] if mems is not None and streaming and
# self.stream_context in ['local', 'global'] else None
if streaming:
buffer = streaming_state.tgt_buffer[i]
output, coverage, buffer = layer(output, context, dec_attn_mask, context_attn_mask,
incremental=True, incremental_cache=buffer, reuse_source=False)
streaming_state.tgt_buffer[i] = buffer
else:
output, coverage, _ = layer(output, context, dec_attn_mask, mask_src)
# if streaming:
# hids.append(output)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
if streaming:
streaming_state.prev_tgt_mem_size += sum(tgt_lengths.tolist())
streaming_state.prune_target_memory(self.max_memory_size)
# if we use the extra context: keep the last context
if self.extra_context_size > 0:
extra_context = context[-self.extra_context_size:].detach()
streaming_state.extra_context = extra_context
# if self.stream_context in ['local', 'global']:
# streaming_state.update_tgt_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
def step(self, input, decoder_state, streaming=False):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (will be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
if streaming:
return self.step_streaming(input, decoder_state)
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
mask_src = decoder_state.src_mask
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1) # B x T
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
# use the last value of input to continue decoding
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
else:
input_ = input.transpose(0, 1)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_) * math.sqrt(self.model_size)
input = input.transpose(0, 1)
klen = input.size(0)
# emb = self.word_lut(input) * math.sqrt(self.model_size)
if self.double_position:
input_pos = torch.arange(input.size(0), dtype=emb.dtype, device=emb.device)
input_pos = input_pos.unsqueeze(1).repeat(1, input.size(1))
tgt_len, bsz = input_pos.size(0), input_pos.size(1)
input_pos_ = input_pos.view(-1).type_as(emb)
abs_pos = self.positional_encoder(input_pos_).squeeze(1).view(tgt_len, bsz, -1)
emb = emb + abs_pos[-1:, :, :]
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H
if self.language_embedding_type in ['sum', 'all_sum']:
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
if input.size(0) == 1:
emb[0] = lang_emb
lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
# prepare position encoding
qlen = emb.size(0)
mlen = klen - qlen
dec_attn_mask = torch.triu(
emb.new_ones(qlen, klen), diagonal=1 + mlen).byte()[:, :, None]
pad_mask = input.eq(onmt.constants.PAD).byte() # L x B
dec_attn_mask = dec_attn_mask + pad_mask.unsqueeze(0)
dec_attn_mask = dec_attn_mask.gt(0)
if onmt.constants.torch_version >= 1.2:
dec_attn_mask = dec_attn_mask.bool()
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
# assert (output.size(0) == 1)
# output, coverage, buffer = layer.step(output, context, pos_emb,
# dec_attn_mask, mask_src, buffer=buffer)
output, coverage, buffer = layer(output, context, dec_attn_mask, mask_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
output = self.postprocess_layer(output)
output = output[-1].unsqueeze(0)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
def step_streaming(self, input, decoder_state):
"""Step function in streaming case"""
raise NotImplementedError
# context = decoder_state.context
# lang = decoder_state.tgt_lang
# streaming_state = decoder_state.streaming_state
#
# # for global model: push the context in
#
# if decoder_state.concat_input_seq:
# if decoder_state.input_seq is None:
# decoder_state.input_seq = input
# else:
# # concatenate the last input to the previous input sequence
# decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
# input = decoder_state.input_seq.transpose(0, 1) # B x T
#
# src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
#
# # use the last value of input to continue decoding
# if input.size(1) > 1:
# input_ = input[:, -1].unsqueeze(1).transpose(0, 1)
# else:
# input_ = input.transpose(0, 1)
#
# emb = self.word_lut(input_) * math.sqrt(self.model_size)
# input = input.transpose(0, 1) # B x T to T x B
# klen = input.size(0)
#
# # If we start a new sentence to decode: reset the context memory
# if klen == 1:
# streaming_state.reset_context_memory()
# if self.stream_context == 'limited':
# streaming_state.reset_target_memory()
#
# if self.use_language_embedding:
# lang_emb = self.language_embeddings(lang) # B x H or 1 x H
# if self.language_embedding_type == 'sum':
# emb = emb + lang_emb
# elif self.language_embedding_type == 'concat':
# # replace the bos embedding with the language
# bos_emb = lang_emb.expand_as(emb[0])
# emb[0] = bos_emb
#
# lang_emb = lang_emb.unsqueeze(0).expand_as(emb)
# concat_emb = torch.cat([emb, lang_emb], dim=-1)
# emb = torch.relu(self.projector(concat_emb))
# else:
# raise NotImplementedError
#
# # need to manually definte src_lengths and tgt_lengths here
# src_lengths = torch.LongTensor([context.size(0)])
# tgt_lengths = torch.LongTensor([1])
#
# if context is not None:
# context_attn_mask = self.create_context_mask(input, src, src_lengths, tgt_lengths)
# context_attn_mask = context_attn_mask.unsqueeze(0)
# else:
# context_attn_mask = None
#
# dec_attn_mask = self.create_self_attn_mask(input, tgt_lengths, streaming_state.prev_tgt_mem_size)
#
# dec_attn_mask = dec_attn_mask[:, -1:, :]
#
# klen = 1 + streaming_state.prev_tgt_mem_size
#
# output = emb
#
# for i, layer in enumerate(self.layer_modules):
# # T x B x d_model
# buffer = streaming_state.tgt_buffer[i]
# # output, coverage = layer(output, context, pos_emb, self.r_w_bias, self.r_r_bias, dec_attn_mask, mask_src)
# # reuse_source = True if input.size(1) == 1 else False
# reuse_source = True
#
# # reuse source is True in this case because we can reuse the context ...
# output, coverage, buffer = layer(output, context, dec_attn_mask, context_attn_mask,
# incremental=True, incremental_cache=buffer, reuse_source=reuse_source)
# streaming_state.tgt_buffer[i] = buffer
#
# output = self.postprocess_layer(output)
#
# streaming_state.prev_tgt_mem_size += 1
# streaming_state.prune_target_memory(self.max_memory_size + input.size(0))
#
# extra_context = context[-self.extra_context_size:].detach()
#
# output_dict = defaultdict(lambda: None, {'hidden': output, 'coverage': coverage, 'context': context})
# output_dict['streaming_state'] = streaming_state
#
# return output_dict | 30,203 | 41.721358 | 127 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/unified_transformer.py | import torch
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, TransformerDecodingState
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.legacy.old_models.universal_transformer_layers import UniversalEncoderLayer, UniversalDecoderLayer
# from onmt.models.relative_transformer_layers import RelativeTransformerEncoderLayer, RelativeTransformerDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import numpy as np
torch.set_printoptions(profile="full")
class UnifiedTransformer(TransformerDecoder):
"""
    This class combines the encoder and the decoder into one single sequence,
    with joint attention between the encoder and decoder parts.
"""
def __init__(self, opt, src_embedding, tgt_embedding, generator, positional_encoder,
language_embeddings=None, encoder_type='text', **kwargs):
self.death_rate = opt.death_rate
self.bidirectional = opt.bidirectional
self.layer_modules = []
# build_modules will be called from the inherited constructor
super(UnifiedTransformer, self).__init__(opt, tgt_embedding,
positional_encoder,
language_embeddings=language_embeddings,
allocate_positions=True)
self.src_embedding = src_embedding
self.tgt_embedding = tgt_embedding
# self.language_embedding = nn.Embedding(3, self.model_size, padding_idx=0)
self.generator = generator
self.ignore_source = True
self.encoder_type = opt.encoder_type
# self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
self.d_head = self.model_size // self.n_heads
# self.build_modules()
def gen_mask(self, src, tgt):
input_seq = torch.cat([src, tgt], dim=-1)
seq_len = input_seq.size(1)
if self.bidirectional:
bsz, src_len = src.size(0), src.size(1)
tgt_len = tgt.size(1)
tgt_tgt_mask = torch.triu(src.new_ones(tgt_len, tgt_len), diagonal=1)
tgt_src_mask = src.new_zeros(tgt_len, src_len)
tgt_mask = torch.cat([tgt_src_mask, tgt_tgt_mask], dim=-1)
src_src_mask = src.new_zeros(src_len, src_len)
src_tgt_mask = src.new_ones(src_len, tgt_len)
src_mask = torch.cat([src_src_mask, src_tgt_mask], dim=-1)
attn_mask = torch.cat([src_mask, tgt_mask], dim=0)
attn_mask = attn_mask.bool()
pad_mask = input_seq.eq(onmt.constants.PAD).unsqueeze(1)
attn_mask = attn_mask | pad_mask
# attn_mask = attn_mask.byte() + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
# print(attn_mask[0])
# attn_mask = torch.gt(attn_mask, 0).bool()
else:
attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
attn_mask = torch.gt(attn_mask, 0).bool()
return attn_mask
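        # Illustrative example (added for clarity, not part of the original code):
        # with bidirectional=True, src_len=2, tgt_len=3 and no padding, gen_mask returns
        # (1 = blocked):
        #   [[0, 0, 1, 1, 1],
        #    [0, 0, 1, 1, 1],
        #    [0, 0, 0, 1, 1],
        #    [0, 0, 0, 0, 1],
        #    [0, 0, 0, 0, 0]]
        # source positions attend to the whole source but never to the target, while target
        # positions attend to the whole source and causally to the target prefix.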
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)
self.layer_modules = nn.ModuleList()
for l in range(self.layers):
# linearly decay the death rate
death_r = (l + 1.0) / self.layers * self.death_rate
            block = DecoderLayer(self.opt, death_rate=death_r)
self.layer_modules.append(block)
def forward(self, batch, target_mask=None, **kwargs):
src = batch.get('source').transpose(0, 1) # src_len x batch_size -> bsz x src_len
tgt = batch.get('target_input').transpose(0, 1) # len_tgt x batch_size -> bsz x tgt_len
src_pos = batch.get('source_pos')
tgt_pos = batch.get('target_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
tgt_len = tgt.size(1)
src_len = src.size(1)
bsz = tgt.size(0)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
tgt_emb = embedded_dropout(self.tgt_embedding, tgt, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
# Add position encoding
src_emb = self.time_transformer(src_emb)
tgt_emb = self.time_transformer(tgt_emb)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb.unsqueeze(1)
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb.unsqueeze(1)
# concatenate embedding
        emb = torch.cat([src_emb, tgt_emb], dim=1)  # batch_size x (src_len + tgt_len) x H
# prepare self-attention mask
# For the source: we have two different parts
# [1 x src_len x batch_size]
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(0).byte()
# src_pad_mask = mask_src_src
# # Attention from src to target: everything is padded
# mask_src_tgt = mask_src_src.new_ones(1, 1, 1).expand(src_len, tgt_len, bsz)
# # [src_len x L x batch_size]
# mask_src = torch.cat([mask_src_src.expand(src_len, src_len, bsz), mask_src_tgt], dim=1)
# mask_src = mask_src.bool()
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(1).byte() # B x 1 x src_len
# mask_src_tgt = mask_src_src.new_ones(bsz, src_len, tgt_len) # bsz x src_len x tgt_len
#
# mask_src = torch.cat([mask_src_src.expand(bsz, src_len, src_len), mask_src_tgt], dim=-1)
#
# # For the target:
# mask_tgt_tgt = tgt.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:tgt_len, :tgt_len]
# mask_tgt_tgt = torch.gt(mask_tgt_tgt, 0).byte() # bsz x tgt_len x tgt_len
#
# mask_tgt_src = mask_tgt_tgt.new_zeros(bsz, tgt_len, src_len) + src.eq(onmt.constants.PAD).unsqueeze(1).byte()
# mask_tgt = torch.cat([mask_tgt_src, mask_tgt_tgt], dim=-1) # bsz x tgt_len x T
#
# attn_mask = torch.cat([mask_src, mask_tgt], dim=1).bool() # L x L x batch_size
# lets try to use language modeling style
# input_seq = torch.cat([src, tgt], dim=-1)
# seq_len = input_seq.size(1)
#
# attn_mask = self.mask[:seq_len, :seq_len] + input_seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
# attn_mask = torch.gt(attn_mask, 0).bool()
attn_mask = self.gen_mask(src, tgt)
output = emb
        # Applying dropout and transposing to T x B x H
output = self.preprocess_layer(output).transpose(0, 1)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage = layer(output, None, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
# extract the "source" and "target" parts of the output
context = output[:src_len, :, :]
output = output[-tgt_len:, :, :]
output_dict = {'hidden': output, 'coverage': coverage, 'context': context, 'src': src,
'target_mask': target_mask}
# final layer: computing log probabilities
logprobs = self.generator[0](output_dict)
output_dict['logprobs'] = logprobs
return output_dict
def encode(self, input, decoder_state, input_pos=None, input_lang=None):
buffers = decoder_state.attention_buffers
src_lang = input_lang
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, input, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
# Add position encoding
src_emb = self.time_transformer(src_emb)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb.unsqueeze(1)
emb = src_emb
src_len = input.size(1)
bsz = input.size(0)
mask_src_src = input.eq(onmt.constants.PAD).unsqueeze(1).byte() # B x 1 x src_len
mask_src = mask_src_src
attn_mask = mask_src.bool() # L x L x batch_size
output = emb
# Applying dropout and tranpose to T x B x H
output = self.preprocess_layer(output).transpose(0, 1)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
# context and context_mask are None
buffer = buffers[i] if i in buffers else None
output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer)
decoder_state.update_attention_buffer(buffer, i)
# Final normalization
output = self.postprocess_layer(output)
return output
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
# raise NotImplementedError
tgt_output = batch.get('target_output')
output_dict = self.forward(batch, target_mask=None)
context = output_dict['context']
logprobs = output_dict['logprobs']
batch_size = logprobs.size(1)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
for gen_t, tgt_t in zip(logprobs, tgt_output):
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
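        # Note added for clarity: at each time step gen_t holds (batch, vocab) log-probabilities
        # and tgt_t the gold indices, so gen_t.gather(1, tgt_t) selects log P(gold word | prefix);
        # padded positions are zeroed out before being summed into the per-sentence gold_scores.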
def renew_buffer(self, new_len):
# This model uses pre-allocated position encoding
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len + 1, new_len + 1)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
return
def reset_states(self):
return
def step(self, input, decoder_state):
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
tgt = input
tgt_lang = decoder_state.tgt_lang
src_lang = decoder_state.src_lang
# print(src.size(), tgt.size())
# print(src_lang, tgt_lang)
tgt_len = tgt.size(1)
src_len = src.size(1)
bsz = tgt.size(0)
# Embedding stage (and scale the embedding)
src_emb = embedded_dropout(self.src_embedding, src, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
tgt_emb = embedded_dropout(self.tgt_embedding, tgt, dropout=self.word_dropout if self.training else 0) \
* math.sqrt(self.model_size)
# Add position encoding
src_emb = self.time_transformer(src_emb)
tgt_emb = self.time_transformer(tgt_emb)
if self.use_language_embedding:
if self.language_embedding_type in ["sum", "all_sum"]:
src_lang_emb = self.language_embeddings(src_lang)
src_emb += src_lang_emb.unsqueeze(1)
tgt_lang_emb = self.language_embeddings(tgt_lang)
tgt_emb += tgt_lang_emb.unsqueeze(1)
# concatenate embedding
        emb = torch.cat([src_emb, tgt_emb], dim=1)  # batch_size x (src_len + tgt_len) x H
# prepare self-attention mask
# For the source: we have two different parts
# [1 x src_len x batch_size]
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(0).byte()
# src_pad_mask = mask_src_src
# # Attention from src to target: everything is padded
# mask_src_tgt = mask_src_src.new_ones(1, 1, 1).expand(src_len, tgt_len, bsz)
# # [src_len x L x batch_size]
# mask_src = torch.cat([mask_src_src.expand(src_len, src_len, bsz), mask_src_tgt], dim=1)
# mask_src = mask_src.bool()
# mask_src_src = src.eq(onmt.constants.PAD).unsqueeze(1).byte() # B x 1 x src_len
# mask_src_tgt = mask_src_src.new_ones(bsz, src_len, tgt_len) # bsz x src_len x tgt_len
#
# mask_src = torch.cat([mask_src_src.expand(bsz, src_len, src_len), mask_src_tgt], dim=-1)
#
# # For the target:
# mask_tgt_tgt = tgt.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:tgt_len, :tgt_len]
# mask_tgt_tgt = torch.gt(mask_tgt_tgt, 0).byte() # bsz x tgt_len x tgt_len
#
# mask_tgt_src = mask_tgt_tgt.new_zeros(bsz, tgt_len, src_len) + src.eq(onmt.constants.PAD).unsqueeze(1).byte()
# mask_tgt = torch.cat([mask_tgt_src, mask_tgt_tgt], dim=-1) # bsz x tgt_len x T
# attn_mask = torch.cat([mask_src, mask_tgt], dim=1).bool() # L x L x batch_size
attn_mask = self.gen_mask(src, input)
# seq = torch.cat([src, input], dim=-1)
# seq_len = seq.size(1)
# attn_mask = self.mask[:seq_len, :seq_len] + seq.eq(onmt.constants.PAD).byte().unsqueeze(1)
# attn_mask = torch.gt(attn_mask, 0).bool()
output = emb
# Applying dropout and tranpose to T x B x H
output = self.preprocess_layer(output).transpose(0, 1)
# FORWARD PASS
coverage = None
for i, layer in enumerate(self.layer_modules):
output, coverage = layer(output, None, attn_mask, None) # context and context_mask are None
# Final normalization
output = self.postprocess_layer(output)
output = output[-1:, :, :]
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
logprobs = self.generator[0](output_dict).squeeze(0)
output_dict['src'] = decoder_state.src.transpose(0, 1)
output_dict['log_prob'] = logprobs
output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
# buffers = decoder_state.attention_buffers
# tgt_lang = decoder_state.tgt_lang
# src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
#
# if decoder_state.concat_input_seq:
# if decoder_state.input_seq is None:
# decoder_state.input_seq = input
# else:
# # concatenate the last input to the previous input sequence
# decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
#
# # For Transformer, both inputs are assumed as B x T (batch first)
# input = decoder_state.input_seq.transpose(0, 1)
# src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
#
# if input.size(1) > 1:
# input_ = input[:, -1].unsqueeze(1)
# else:
# input_ = input
# """ Embedding: batch_size x 1 x d_model """
# # check = input_.gt(self.word_lut.num_embeddings)
# print(input.size())
# emb = self.tgt_embedding(input_) * math.sqrt(self.model_size)
#
# """ Adding positional encoding """
# emb = self.time_transformer(emb, t=input.size(1))
#
# if self.use_language_embedding:
# if self.language_embedding_type in ["sum", "all_sum"]:
#
# tgt_lang_emb = self.language_embeddings(tgt_lang)
# emb += tgt_lang_emb.unsqueeze(1)
#
# emb = emb.transpose(0, 1)
#
# # attention mask For the target:
# tgt_len = input.size(1)
# bsz = input.size(0)
# src_len = src.size(1)
# mask_tgt_tgt = input.eq(onmt.constants.PAD).byte().unsqueeze(1) + self.mask[:tgt_len, :tgt_len]
# mask_tgt_tgt = torch.gt(mask_tgt_tgt, 0).byte() # bsz x tgt_len x tgt_len
#
# mask_tgt_src = mask_tgt_tgt.new_zeros(bsz, tgt_len, src_len) + src.eq(onmt.constants.PAD).unsqueeze(1).byte()
#
# mask_tgt = torch.cat([mask_tgt_src, mask_tgt_tgt], dim=-1) # bsz x tgt_len x T
#
# # take the last element of the 'target sequence' for the mask
# attn_mask = mask_tgt[:, -1, :].unsqueeze(1).bool()
#
# output = emb
#
# for i, layer in enumerate(self.layer_modules):
# buffer = buffers[i] if i in buffers else None
# assert (output.size(0) == 1)
#
# output, coverage, buffer = layer.step(output, None, attn_mask, None, buffer=buffer)
#
# decoder_state.update_attention_buffer(buffer, i)
#
# # Final normalization
# output_dict = defaultdict(lambda: None)
# output_dict['hidden'] = output
#
# logprobs = self.generator[0](output_dict).squeeze(0)
#
# output_dict['src'] = decoder_state.src.transpose(0, 1)
# output_dict['log_prob'] = logprobs
# output_dict['coverage'] = logprobs.new(bsz, tgt_len, src_len).zero_()
return output_dict
def create_decoder_state(self, batch, beam_size=1, type=1):
src = batch.get('source')
src_pos = batch.get('source_pos')
src_lang = batch.get('source_lang')
tgt_lang = batch.get('target_lang')
src_transposed = src.transpose(0, 1) # B x T
decoder_state = TransformerDecodingState(src, tgt_lang, None, None,
beam_size=beam_size, model_size=self.model_size, type=type)
# forward pass through the input to get the buffer
# _ = self.encode(src_transposed, decoder_state, input_pos=src_pos, input_lang=src_lang)
decoder_state.src_lang = src_lang
# buffers = decoder_state.attention_buffers
# bsz = src.size(1)
# new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
# new_order = new_order.to(src.device)
#
# for l in buffers:
# buffer_ = buffers[l]
# if buffer_ is not None:
# for k in buffer_.keys():
# t_, br_, d_ = buffer_[k].size()
# buffer_[k] = buffer_[k].index_select(1, new_order) # 1 for time first
return decoder_state
def tie_weights(self):
assert self.generator is not None, "The generator needs to be created before sharing weights"
self.generator[0].linear.weight = self.tgt_embedding.weight
def share_enc_dec_embedding(self):
self.src_embedding.weight = self.tgt_embedding.weight | 19,467 | 40.866667 | 119 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/universal_transformer.py | import torch
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.legacy.old_models.universal_transformer_layers import UniversalEncoderLayer, UniversalDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
class UniversalTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.max_pos_length = opt.max_pos_length
self.universal_layer = None
self.max_layers = opt.layers
# build_modules will be called from the inherited constructor
super(UniversalTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
self.positional_encoder = positional_encoder
# learnable embeddings for each layer
self.layer_embedding = nn.Embedding(opt.layers, opt.model_size)
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Encoder with Absolute Attention with %.2f expected layers" % e_length)
self.universal_layer = UniversalEncoderLayer(self.opt, death_rate=self.death_rate)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: batch_size x src_len (will be transposed)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
mask_src = input.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x 1 x len_src for broadcasting
# apply switchout
# if self.switchout > 0 and self.training:
# vocab_size = self.word_lut.weight.size(0)
# input = switchout(input, vocab_size, self.switchout)
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
else:
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4].unsqueeze(1)
# the size seems to be B x T ?
emb = input
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(1)
time_encoding = self.positional_encoder.get_positional_embeddings(emb)
# B x T x H -> T x B x H
context = self.preprocess_layer(emb.transpose(0, 1))
for i in range(self.max_layers):
layer_vector = torch.LongTensor([i]).to(emb.device)
layer_vector = self.layer_embedding(layer_vector).unsqueeze(0) # 1 x 1 x model_size
context = self.universal_layer(context, time_encoding, layer_vector, mask_src)
# last layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': mask_src, 'src': input})
if streaming:
streaming_state.prev_src_mem_size += sum(input_length.tolist())
streaming_state.prune_source_memory(self.max_memory_size)
# streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
class UniversalTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
self.universal_layer = None
opt.ignore_source = ignore_source
self.max_layers = opt.layers
# build_modules will be called from the inherited constructor
super(UniversalTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source)
self.positional_encoder = positional_encoder
# Parameters for the position biases
self.layer_embeddings = nn.Embedding(opt.layers, opt.model_size)
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Decoder with Absolute Attention with %.2f expected layers" % e_length)
self.universal_layer = UniversalDecoderLayer(self.opt, death_rate=self.death_rate)
# TODO: merging forward_stream and forward
# TODO: write a step function for encoder
def forward(self, input, context, src, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (will be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
mask_tgt = mask_tgt.bool()
time_embedding = self.positional_encoder.get_positional_embeddings(emb)
output = self.preprocess_layer(emb.transpose(0, 1).contiguous())
for i in range(self.max_layers):
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
output, coverage, _ = self.universal_layer(output, time_embedding, layer_embedding, context,
mask_tgt, mask_src)
# last layer norm
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
mask_src = decoder_state.src_mask
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1)
else:
input_ = input
""" Embedding: batch_size x 1 x d_model """
check = input_.gt(self.word_lut.num_embeddings)
emb = self.word_lut(input_)
""" Adding positional encoding """
emb = emb * math.sqrt(self.model_size)
time_embedding = self.time_transformer.get_positional_embeddings(emb, t=input.size(1))
# emb should be batch_size x 1 x dim
if self.use_language_embedding:
if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
if input.size(1) == 1:
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
if context is not None:
if mask_src is None:
if self.encoder_type == "audio":
if src.data.dim() == 3:
if self.encoder_cnn_downsampling:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte().unsqueeze(0)
# # only get the final step of the mask during decoding (because the input of the network is only the last step)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
# mask_tgt = None
mask_tgt = mask_tgt.bool()
output = emb.contiguous()
for i in range(self.max_layers):
buffer = buffers[i] if i in buffers else None
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
assert (output.size(0) == 1)
output, coverage, buffer = self.universal_layer(output, time_embedding, layer_embedding, context,
mask_tgt, mask_src,
incremental=True, incremental_cache=buffer)
decoder_state.update_attention_buffer(buffer, i)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
| 14,946 | 42.074928 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/universal_transformer_layers.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.modules.static_dropout import StaticDropout
from onmt.modules.linear import XavierLinear as Linear
from onmt.modules.linear import XavierLinear
from onmt.modules.linear import group_linear, FeedForwardSwish
from onmt.modules.linear import FeedForward
from onmt.modules.attention import MultiHeadAttention
from onmt.modules.dropout import VariationalDropout
from onmt.modules.optimized.encdec_attention import EncdecMultiheadAttn
from onmt.modules.optimized.self_attention import SelfMultiheadAttn
from collections import defaultdict
from onmt.models.transformers import PrePostProcessing, EncoderLayer, DecoderLayer
class UniversalEncoderLayer(EncoderLayer):
def __init__(self, opt, death_rate=0.0, **kwargs):
super().__init__(opt, death_rate=death_rate)
def forward(self, input, time_embedding, layer_vector, attn_mask):
input = input + time_embedding.unsqueeze(1) + layer_vector
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
# print(query.size(), attn_mask.size())
if self.fast_self_attention:
out, _ = self.multihead(query, query, query, attn_mask, None)
else:
out, _ = self.multihead(query, query, query, attn_mask)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
return input
class UniversalDecoderLayer(DecoderLayer):
def __init__(self, opt, death_rate=0.0):
super().__init__(opt, death_rate=death_rate)
def forward(self, input, time_embedding, layer_vector, context, mask_tgt, mask_src,
incremental=False, incremental_cache=None, reuse_source=True):
"""
:param input:
:param layer_vector:
:param context:
:param mask_tgt:
:param mask_src:
:param incremental:
:param incremental_cache:
:param reuse_source:
:return:
"""
# sum up
input = input + time_embedding.unsqueeze(1) + layer_vector
assert(len(input.shape) == 3)
if incremental:
if incremental_cache is None:
incremental_cache = dict()
coverage = None
coin = True
if self.training:
coin = (torch.rand(1)[0].item() >= self.death_rate)
if coin:
query = self.preprocess_attn(input)
if self.fast_self_attention:
out, _, = self.multihead_tgt(query, query, query, None, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
else:
out, _, = self.multihead_tgt(query, query, query, mask_tgt,
incremental=incremental,
incremental_cache=incremental_cache)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
if not self.ignore_source:
query = self.preprocess_src_attn(input)
out, coverage = self.multihead_src(query, context, context, mask_src,
incremental=incremental,
incremental_cache=incremental_cache)
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_src_attn(out, input)
else:
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
if self.training and self.death_rate > 0:
out = out / (1 - self.death_rate)
input = self.postprocess_ffn(out, input)
return input, coverage, incremental_cache
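# Hedged sketch (added for illustration, not part of the original code): the universal
# layers above are weight-shared across depth; the caller re-applies a single layer
# instance and injects a learned per-iteration layer embedding together with the shared
# time (position) embedding, roughly as in the hypothetical helper below.
def _demo_universal_unroll(shared_layer, x, time_embedding, layer_embedding_table, attn_mask, n_steps):
    """Apply one shared UniversalEncoderLayer n_steps times with a per-step layer embedding."""
    for step in range(n_steps):
        step_index = torch.LongTensor([step]).to(x.device)
        layer_vector = layer_embedding_table(step_index).unsqueeze(0)  # 1 x 1 x model_size
        x = shared_layer(x, time_embedding, layer_vector, attn_mask)
    return x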
| 4,861 | 33.48227 | 87 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/old_models/relative_universal_transformer.py | import torch
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.models.transformer_layers import EncoderLayer, DecoderLayer
from onmt.models.transformers import TransformerEncoder, TransformerDecoder, Transformer, TransformerDecodingState
import onmt
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import PrePostProcessing
from onmt.legacy.old_models.relative_universal_transformer_layers import \
RelativeUniversalEncoderLayer, RelativeUniversalDecoderLayer
from onmt.utils import flip, expected_length
from collections import defaultdict
import math
import sys
torch.set_printoptions(threshold=500000)
# Positional Embedding with discrete inputs
class SinusoidalPositionalEmbedding(nn.Module):
def __init__(self, demb):
super(SinusoidalPositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, sin_first=True, bsz=None):
"""
:param bsz:
:param pos_seq: sequences of RELATIVE position indices (can be negative for future)
        :param sin_first: in the Attention is All You Need paper, sin comes first, then cosine
"""
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
if sin_first:
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
else:
pos_emb = torch.cat([sinusoid_inp.cos(), sinusoid_inp.sin()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].repeat(1, bsz, 1)
else:
return pos_emb[:, None, :]
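# Hedged usage sketch (added for illustration, not used by the models below): relative
# position indices run from the most distant past position down to the most distant
# future one, and each index is mapped to a d_model-dimensional sinusoidal vector.
def _demo_sinusoidal_positions(d_model=8, qlen=4, bsz=2):
    """Hypothetical helper showing how the embedding above is typically queried."""
    pos_emb_layer = SinusoidalPositionalEmbedding(d_model)
    # bidirectional case: 2*qlen - 1 relative positions, from (qlen - 1) down to -(qlen - 1)
    pos_seq = torch.arange(qlen - 1, -qlen, -1.0)
    pos_emb = pos_emb_layer(pos_seq, bsz=bsz)
    return pos_emb.shape  # expected: (2 * qlen - 1, bsz, d_model)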
class RelativeUniversalTransformerEncoder(TransformerEncoder):
def __init__(self, opt, dicts, positional_encoder, encoder_type='text', language_embeddings=None):
self.death_rate = opt.death_rate
self.double_position = opt.double_position
self.learnable_position_encoding = opt.learnable_position_encoding
self.layer_modules = list()
self.asynchronous = opt.asynchronous
self.max_memory_size = opt.max_memory_size
self.extra_context_size = opt.extra_context_size
self.max_pos_length = opt.max_pos_length
self.universal_layer = None
self.unidirectional = opt.unidirectional
self.adaptive_type = opt.adaptive
# build_modules will be called from the inherited constructor
super(RelativeUniversalTransformerEncoder, self).__init__(opt, dicts, positional_encoder, encoder_type,
language_embeddings)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# learnable embeddings for each layer
self.layer_embedding = nn.Embedding(self.layers, opt.model_size)
self.d_head = self.model_size // self.n_heads
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Encoder with Relative Attention with %.2f expected layers" % e_length)
self.universal_layer = RelativeUniversalEncoderLayer(self.opt, death_rate=self.death_rate)
def forward(self, input, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: batch_size x src_len (will be transposed)
Outputs Shapes:
out: batch_size x src_len x d_model
mask_src
"""
""" Embedding: batch_size x src_len x d_model """
if self.input_type == "text":
mask_src = input.eq(onmt.constants.PAD) # batch_size x len_src
# apply switchout
# if self.switchout > 0 and self.training:
# vocab_size = self.word_lut.weight.size(0)
# input = switchout(input, vocab_size, self.switchout)
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
else:
if not self.cnn_downsampling:
mask_src = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
emb = self.audio_trans(input.contiguous().view(-1, input.size(2))).view(input.size(0),
input.size(1), -1)
emb = emb.type_as(input)
else:
long_mask = input.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
input = input.narrow(2, 1, input.size(2) - 1)
# first resizing to fit the CNN format
input = input.view(input.size(0), input.size(1), -1, self.channels)
input = input.permute(0, 3, 1, 2)
input = self.audio_trans(input)
input = input.permute(0, 2, 1, 3).contiguous()
input = input.view(input.size(0), input.size(1), -1)
# print(input.size())
input = self.linear_trans(input)
mask_src = long_mask[:, 0:input.size(1) * 4:4]
# the size seems to be B x T ?
emb = input
mask_src = mask_src.bool()
""" Scale the emb by sqrt(d_model) """
emb = emb * math.sqrt(self.model_size)
""" Adding language embeddings """
if self.use_language_embedding:
assert self.language_embedding is not None
if self.language_embedding_type in ['sum', 'all_sum']:
lang_emb = self.language_embedding(input_lang)
emb = emb + lang_emb.unsqueeze(1)
mem_len = 0
qlen = input.size(1)
klen = qlen + mem_len
if self.unidirectional:
pos = torch.arange(klen - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
else:
pos = torch.arange(klen - 1, -klen, -1.0, device=emb.device, dtype=emb.dtype)
        # time_encoding has size (2*klen - 1) x bsz x H (bidirectional) or klen x bsz x H (unidirectional)
time_encoding = self.positional_encoder(pos, bsz=input.size(0))
# B x T x H -> T x B x H
context = self.preprocess_layer(emb.transpose(0, 1))
time_encoding = self.preprocess_layer(time_encoding)
# print(input.size(), context.size(), pos.size(), time_encoding.size())
for i in range(self.layers):
layer_vector = torch.LongTensor([i]).to(emb.device)
layer_vector = self.layer_embedding(layer_vector).unsqueeze(0) # 1 x 1 x model_size
context = self.universal_layer(context, time_encoding, layer_vector, mask_src)
# last layer norm
context = self.postprocess_layer(context)
output_dict = defaultdict(lambda: None, {'context': context, 'src_mask': mask_src, 'src': input})
if streaming:
streaming_state.prev_src_mem_size += sum(input_length.tolist())
streaming_state.prune_source_memory(self.max_memory_size)
# streaming_state.update_src_mems(hids, qlen)
output_dict['streaming_state'] = streaming_state
return output_dict
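# A minimal, self-contained sketch of the weight-sharing pattern used in the encoder above:
# one shared layer is applied `layers` times and a learnable per-iteration embedding is added
# at each step. The toy Linear layer and all sizes are assumptions chosen only for illustration.
def _universal_layer_loop_sketch():
    import torch
    import torch.nn as nn
    layers, model_size, seq_len, bsz = 4, 16, 5, 2
    layer_embedding = nn.Embedding(layers, model_size)
    shared_layer = nn.Linear(model_size, model_size)  # stand-in for the universal layer
    context = torch.randn(seq_len, bsz, model_size)   # T x B x H, as in forward()
    for i in range(layers):
        layer_vector = layer_embedding(torch.LongTensor([i])).unsqueeze(0)  # 1 x 1 x H
        context = shared_layer(context + layer_vector)  # broadcast over time and batch
    return context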
class RelativeUniversalTransformerDecoder(TransformerDecoder):
def __init__(self, opt, dicts, positional_encoder, language_embeddings=None, ignore_source=False):
self.death_rate = opt.death_rate
self.max_memory_size = opt.max_memory_size
self.stream_context = opt.stream_context
self.extra_context_size = opt.extra_context_size
self.universal_layer = None
opt.ignore_source = ignore_source
# build_modules will be called from the inherited constructor
super(RelativeUniversalTransformerDecoder, self).__init__(opt, dicts,
positional_encoder,
language_embeddings,
ignore_source, allocate_positions=False)
self.positional_encoder = SinusoidalPositionalEmbedding(opt.model_size)
# Parameters for the position biases
self.layer_embeddings = nn.Embedding(opt.layers, opt.model_size)
def renew_buffer(self, new_len):
return
def build_modules(self):
e_length = expected_length(self.layers, self.death_rate)
print("* Universal Transformer Decoder with Relative Attention with %.2f expected layers" % e_length)
self.universal_layer = RelativeUniversalDecoderLayer(self.opt, death_rate=self.death_rate)
def forward(self, input, context, src, input_pos=None, input_lang=None, streaming=False, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x src_len x d_model
mask_src (Tensor) batch_size x src_len
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x src_len
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.use_language_embedding:
lang_emb = self.language_embeddings(input_lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb.unsqueeze(1)
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
if context is not None:
if self.encoder_type == "audio":
if not self.encoder_cnn_downsampling:
mask_src = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
else:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4]
else:
mask_src = src.data.eq(onmt.constants.PAD)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(emb.new_ones(len_tgt, len_tgt), diagonal=1).byte()
mask_tgt = mask_tgt.bool()
pos = torch.arange(len_tgt - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
time_encoding = self.positional_encoder(pos, bsz=input.size(0))
output = self.preprocess_layer(emb.transpose(0, 1).contiguous())
time_encoding = self.preprocess_layer(time_encoding)
for i in range(self.layers):
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
output, coverage, _ = self.universal_layer(output, context, time_encoding, layer_embedding,
mask_tgt, mask_src)
# last layer norm
output = self.postprocess_layer(output)
output_dict = {'hidden': output, 'coverage': coverage, 'context': context}
output_dict = defaultdict(lambda: None, output_dict)
return output_dict
def step(self, input, decoder_state, **kwargs):
"""
Inputs Shapes:
input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
context = decoder_state.context
buffers = decoder_state.attention_buffers
lang = decoder_state.tgt_lang
mask_src = decoder_state.src_mask
if decoder_state.concat_input_seq:
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
src = decoder_state.src.transpose(0, 1) if decoder_state.src is not None else None
if input.size(1) > 1:
input_ = input[:, -1].unsqueeze(1)
else:
input_ = input
""" Embedding: batch_size x 1 x d_model """
check = input_.gt(self.word_lut.num_embeddings)
emb = self.word_lut(input)
""" Adding positional encoding """
emb = emb * math.sqrt(self.model_size)
# emb should be batch_size x 1 x dim
        if self.use_language_embedding:
lang_emb = self.language_embeddings(lang) # B x H or 1 x H
if self.language_embedding_type == 'sum':
emb = emb + lang_emb
elif self.language_embedding_type == 'concat':
# replace the bos embedding with the language
if input.size(1) == 1:
bos_emb = lang_emb.expand_as(emb[:, 0, :])
emb[:, 0, :] = bos_emb
lang_emb = lang_emb.unsqueeze(1).expand_as(emb)
concat_emb = torch.cat([emb, lang_emb], dim=-1)
emb = torch.relu(self.projector(concat_emb))
else:
raise NotImplementedError
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
if context is not None:
if mask_src is None:
if self.encoder_type == "audio":
if src.data.dim() == 3:
if self.encoder_cnn_downsampling:
long_mask = src.data.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.narrow(2, 0, 1).squeeze(2).eq(onmt.constants.PAD).unsqueeze(1)
elif self.encoder_cnn_downsampling:
long_mask = src.eq(onmt.constants.PAD)
mask_src = long_mask[:, 0:context.size(0) * 4:4].unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = src.eq(onmt.constants.PAD).unsqueeze(1)
else:
mask_src = None
len_tgt = input.size(1)
mask_tgt = torch.triu(
emb.new_ones(len_tgt, len_tgt), diagonal=1).byte()
# # only get the final step of the mask during decoding (because the input of the network is only the last step)
# mask_tgt = mask_tgt[-1].unsqueeze(0)
# mask_tgt = None
mask_tgt = mask_tgt.bool()
output = emb.contiguous()
pos = torch.arange(len_tgt - 1, -1, -1.0, device=emb.device, dtype=emb.dtype)
time_encoding = self.positional_encoder(pos, bsz=input.size(0))
# time_encoding = time_encoding[-1].unsqueeze(0)
for i in range(self.layers):
# buffer = buffers[i] if i in buffers else None
layer_tensor = torch.LongTensor([i]).to(output.device)
layer_embedding = self.layer_embeddings(layer_tensor)
# assert (output.size(0) == 1)
output, coverage, _ = self.universal_layer(output, context, time_encoding, layer_embedding,
mask_tgt, mask_src)
# decoder_state.update_attention_buffer(buffer, i)
output = output[-1:]
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['coverage'] = coverage
output_dict['context'] = context
return output_dict
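# A minimal sketch (illustration only) of the decoding pattern used in step() above: the full
# target prefix is re-run with a causal mask and only the newest position is kept.
# All sizes are assumptions for the demo.
def _step_recompute_sketch():
    import torch
    len_tgt, bsz, model_size = 4, 2, 16
    mask_tgt = torch.triu(torch.ones(len_tgt, len_tgt), diagonal=1).bool()  # True = masked out
    output = torch.randn(len_tgt, bsz, model_size)  # T x B x H after the universal layer
    last_step = output[-1:]                         # keep only the hidden state of the new token
    return mask_tgt, last_step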
| 16,566 | 41.155216 | 120 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/FCTransformer/Layers.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.utils.weight_norm as WeightNorm
import onmt
import torch.nn.functional as F
from onmt.modules.bottle import Bottle
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
from onmt.modules.static_dropout import StaticDropout
Linear=XavierLinear
def contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
class UniformMultiHeadAttention(nn.Module):
"""Applies multi-head attentions to inputs (query, key, value)
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
Params:
fc_query: FC layer to project query, d_model x (h x d_head)
fc_key: FC layer to project key, d_model x (h x d_head)
fc_value: FC layer to project value, d_model x (h x d_head)
fc_concat: FC layer to concat and project multiheads, d_model x (h x d_head)
Inputs Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Outputs Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, attn_p=0.1):
super(UniformMultiHeadAttention, self).__init__()
self.h = h
self.d = d_model
assert d_model % h == 0
self.d_head = d_model//h
# first attention layer for states
self.fc_query = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_key = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_value = Bottle(Linear(d_model, h*self.d_head, bias=False))
# second attention for layers
#~ self.fc_query_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_key_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_value_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
# for output
self.sm = nn.Softmax(dim=-1)
self.fc_concat = Bottle(Linear(h*self.d_head, d_model, bias=False))
#~ self.fc_concat_2 = Bottle(Linear(d_model, d_model, bias=False))
#~ self.attn_dropout = nn.Dropout(attn_p)
self.attn_dropout = StaticDropout(attn_p)
#~ self.attn_dropout_2 = StaticDropout(attn_p)
    def _prepare_proj(self, x):
        """Reshape the projections to apply softmax on each head
"""
b, l, d = x.size()
return contiguous(x.view(b, l, self.h, self.d_head).transpose(1,2)).view(b*self.h, l, self.d_head)
def shape(self, x):
b, l, d = x.size()
return x.view(b, l, self.h, self.d_head) \
.transpose(1, 2)
def forward(self, query, key, mask=None, query_mask=None, value_mask=None):
n_layer, b, len_key = key.size(0), key.size(1), key.size(2)
if value_mask is not None:
value_mask = value_mask.unsqueeze(0).repeat(n_layer, 1, 1)
key_mask = value_mask # B x T
b, len_query = query.size(0), query.size(1)
value = key
# project inputs to multi-heads
proj_query = self.fc_query(query, mask=query_mask) # batch_size x len_query x h*d_head
proj_key = self.fc_key(key, mask=key_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
proj_value = self.fc_value(value, mask=value_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
# prepare the shape for applying softmax
proj_query = self.shape(proj_query) # batch_size x h x len_query x d_head
proj_key = self.shape(proj_key) # batch_size x h x (n_layer * len_key) x d_head
proj_value = self.shape(proj_value) # batch_size x h x (n_layer * len_key) x d_head
proj_query = proj_query * (self.d_head**-0.5)
        # get dot-product softmax attns for each head
scores = torch.matmul(proj_query, proj_key.transpose(2,3)) # b x self.h x len_query x n_layer*len_key
# applying mask using broadcasting
mask_ = Variable(mask.unsqueeze(-3).unsqueeze(-2))
scores = scores.view(b, self.h, len_query, n_layer, len_key)
scores = scores.masked_fill_(mask_, -float('inf'))
scores = scores.view(b, self.h, len_query, n_layer*len_key)
# softmax on the last dimension (all of the previous states)
attns = self.sm(scores) # b x 1 x len_query x n_layer*lenkey
attns = self.attn_dropout(attns)
out = torch.matmul(attns, proj_value) # b x self.h x len_query x self.d_head)
out = out.transpose(1, 2).contiguous().view(b, len_query, self.h * self.d_head)
out = self.fc_concat(out, mask=query_mask)
#~ out = final_out.view(b, len_query, self.h*self.d_head)
coverage = None
return out, coverage
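# A minimal sketch (illustration only) of the reshaping trick in UniformMultiHeadAttention:
# keys and values collected from several previous layers are flattened so that a single softmax
# runs over n_layer * len_key positions, with the padding mask broadcast over the layer axis.
# All sizes are assumptions for the demo.
def _uniform_layer_attention_sketch():
    import torch
    import torch.nn.functional as F
    b, h, len_q, n_layer, len_k, d_head = 2, 4, 3, 5, 6, 8
    q = torch.randn(b, h, len_q, d_head) * d_head ** -0.5
    k = torch.randn(b, h, n_layer * len_k, d_head)
    v = torch.randn(b, h, n_layer * len_k, d_head)
    scores = torch.matmul(q, k.transpose(2, 3))                # b x h x len_q x (n_layer*len_k)
    pad_mask = torch.zeros(b, len_q, len_k, dtype=torch.bool)  # True would mark padded keys
    scores = scores.view(b, h, len_q, n_layer, len_k)
    scores = scores.masked_fill(pad_mask.unsqueeze(1).unsqueeze(-2), -float('inf'))
    attns = F.softmax(scores.view(b, h, len_q, n_layer * len_k), dim=-1)
    out = torch.matmul(attns, v)                               # b x h x len_q x d_head
    return out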
class HierarchicalMultiHeadAttention(nn.Module):
"""Applies multi-head attentions to inputs (query, key, value)
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
Params:
fc_query: FC layer to project query, d_model x (h x d_head)
fc_key: FC layer to project key, d_model x (h x d_head)
fc_value: FC layer to project value, d_model x (h x d_head)
fc_concat: FC layer to concat and project multiheads, d_model x (h x d_head)
Inputs Shapes:
        query: batch_size x len_query x d_model
        key: n_layer x batch_size x len_key x d_model (stacked states of previous layers, also used as value)
mask: batch_size x len_query x len_key or broadcastable
Outputs Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, attn_p=0.1):
super(HierarchicalMultiHeadAttention, self).__init__()
self.h = h
self.d = d_model
assert d_model % h == 0
self.d_head = d_model//h
# first attention layer for states
self.fc_query = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_key = Bottle(Linear(d_model, h*self.d_head, bias=False))
self.fc_value = Bottle(Linear(d_model, h*self.d_head, bias=False))
# second attention for layers
self.fc_query_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_key_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
#~ self.fc_value_2 = Bottle(Linear(d_model, h*self.d_head, bias=False))
# for output
self.fc_concat = Bottle(Linear(h*self.d_head, d_model, bias=False))
self.fc_concat_2 = Bottle(Linear(d_model, d_model, bias=False))
self.sm = nn.Softmax(dim=-1)
self.sm_2 = nn.Softmax(dim=-1)
#~ self.attn_dropout = nn.Dropout(attn_p)
self.attn_dropout = StaticDropout(attn_p)
self.attn_dropout_2 = StaticDropout(attn_p)
    def _prepare_proj(self, x):
        """Reshape the projections to apply softmax on each head
"""
b, l, d = x.size()
return contiguous(x.view(b, l, self.h, self.d_head).transpose(1,2)).view(b*self.h, l, self.d_head)
def shape(self, x):
b, l, d = x.size()
return x.view(b, l, self.h, self.d_head) \
.transpose(1, 2)
def forward(self, query, key, mask=None, query_mask=None, value_mask=None):
n_layer, b, len_key = key.size(0), key.size(1), key.size(2)
#~ query_mask = None
#~ value_mask = None
if value_mask is not None:
value_mask = value_mask.unsqueeze(0).repeat(n_layer, 1, 1)
key_mask = value_mask # n_layer x B x T
b, len_query = query.size(0), query.size(1)
#~ key = key.transpose(0,1).contiguous().view(b, n_layer * len_key, -1)
value = key
# FIRST ATTENTION STEP
# project inputs to multi-heads
proj_query = self.fc_query(query, mask=query_mask) # batch_size x len_query x h*d_head
proj_key = self.fc_key(key, mask=key_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
proj_value = self.fc_value(value, mask=value_mask).transpose(0,1).contiguous().view(b, -1, self.h * self.d_head) # batch_size x (n_layer x len_key) x h*d_head
# prepare the shape for applying softmax
proj_query = self.shape(proj_query) # batch_size x h x len_query x d_head
proj_key = self.shape(proj_key) # batch_size x h x (n_layer * len_key) x d_head
proj_value = self.shape(proj_value) # batch_size x h x (n_layer * len_key) x d_head
proj_query = proj_query * (self.d_head**-0.5)
        # get dot-product softmax attns for each head
scores = torch.matmul(proj_query, proj_key.transpose(2,3)) # b x self.h x len_query x n_layer*len_key
# unshape to softmax on only the len_key dimension
scores = scores.view(b, self.h, len_query, n_layer, len_key)
mask_ = Variable(mask.unsqueeze(1).unsqueeze(-2)) # b x 1 x len_query x 1 x len_key
#~ mask_ = Variable(mask.unsqueeze(-3))
scores = scores.masked_fill_(mask_, -float('inf'))
# softmax on the last dimension (len_key)
#~ attns = self.sm(scores) # b x self.h x len_query x n_layer x len_key
attns = F.softmax(scores, dim=-1)
attns = self.attn_dropout(attns)
# apply attns on value
proj_value = proj_value.view(b, self.h, n_layer, len_key, self.d_head)
attns = attns.transpose(2, 3) # b, self.h, n_layer, len_query, len_key
out = torch.matmul(attns, proj_value) # b x self.h x n_layer x len_query x self.d_head
out = out.transpose(1, 3).contiguous().view(b, len_query, n_layer, self.h * self.d_head)
out = self.fc_concat(out, query_mask.unsqueeze(-1).repeat(1, 1, n_layer))
# 2ND ATTENTION LAYER
new_query = self.fc_query_2(query, mask=query_mask)
new_query = new_query.view(-1, new_query.size(-1)).unsqueeze(1) # batch_size*len_query x 1 x h*d_head
proj_query = self.shape(new_query) # batch_size*len_query x h x 1 x d_head
new_key = out.view(-1, n_layer, self.h * self.d_head) # b*len_query x n_layer x h*self.d_head
proj_key = self.shape(new_key) # batch_size*len_query x h x n_layer x d_head
if query_mask is not None:
flattened_mask = query_mask.view(-1)
non_pad_indices = torch.nonzero(flattened_mask).squeeze(1)
proj_query = proj_query.index_select(0, non_pad_indices)
proj_key = proj_key.index_select(0, non_pad_indices)
proj_value = proj_key
scores_2 = torch.matmul(proj_query, proj_key.transpose(2,3)) # batch_size*len_query x h x 1 x n_layer
# no need to mask this time
attns_2 = F.softmax(scores_2, dim=-1) # batch_size*len_query x h x 1 x n_layer
#~ attns_2 = self.attn_dropout(attns_2)
out = torch.matmul(attns_2, proj_value) # batch_size*len_query x h x 1 x d_head
b_ = out.size(0)
#~ out = out.transpose(1, 2).unsqueeze(1).contiguous().view(b_, self.h * self.d_head) # batch_size x len_query x h*d_head
out = out.unsqueeze(2).view(-1, self.h * self.d_head)
out = self.fc_concat_2(out)
if query_mask is not None:
final_out = Variable(out.data.new(b*len_query, self.h * self.d_head).zero_())
final_out.index_copy_(0, non_pad_indices, out)
else:
final_out = out
out = final_out.view(b, len_query, self.h*self.d_head)
coverage = None
return out, coverage
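# A minimal sketch (illustration only) of the second, hierarchical attention stage above:
# once a per-layer summary exists for every query position, a second softmax attends over the
# n_layer axis for each (flattened) query. All sizes are assumptions for the demo.
def _hierarchical_second_stage_sketch():
    import torch
    import torch.nn.functional as F
    bq, h, n_layer, d_head = 6, 4, 5, 8        # bq = batch_size * len_query, flattened
    query = torch.randn(bq, h, 1, d_head)
    per_layer = torch.randn(bq, h, n_layer, d_head)
    scores = torch.matmul(query, per_layer.transpose(2, 3))  # bq x h x 1 x n_layer
    attns = F.softmax(scores, dim=-1)
    out = torch.matmul(attns, per_layer)                     # bq x h x 1 x d_head
    return out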
class FCTEncoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one encoder layer
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
d_ff: dimension of feed forward
Params:
multihead: multi-head attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
mask: batch_size x len_query x len_key or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1):
super(FCTEncoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~ self.multihead = HierarchicalMultiHeadAttention(h, d_model, attn_p=attn_p)
self.multihead = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=True)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, memory_bank, attn_mask, pad_mask=None):
query = self.preprocess_attn(input)
if memory_bank is None:
memory_bank = query.unsqueeze(0)
else:
#~ memory_bank = query.unsqueeze(0)
memory_bank = torch.cat([memory_bank, query.unsqueeze(0)], dim=0) # batch_size x n_layer x len_src x hidden
""" Deep attention layer """
out, _ = self.multihead(query, memory_bank, attn_mask,
query_mask=pad_mask, value_mask=pad_mask)
input = self.postprocess_attn(out, input, mask=pad_mask)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask),
mask=pad_mask)
input = self.postprocess_ffn(out, input, mask=pad_mask)
return input, memory_bank
class FCTDecoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
d_ff: dimension of feed forward
Params:
multihead_tgt: multi-head self attentions layer
multihead_src: multi-head encoder-decoder attentions layer
feedforward: feed forward layer
Input Shapes:
query: batch_size x len_query x d_model
key: batch_size x len_key x d_model
value: batch_size x len_key x d_model
context: batch_size x len_src x d_model
mask_tgt: batch_size x len_query x len_key or broadcastable
mask_src: batch_size x len_query x len_src or broadcastable
Output Shapes:
out: batch_size x len_query x d_model
coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1):
super(FCTDecoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
self.preprocess_src_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_src_attn = PrePostProcessing(d_model, p, sequence='da', static=True)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=True)
#~ self.multihead_tgt = HierarchicalMultiHeadAttention(h, d_model, attn_p=attn_p)
self.multihead_tgt = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
#~ self.multihead_src = MultiHeadAttention(h, d_model, attn_p=attn_p)
self.multihead_src = UniformMultiHeadAttention(h, d_model, attn_p=attn_p)
if onmt.constants.activation_layer == 'linear_relu_linear':
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p)
elif onmt.constants.activation_layer == 'maxout':
k = int(math.ceil(d_ff / d_model))
feedforward = MaxOut(d_model, d_model, k)
self.feedforward = Bottle(feedforward)
def forward(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input, mask=pad_mask_tgt)
if memory_bank is None:
memory_bank = query.unsqueeze(0)
else:
#~ memory_bank = query.unsqueeze(0)
memory_bank = torch.cat([memory_bank, query.unsqueeze(0)], dim=0) # n_layer x batch_size x len_src x hidden
out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, mask_src,
query_mask=pad_mask_tgt, value_mask=pad_mask_src)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, memory_bank, coverage
def step(self, input, context, memory_bank, mask_tgt, mask_src, pad_mask_tgt=None, pad_mask_src=None, buffer=None):
query = self.preprocess_attn(input, mask=pad_mask_tgt)
if buffer is not None:
buffer = torch.cat([buffer, query], dim=1)
else:
buffer = query
if memory_bank is None:
memory_bank = buffer.unsqueeze(0)
else:
memory_bank = torch.cat([memory_bank, buffer.unsqueeze(0)], dim=0) # batch_size x n_layer x len_src x hidden
out, _ = self.multihead_tgt(query, memory_bank, mask_tgt,
query_mask=pad_mask_tgt, value_mask=pad_mask_tgt)
input = self.postprocess_attn(out, input)
""" Context Attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_src_attn(input, mask=pad_mask_tgt)
out, coverage = self.multihead_src(query, context, mask_src,
query_mask=pad_mask_tgt, value_mask=pad_mask_src)
input = self.postprocess_src_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input, mask=pad_mask_tgt),
mask=pad_mask_tgt)
input = self.postprocess_ffn(out, input)
return input, memory_bank, coverage, buffer
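# A minimal sketch (illustration only) of how the layer memory bank grows in the layers above:
# each layer appends its normalized states, so layer i attends over i + 1 stacked
# representations. All sizes are assumptions for the demo.
def _memory_bank_growth_sketch():
    import torch
    n_layers, b, t, h = 3, 2, 4, 8
    memory_bank = None
    for _ in range(n_layers):
        states = torch.randn(b, t, h)          # stand-in for the current layer's query states
        if memory_bank is None:
            memory_bank = states.unsqueeze(0)  # 1 x B x T x H
        else:
            memory_bank = torch.cat([memory_bank, states.unsqueeze(0)], dim=0)
    return memory_bank.size()                  # n_layers x B x T x H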
| 21,054 | 38.801512 | 173 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/FCTransformer/Models.py | import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformer_layers import PositionalEncoding
from onmt.legacy.FCTransformer.Layers import FCTEncoderLayer, FCTDecoderLayer
from onmt.modules.base_seq2seq import NMTModel, Reconstructor
import onmt
from onmt.modules.dropout import embedded_dropout
from onmt.models.transformer_layers import XavierLinear, MultiHeadAttention, FeedForward, PrePostProcessing
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class FCTransformerEncoder(nn.Module):
"""Encoder in 'Attention is all you need'
Args:
opt: list of options ( see train.py )
dicts : dictionary (for source language)
"""
def __init__(self, opt, dicts, positional_encoder):
super(FCTransformerEncoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.version = opt.version
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([FCTEncoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
def forward(self, input):
"""
Inputs Shapes:
            input: batch_size x len_src (to be transposed)
Outputs Shapes:
out: batch_size x len_src x d_model
mask_src
"""
""" Embedding: batch_size x len_src x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
""" Scale the emb by sqrt(d_model) """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
        mask_src = input.data.eq(onmt.constants.PAD).unsqueeze(1) # batch_size x 1 x len_src for broadcasting
pad_mask = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
#~ pad_mask = None
context = emb.contiguous()
memory_bank = None
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
context, memory_bank = checkpoint(custom_layer(layer), context, memory_bank, mask_src, pad_mask)
#~ print(type(context))
else:
context, memory_bank = layer(context, memory_bank, mask_src, pad_mask) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
context = self.postprocess_layer(context)
# make a huge memory bank on the encoder side
memory_bank = torch.cat([memory_bank, context.unsqueeze(0)], dim=0)
return memory_bank, mask_src
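# A minimal sketch (illustration only) of the two padding masks built in forward() above:
# a broadcastable attention mask marking padded key positions and a token-level pad mask.
# The PAD index used here is an assumption for the demo.
def _pad_mask_sketch():
    import torch
    PAD = 0
    input_ids = torch.tensor([[5, 6, 7, PAD], [8, 9, PAD, PAD]])  # batch_size x len_src
    mask_src = input_ids.eq(PAD).unsqueeze(1)  # batch_size x 1 x len_src, broadcast over queries
    pad_mask = input_ids.ne(PAD)               # batch_size x len_src
    return mask_src, pad_mask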
class FCTransformerDecoder(nn.Module):
    """Decoder in 'Attention is all you need'
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder):
super(FCTransformerDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.version = opt.version
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
elif opt.time == 'gru':
self.time_transformer = nn.GRU(self.model_size, self.model_size, 1, batch_first=True)
elif opt.time == 'lstm':
self.time_transformer = nn.LSTM(self.model_size, self.model_size, 1, batch_first=True)
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
if self.version == 1.0:
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
self.layer_modules = nn.ModuleList([FCTDecoderLayer(self.n_heads, self.model_size, self.dropout, self.inner_size, self.attn_dropout) for _ in range(self.layers)])
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def renew_buffer(self, new_len):
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def forward(self, input, context, src):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for i, layer in enumerate(self.layer_modules):
if len(self.layer_modules) - i <= onmt.constants.checkpointing and self.training:
output, memory_bank, coverage = checkpoint(custom_layer(layer), output, context, memory_bank, mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
else:
output, memory_bank, coverage = layer(output, context, memory_bank, mask_tgt, mask_src,
pad_mask_tgt, pad_mask_src) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
def step(self, input, context, src, buffer=None):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
output_buffer = list()
batch_size = input.size(0)
input_ = input[:,-1].unsqueeze(1)
# print(input_.size())
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
if self.time == 'positional_encoding':
emb = self.time_transformer(emb, t=input.size(1))
else:
            prev_h = buffer[0] if buffer is not None else None
emb = self.time_transformer(emb, prev_h)
buffer[0] = emb[1]
if isinstance(emb, tuple):
emb = emb[0] # emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
# batch_size x 1 x len_src
mask_src = src.data.eq(onmt.constants.PAD).unsqueeze(1)
pad_mask_src = torch.autograd.Variable(src.data.ne(onmt.constants.PAD))
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
# mask_tgt = self.mask[:len_tgt, :len_tgt].unsqueeze(0).repeat(batch_size, 1, 1)
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
output = emb.contiguous()
pad_mask_tgt = torch.autograd.Variable(input.data.ne(onmt.constants.PAD)) # batch_size x len_src
pad_mask_src = torch.autograd.Variable(1 - mask_src.squeeze(1))
memory_bank = None
for i, layer in enumerate(self.layer_modules):
buffer_ = buffer[i] if buffer is not None else None
assert(output.size(1) == 1)
output, memory_bank, coverage, buffer_ = layer.step(output, context, memory_bank, mask_tgt, mask_src,
pad_mask_tgt=None, pad_mask_src=None, buffer=buffer_) # batch_size x len_src x d_model
output_buffer.append(buffer_)
buffer = torch.stack(output_buffer)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage, buffer
| 12,177 | 38.411003 | 170 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/LSTMLM/Models.py | import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformers import TransformerDecodingState
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
import onmt
from onmt.modules.dropout import embedded_dropout
#~ from onmt.modules.Checkpoint import checkpoint
from torch.utils.checkpoint import checkpoint
from collections import defaultdict
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.legacy.TransformerLM.Layers import LMDecoderLayer
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class LSTMLMDecoder(nn.Module):
    """LSTM language model decoder
Args:
opt
dicts
"""
def __init__(self, opt, dicts):
super().__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.encoder_type = opt.encoder_type
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.rnn = nn.LSTM(self.model_size, self.model_size, num_layers=3, dropout=self.dropout)
self.postprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.h = None
self.c = None
def renew_buffer(self, new_len):
return
def forward(self, input, **kwargs):
"""
Inputs Shapes:
input: (Variable) len_tgt x batch_size
Outputs Shapes:
out: len_tgt x batch_size x d_model
"""
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
emb = self.preprocess_layer(emb)
if self.h is None:
lstm_mem = None
else:
lstm_mem = (self.h.detach(), self.c.detach())
output, (h, c) = self.rnn(emb, lstm_mem)
output = self.postprocess_layer(output)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = output
output_dict['lstm_mem'] = (h, c)
self.h = h
self.c = c
return output_dict
def step(self, input, decoder_state):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
buffers = decoder_state.attention_buffers
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
# output_buffer = list()
# batch_size = input_.size(0)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
if isinstance(emb, tuple):
emb = emb[0]
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
# print(mask_tgt)
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
assert(output.size(0) == 1)
output, coverage, buffer = layer.step(output, mask_tgt,buffer=buffer)
decoder_state.update_attention_buffer(buffer, i)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
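# A minimal sketch (illustration only) of the detached state carry-over used in forward() above:
# the LSTM state from the previous segment is reused but detached, so gradients do not flow
# across segment boundaries. All sizes are assumptions for the demo.
def _detached_state_sketch():
    import torch
    import torch.nn as nn
    rnn = nn.LSTM(8, 8, num_layers=1)
    segment_1 = torch.randn(5, 2, 8)  # T x B x H
    segment_2 = torch.randn(5, 2, 8)
    _, (h, c) = rnn(segment_1)
    out, (h, c) = rnn(segment_2, (h.detach(), c.detach()))
    return out.shape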
class LSTMLM(NMTModel):
    """LSTM language model """
def __init__(self, encoder, decoder, generator=None):
super().__init__( encoder, decoder, generator)
self.model_size = self.decoder.model_size
def forward(self, batch):
"""
Inputs Shapes:
src: len_src x batch_size
tgt: len_tgt x batch_size
Outputs Shapes:
out: batch_size*len_tgt x model_size
"""
# we only need target for language model
tgt = batch.get('target_input') # T x B
tgt_out = batch.get('target_output') # T x B
decoder_output = self.decoder(tgt)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = decoder_output['hidden']
return output_dict
def reset_states(self):
self.decoder.h = None
self.decoder.c = None
def step(self, input_t, decoder_state):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
hidden, coverage = self.decoder.step(input_t, decoder_state)
log_prob = self.generator[0](hidden.squeeze(0))
output_dict = defaultdict(lambda: None)
output_dict['log_prob'] = log_prob
return output_dict
# print a sample
def sample(self):
pass
def create_decoder_state(self, batch, beam_size=1):
return LSTMDecodingState(None, None, beam_size=beam_size, model_size=self.model_size)
class LSTMDecodingState(TransformerDecodingState):
def __init__(self, src, context, beam_size=1, model_size=512):
# if audio only take one dimension since only used for mask
self.beam_size = beam_size
self.input_seq = None
self.h = None
self.c = None
        self.model_size = model_size
        self.src = src
        self.context = context
        self.attention_buffers = dict()
def update_beam(self, beam, b, remaining_sents, idx):
for tensor in [self.src, self.input_seq] :
if tensor is None:
continue
t_, br = tensor.size()
sent_states = tensor.view(t_, self.beam_size, remaining_sents)[:, :, idx]
sent_states.copy_(sent_states.index_select(
1, beam[b].getCurrentOrigin()))
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
if buffer_ is None:
continue
for k in buffer_:
t_, br_, d_ = buffer_[k].size()
sent_states = buffer_[k].view(t_, self.beam_size, remaining_sents, d_)[:, :, idx, :]
sent_states.data.copy_(sent_states.data.index_select(
1, beam[b].getCurrentOrigin()))
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def prune_complete_beam(self, active_idx, remaining_sents):
model_size = self.model_size
def update_active(t):
if t is None:
return t
# select only the remaining active sentences
view = t.data.view(-1, remaining_sents, model_size)
new_size = list(t.size())
new_size[-2] = new_size[-2] * len(active_idx) // remaining_sents
return view.index_select(1, active_idx).view(*new_size)
def update_active_2d(t):
if t is None:
return t
view = t.view(-1, remaining_sents)
new_size = list(t.size())
new_size[-1] = new_size[-1] * len(active_idx) // remaining_sents
new_t = view.index_select(1, active_idx).view(*new_size)
return new_t
self.context = update_active(self.context)
self.input_seq = update_active_2d(self.input_seq)
self.src = update_active_2d(self.src)
for l in self.attention_buffers:
buffer_ = self.attention_buffers[l]
for k in buffer_:
buffer_[k] = update_active(buffer_[k])
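# A minimal sketch (illustration only) of the index_select-based pruning in
# prune_complete_beam() above: a tensor laid out as (len, beam * batch, dim) is viewed per
# sentence so that finished sentences can be dropped. All sizes are assumptions for the demo.
def _beam_prune_sketch():
    import torch
    beam_size, remaining_sents, model_size, length = 3, 4, 8, 5
    t = torch.randn(length, beam_size * remaining_sents, model_size)
    active_idx = torch.tensor([0, 2])  # keep only sentences 0 and 2
    view = t.view(-1, remaining_sents, model_size)
    pruned = view.index_select(1, active_idx).view(length, beam_size * len(active_idx), model_size)
    return pruned.shape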
| 9,163 | 29.751678 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/FusionNetwork/Models.py | import numpy as np
import torch, math
import torch.nn as nn
from onmt.modules.base_seq2seq import DecoderState
from onmt.models.transformers import TransformerDecodingState
from collections import defaultdict
import torch.nn.functional as F
import onmt
class FusionNetwork(nn.Module):
    """Shallow fusion of a translation model with a frozen language model """
def __init__(self, tm_model, lm_model):
super(FusionNetwork, self).__init__()
self.tm_model = tm_model
self.lm_model = lm_model
# freezing the parameters for the language model
for param in self.lm_model.parameters():
param.requires_grad = False
def forward(self, batch):
"""
Inputs Shapes:
src: len_src x batch_size
tgt: len_tgt x batch_size
Outputs Shapes:
out: batch_size*len_tgt x model_size
"""
nmt_output_dict = self.tm_model(batch)
# no gradient for the LM side
with torch.no_grad():
lm_output_dict = self.lm_model(batch)
output_dict = defaultdict(lambda: None)
output_dict['tm'] = nmt_output_dict
output_dict['lm'] = lm_output_dict
return output_dict
# an utility function to fuse two states
# return log prob
def fuse_states(self, tm_state, lm_state):
# PRENORM algorithm
# (1) generate the log P_lm
with torch.no_grad():
log_lm = self.lm_model.generator[0](lm_state, log_softmax=True)
# (2) generate the logits for tm
tm_logits = self.tm_model.generator[0](tm_state, log_softmax=False)
# (3) add the bias of lm to the logits
dists = F.log_softmax(tm_logits + log_lm, dim=-1)
# ## POSTNORM
# # (1) generate the P_lm
# with torch.no_grad():
# lm_logits = self.lm_model.generator[0](lm_state, log_softmax=False)
#
# # (2) generate the logits for tm
# tm_logits = self.tm_model.generator[0](tm_state, log_softmax=False)
#
# dists = F.log_softmax(F.softmax(tm_logits, dim=-1) * F.softmax(lm_logits, dim=-1), dim=-1)
return dists
def renew_buffer(self, new_len):
self.tm_model.decoder.renew_buffer(new_len)
self.lm_model.decoder.renew_buffer(new_len)
def decode(self, batch):
"""
:param batch: (onmt.Dataset.Batch) an object containing tensors needed for training
:return: gold_scores (torch.Tensor) log probs for each sentence
gold_words (Int) the total number of non-padded tokens
allgold_scores (list of Tensors) log probs for each word in the sentence
"""
src = batch.get('source')
tgt_input = batch.get('target_input')
tgt_output = batch.get('target_output')
# transpose to have batch first
src = src.transpose(0, 1)
tgt_input = tgt_input.transpose(0, 1)
batch_size = tgt_input.size(0)
# (1) we decode using language model
context = self.tm_model.encoder(src)['context']
if (hasattr(self,
'autoencoder') and self.autoencoder and self.autoencoder.representation == "EncoderHiddenState"):
context = self.autoencoder.autocode(context)
decoder_output = self.tm_model.decoder(tgt_input, context, src)['hidden']
output = decoder_output
if (hasattr(self, 'autoencoder')
and self.autoencoder and self.autoencoder.representation == "DecoderHiddenState"):
output = self.autoencoder.autocode(output)
gold_scores = context.new(batch_size).zero_()
gold_words = 0
allgold_scores = list()
# (2) decode using the language model
lm_decoder_output = self.lm_model.decoder(tgt_input)['hidden']
for dec_t, lm_t, tgt_t in zip(decoder_output, lm_decoder_output, tgt_output):
# generate the current step distribution from both states
gen_t = self.fuse_states(dec_t, lm_t)
tgt_t = tgt_t.unsqueeze(1)
scores = gen_t.gather(1, tgt_t)
            scores.masked_fill_(tgt_t.eq(onmt.constants.PAD), 0)
gold_scores += scores.squeeze(1).type_as(gold_scores)
            gold_words += tgt_t.ne(onmt.constants.PAD).sum().item()
allgold_scores.append(scores.squeeze(1).type_as(gold_scores))
return gold_words, gold_scores, allgold_scores
def step(self, input_t, decoder_state):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param input_t: the input word index at time t
:param decoder_state: object FusionDecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
# (1) decode using the translation model
tm_hidden, coverage = self.tm_model.decoder.step(input_t, decoder_state.tm_state)
        # (2) decode using the language model
        lm_hidden, _ = self.lm_model.decoder.step(input_t, decoder_state.lm_state)
log_prob = self.fuse_states(tm_hidden, lm_hidden)
# log_prob = self.tm_model.generator[0](tm_hidden)
last_coverage = coverage[:, -1, :].squeeze(1)
output_dict = defaultdict(lambda: None)
output_dict['log_prob'] = log_prob
output_dict['coverage'] = last_coverage
return output_dict
def create_decoder_state(self, batch, beam_size=1):
"""
Generate a new decoder state based on the batch input
:param batch: Batch object (may not contain target during decoding)
:param beam_size: Size of beam used in beam search
:return:
"""
tm_decoder_state = self.tm_model.create_decoder_state(batch, beam_size=beam_size)
lm_decoder_state = self.lm_model.create_decoder_state(batch, beam_size=beam_size)
decoder_state = FusionDecodingState(tm_decoder_state, lm_decoder_state)
return decoder_state
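# A minimal sketch (illustration only) of the "PRENORM" fusion rule in fuse_states() above:
# the language-model log-probabilities are added to the translation-model logits before the
# final log-softmax. The vocabulary size and values are assumptions for the demo.
def _prenorm_fusion_sketch():
    import torch
    import torch.nn.functional as F
    tm_logits = torch.randn(3, 10)                      # batch x vocab
    log_lm = F.log_softmax(torch.randn(3, 10), dim=-1)  # batch x vocab
    fused_log_prob = F.log_softmax(tm_logits + log_lm, dim=-1)
    return fused_log_prob.exp().sum(dim=-1)             # each row sums to ~1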
class FusionDecodingState(DecoderState):
def __init__(self, tm_state, lm_state):
self.tm_state = tm_state
self.lm_state = lm_state
self.original_src = tm_state.original_src
self.beam_size = tm_state.beam_size
def update_beam(self, beam, b, remaining_sents, idx):
self.tm_state.update_beam(beam, b, remaining_sents, idx)
self.lm_state.update_beam(beam, b, remaining_sents, idx)
# in this section, the sentences that are still active are
# compacted so that the decoder is not run on completed sentences
def prune_complete_beam(self, active_idx, remaining_sents):
self.tm_state.prune_complete_beam(active_idx, remaining_sents)
self.lm_state.prune_complete_beam(active_idx, remaining_sents)
| 6,887 | 33.964467 | 109 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/TransformerLM/Layers.py | import math
import torch
import torch.nn as nn
import torch.nn.init as init
import onmt
import torch.nn.functional as F
from onmt.models.transformer_layers import PrePostProcessing, MultiHeadAttention, Bottle, FeedForward
class LMDecoderLayer(nn.Module):
"""Wraps multi-head attentions and position-wise feed forward into one layer of decoder
Args:
h: number of heads
d_model: dimension of model
        p: dropout probability
d_ff: dimension of feed forward
Params:
        multihead_tgt: multi-head self attentions layer
        feedforward: feed forward layer
    Input Shapes:
        input: len_query x batch_size x d_model
        mask_tgt: batch_size x len_query x len_key or broadcastable
    Output Shapes:
        out: len_query x batch_size x d_model
        coverage: batch_size x len_query x len_key
"""
def __init__(self, h, d_model, p, d_ff, attn_p=0.1, ):
super(LMDecoderLayer, self).__init__()
self.preprocess_attn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_attn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.preprocess_ffn = PrePostProcessing(d_model, p, sequence='n')
self.postprocess_ffn = PrePostProcessing(d_model, p, sequence='da', static=onmt.constants.static)
self.multihead_tgt = MultiHeadAttention(h, d_model, attn_p=attn_p, static=onmt.constants.static, share=1)
ff_p = p
feedforward = FeedForward(d_model, d_ff, ff_p, static=onmt.constants.static)
self.feedforward = Bottle(feedforward)
def forward(self, input, mask_tgt):
""" Self attention layer
layernorm > attn > dropout > residual
"""
# input and context should be time first ?
query = self.preprocess_attn(input)
self_context = query
out, _ = self.multihead_tgt(query, self_context, self_context, mask_tgt)
input = self.postprocess_attn(out, input)
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
coverage = None
return input, coverage
def step(self, input, mask_tgt, buffer=None):
""" Self attention layer
layernorm > attn > dropout > residual
"""
query = self.preprocess_attn(input)
out, _, buffer = self.multihead_tgt.step(query, query, query, mask_tgt, buffer=buffer)
input = self.postprocess_attn(out, input)
coverage = None
""" Feed forward layer
layernorm > ffn > dropout > residual
"""
out = self.feedforward(self.preprocess_ffn(input))
input = self.postprocess_ffn(out, input)
return input, coverage, buffer
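# A minimal sketch (illustration only) of the pre-norm residual pattern used by the layer above
# (layer norm > sublayer > dropout > residual). The Linear sublayer stands in for the
# self-attention and feed-forward blocks; all sizes are assumptions for the demo.
def _prenorm_residual_sketch():
    import torch
    import torch.nn as nn
    d_model = 8
    norm = nn.LayerNorm(d_model)
    sublayer = nn.Linear(d_model, d_model)
    dropout = nn.Dropout(0.1)
    x = torch.randn(4, 2, d_model)
    x = x + dropout(sublayer(norm(x)))  # residual connection around the normalized sublayer
    return x.shape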
| 3,338 | 32.059406 | 113 | py |
NMTGMinor | NMTGMinor-master/onmt/legacy/TransformerLM/Models.py | import numpy as np
import torch, math
import torch.nn as nn
from onmt.models.transformers import TransformerDecodingState
from onmt.modules.base_seq2seq import NMTModel, Reconstructor, DecoderState
import onmt
from onmt.modules.dropout import embedded_dropout
#~ from onmt.modules.Checkpoint import checkpoint
from torch.utils.checkpoint import checkpoint
from collections import defaultdict
from onmt.models.transformer_layers import PositionalEncoding, PrePostProcessing
from onmt.legacy.TransformerLM.Layers import LMDecoderLayer
def custom_layer(module):
def custom_forward(*args):
output = module(*args)
return output
return custom_forward
class TransformerLMDecoder(nn.Module):
    """Decoder of a Transformer language model
Args:
opt
dicts
"""
def __init__(self, opt, dicts, positional_encoder):
super(TransformerLMDecoder, self).__init__()
self.model_size = opt.model_size
self.n_heads = opt.n_heads
self.inner_size = opt.inner_size
self.layers = opt.layers
self.dropout = opt.dropout
self.word_dropout = opt.word_dropout
self.attn_dropout = opt.attn_dropout
self.emb_dropout = opt.emb_dropout
self.time = opt.time
self.encoder_type = opt.encoder_type
if opt.time == 'positional_encoding':
self.time_transformer = positional_encoder
else:
raise NotImplementedError
self.preprocess_layer = PrePostProcessing(self.model_size, self.emb_dropout, sequence='d', static=False)
self.postprocess_layer = PrePostProcessing(self.model_size, 0, sequence='n')
self.word_lut = nn.Embedding(dicts.size(),
self.model_size,
padding_idx=onmt.constants.PAD)
self.positional_encoder = positional_encoder
len_max = self.positional_encoder.len_max
mask = torch.ByteTensor(np.triu(np.ones((len_max,len_max)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
self.build_modules()
def build_modules(self):
self.layer_modules = nn.ModuleList([LMDecoderLayer(self.n_heads, self.model_size,
self.dropout, self.inner_size,
self.attn_dropout,
) for _ in range(self.layers)])
def renew_buffer(self, new_len):
print(new_len)
self.positional_encoder.renew(new_len)
mask = torch.ByteTensor(np.triu(np.ones((new_len,new_len)), k=1).astype('uint8'))
self.register_buffer('mask', mask)
def forward(self, input, **kwargs):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
""" Embedding: batch_size x len_tgt x d_model """
emb = embedded_dropout(self.word_lut, input, dropout=self.word_dropout if self.training else 0)
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
""" Adding positional encoding """
emb = self.time_transformer(emb)
if isinstance(emb, tuple):
emb = emb[0]
emb = self.preprocess_layer(emb)
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
output = emb.transpose(0, 1).contiguous()
for i, layer in enumerate(self.layer_modules):
output, coverage = layer(output, mask_tgt) # batch_size x len_src x d_model
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
output_dict = { 'hidden': output, 'coverage': coverage }
# return output, None
return output_dict
def step(self, input, decoder_state):
"""
Inputs Shapes:
            input: (Variable) batch_size x len_tgt (to be transposed)
context: (Variable) batch_size x len_src x d_model
mask_src (Tensor) batch_size x len_src
buffer (List of tensors) List of batch_size * len_tgt-1 * d_model for self-attention recomputing
Outputs Shapes:
out: batch_size x len_tgt x d_model
coverage: batch_size x len_tgt x len_src
"""
buffers = decoder_state.attention_buffers
if decoder_state.input_seq is None:
decoder_state.input_seq = input
else:
# concatenate the last input to the previous input sequence
decoder_state.input_seq = torch.cat([decoder_state.input_seq, input], 0)
input = decoder_state.input_seq.transpose(0, 1)
input_ = input[:,-1].unsqueeze(1)
# output_buffer = list()
# batch_size = input_.size(0)
""" Embedding: batch_size x 1 x d_model """
emb = self.word_lut(input_)
""" Adding positional encoding """
if self.time == 'positional_encoding':
emb = emb * math.sqrt(self.model_size)
emb = self.time_transformer(emb, t=input.size(1))
else:
# prev_h = buffer[0] if buffer is None else None
# emb = self.time_transformer(emb, prev_h)
# buffer[0] = emb[1]
raise NotImplementedError
if isinstance(emb, tuple):
emb = emb[0]
# emb should be batch_size x 1 x dim
# Preprocess layer: adding dropout
emb = self.preprocess_layer(emb)
emb = emb.transpose(0, 1)
# batch_size x 1 x len_src
len_tgt = input.size(1)
mask_tgt = input.data.eq(onmt.constants.PAD).unsqueeze(1) + self.mask[:len_tgt, :len_tgt]
mask_tgt = torch.gt(mask_tgt, 0)
mask_tgt = mask_tgt[:, -1, :].unsqueeze(1)
# print(mask_tgt)
output = emb.contiguous()
for i, layer in enumerate(self.layer_modules):
buffer = buffers[i] if i in buffers else None
assert(output.size(0) == 1)
output, coverage, buffer = layer.step(output, mask_tgt,buffer=buffer)
decoder_state.update_attention_buffer(buffer, i)
# From Google T2T
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
output = self.postprocess_layer(output)
return output, coverage
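# A minimal sketch (illustration only) of how the padding mask and the causal mask are combined
# in forward() above; the module adds them and thresholds with gt(., 0), which is equivalent to
# the logical OR used here. The PAD index is an assumption for the demo.
def _combined_mask_sketch():
    import numpy as np
    import torch
    PAD = 0
    tgt = torch.tensor([[3, 4, 5, PAD]])  # batch_size x len_tgt
    len_tgt = tgt.size(1)
    causal = torch.from_numpy(np.triu(np.ones((len_tgt, len_tgt)), k=1)).bool()
    mask_tgt = tgt.eq(PAD).unsqueeze(1) | causal  # batch_size x len_tgt x len_tgt, True = masked
    return mask_tgt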
class TransformerLM(NMTModel):
    """Transformer language model """
def __init__(self, encoder, decoder, generator=None):
super().__init__( encoder, decoder, generator)
self.model_size = self.decoder.model_size
def forward(self, batch):
"""
Inputs Shapes:
src: len_src x batch_size
tgt: len_tgt x batch_size
Outputs Shapes:
out: batch_size*len_tgt x model_size
"""
# we only need target for language model
tgt = batch.get('target_input')
tgt_out = batch.get('target_output')
tgt = tgt.transpose(0, 1)
decoder_output = self.decoder(tgt)
output_dict = defaultdict(lambda: None)
output_dict['hidden'] = decoder_output['hidden']
return output_dict
def step(self, input_t, decoder_state):
"""
Decoding function:
generate new decoder output based on the current input and current decoder state
the decoder state is updated in the process
:param input_t: the input word index at time t
:param decoder_state: object DecoderState containing the buffers required for decoding
:return: a dictionary containing: log-prob output and the attention coverage
"""
hidden, coverage = self.decoder.step(input_t, decoder_state)
log_prob = self.generator[0](hidden.squeeze(0))
output_dict = defaultdict(lambda: None)
output_dict['log_prob'] = log_prob
return output_dict
# print a sample
def sample(self):
pass
def create_decoder_state(self, batch, beam_size=1):
return TransformerDecodingState(None, None, beam_size=beam_size, model_size=self.model_size)
| 8,777 | 32.632184 | 112 | py |
pixyz | pixyz-main/setup.py | import io
import os
import re
from setuptools import setup, find_packages
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
with io.open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
setup(
name='pixyz',
version=find_version("pixyz", "__init__.py"),
packages=find_packages(),
url='https://github.com/masa-su/pixyz',
author='masa-su',
author_email='masa@weblab.t.u-tokyo.ac.jp',
description='Deep generative modeling library',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
"torch>=1.0",
"scipy",
"numpy",
"sympy>=1.4",
"ipython",
"networkx",
],
extras_require={
'dev': ['pytest',
                'flake8==3.9.2',
'pytest-cov',
'pytest-flake8',
'sphinx',
'sphinx_rtd_theme',
'twine',
"tqdm",
"torchvision",
"tensorboardX",
'sklearn'],
'test': ['pytest-cov',
'flake8==3.9.2',
'pytest-flake8',
'sphinx',
'sphinx_rtd_theme',
'tqdm',
'sklearn'],
},
license='MIT',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
"Operating System :: OS Independent",
],
)
| 2,028 | 26.053333 | 68 | py |
pixyz | pixyz-main/pixyz/utils.py | import functools
import torch
import sympy
from IPython.display import Math
import pixyz
_EPSILON = 1e-07
_CACHE_MAXSIZE = 2 * 10
def set_epsilon(eps):
    """Set an `epsilon` parameter.
Parameters
----------
eps : int or float
Returns
-------
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._EPSILON', 1e-07):
... set_epsilon(1e-06)
... epsilon()
1e-06
"""
global _EPSILON
_EPSILON = eps
def epsilon():
    """Get an `epsilon` parameter.
Returns
-------
int or float
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._EPSILON', 1e-07):
... epsilon()
1e-07
"""
return _EPSILON
def set_cache_maxsize(cache_maxsize):
"""Set a `cache_maxsize` parameter.
Parameters
----------
cache_maxsize : int
Returns
-------
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._CACHE_MAXSIZE', 100):
... set_cache_maxsize(100)
... cache_maxsize()
100
"""
global _CACHE_MAXSIZE
_CACHE_MAXSIZE = cache_maxsize
def cache_maxsize():
"""Get a `cache_maxsize` parameter.
Returns
-------
int
Examples
--------
>>> from unittest import mock
>>> with mock.patch('pixyz.utils._CACHE_MAXSIZE', 100):
... cache_maxsize()
100
"""
return _CACHE_MAXSIZE
def get_dict_values(dicts, keys, return_dict=False):
"""Get values from `dicts` specified by `keys`.
When `return_dict` is True, return values are in dictionary format.
Parameters
----------
dicts : dict
keys : list
return_dict : bool
Returns
-------
dict or list
Examples
--------
>>> get_dict_values({"a":1,"b":2,"c":3}, ["b"])
[2]
>>> get_dict_values({"a":1,"b":2,"c":3}, ["b", "d"], True)
{'b': 2}
"""
new_dicts = dict((key, dicts[key]) for key in keys if key in list(dicts.keys()))
if return_dict is False:
return list(new_dicts.values())
return new_dicts
def delete_dict_values(dicts, keys):
"""Delete values from `dicts` specified by `keys`.
Parameters
----------
dicts : dict
keys : list
Returns
-------
new_dicts : dict
Examples
--------
>>> delete_dict_values({"a":1,"b":2,"c":3}, ["b","d"])
{'a': 1, 'c': 3}
"""
new_dicts = dict((key, value) for key, value in dicts.items() if key not in keys)
return new_dicts
def detach_dict(dicts):
"""Detach all values in `dicts`.
Parameters
----------
dicts : dict
Returns
-------
dict
"""
return {k: v.detach() for k, v in dicts.items()}
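# Illustration (informal, not from the original file):
# detach_dict({'x': torch.zeros(1, requires_grad=True)})['x'].requires_grad
# evaluates to False -- every value is detached from the autograd graph.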
def replace_dict_keys(dicts, replace_list_dict):
    """ Replace keys in `dicts` according to `replace_list_dict`.
Parameters
----------
dicts : dict
Dictionary.
replace_list_dict : dict
Dictionary.
Returns
-------
replaced_dicts : dict
Dictionary.
Examples
--------
>>> replace_dict_keys({"a":1,"b":2,"c":3}, {"a":"x","b":"y"})
{'x': 1, 'y': 2, 'c': 3}
    >>> replace_dict_keys({"a":1,"b":2,"c":3}, {"a":"x","e":"y"}) # keys of `replace_list_dict` not in `dicts` are ignored
{'x': 1, 'b': 2, 'c': 3}
"""
replaced_dicts = dict([(replace_list_dict[key], value) if key in list(replace_list_dict.keys())
else (key, value) for key, value in dicts.items()])
return replaced_dicts
def replace_dict_keys_split(dicts, replace_list_dict):
    """ Replace keys in `dicts` according to :attr:`replace_list_dict`.
    The replaced dict is split into :attr:`replaced_dict` and :attr:`remain_dict`.
Parameters
----------
dicts : dict
Dictionary.
replace_list_dict : dict
Dictionary.
Returns
-------
replaced_dict : dict
Dictionary.
remain_dict : dict
Dictionary.
Examples
--------
>>> replace_list_dict = {'a': 'loc'}
>>> x_dict = {'a': 0, 'b': 1}
>>> print(replace_dict_keys_split(x_dict, replace_list_dict))
({'loc': 0}, {'b': 1})
"""
replaced_dict = {replace_list_dict[key]: value for key, value in dicts.items()
if key in list(replace_list_dict.keys())}
remain_dict = {key: value for key, value in dicts.items()
if key not in list(replace_list_dict.keys())}
return replaced_dict, remain_dict
# Immutable, hashable wrapper around a sample dict (used as an lru_cache key).
class FrozenSampleDict:
def __init__(self, dict_):
self.dict = dict_
def __hash__(self):
hashes = [(hash(key), hash(value)) for key, value in self.dict.items()]
return hash(tuple(hashes))
def __eq__(self, other):
class EqTensor:
def __init__(self, tensor):
self.tensor = tensor
def __eq__(self, other):
if not torch.is_tensor(self.tensor):
return self.tensor == other.tensor
return torch.all(self.tensor.eq(other.tensor))
return {key: EqTensor(value) for key, value in self.dict.items()} ==\
{key: EqTensor(value) for key, value in other.dict.items()}
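# Minimal illustration (an informal sketch of the intended use as an lru_cache key,
# not code from the original library): wrapping a sample dict makes it hashable,
# while equality compares the wrapped tensors element-wise.
def _frozen_sample_dict_demo():
    a = FrozenSampleDict({'x': torch.zeros(2)})
    b = FrozenSampleDict({'x': torch.zeros(2)})
    # a == b is True; hash(a) and hash(b) may differ because tensor hashes are identity-based
    return a == b, hash(a) == hash(b)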
def lru_cache_for_sample_dict():
"""
    Memoize the result of a method keyed on its sample-dict arguments.
    Note that any dictionary argument of the decorated method must be a sample dict.
Returns
-------
decorator function
Examples
--------
>>> import time
>>> import torch.nn as nn
>>> import pixyz.utils as utils
>>> utils.set_cache_maxsize(2)
>>> import pixyz.distributions as pd
>>> class LongEncoder(pd.Normal):
... def __init__(self):
... super().__init__(var=['x'], cond_var=['y'])
... self.nn = nn.Sequential(*(nn.Linear(1,1) for i in range(10000)))
... def forward(self, y):
... return {'loc': self.nn(y), 'scale': torch.ones(1,1)}
... @lru_cache_for_sample_dict()
... def get_params(self, params_dict={}, **kwargs):
... return super().get_params(params_dict, **kwargs)
>>> def measure_time(func):
... start = time.time()
... func()
... elapsed_time = time.time() - start
... return elapsed_time
>>> le = LongEncoder()
>>> y = torch.ones(1, 1)
>>> t_sample1 = measure_time(lambda:le.sample({'y': y}))
>>> print ("sample1:{0}".format(t_sample1) + "[sec]") # doctest: +SKIP
>>> t_log_prob = measure_time(lambda:le.get_log_prob({'x': y, 'y': y}))
>>> print ("log_prob:{0}".format(t_log_prob) + "[sec]") # doctest: +SKIP
>>> t_sample2 = measure_time(lambda:le.sample({'y': y}))
>>> print ("sample2:{0}".format(t_sample2) + "[sec]") # doctest: +SKIP
>>> assert t_sample1 > t_sample2, "processing time increases: {0}".format(t_sample2 - t_sample1)
"""
maxsize = cache_maxsize()
raw_decorating_function = functools.lru_cache(maxsize=maxsize, typed=False)
def decorating_function(user_function):
def wrapped_user_function(sender, *args, **kwargs):
new_args = list(args)
new_kwargs = dict(kwargs)
for i in range(len(args)):
if isinstance(args[i], FrozenSampleDict):
new_args[i] = args[i].dict
for key in kwargs.keys():
if isinstance(kwargs[key], FrozenSampleDict):
new_kwargs[key] = kwargs[key].dict
return user_function(sender, *new_args, **new_kwargs)
def frozen(wrapper):
def frozen_wrapper(sender, *args, **kwargs):
new_args = list(args)
new_kwargs = dict(kwargs)
for i in range(len(args)):
if isinstance(args[i], list):
new_args[i] = tuple(args[i])
elif isinstance(args[i], dict):
new_args[i] = FrozenSampleDict(args[i])
for key in kwargs.keys():
if isinstance(kwargs[key], list):
new_kwargs[key] = tuple(kwargs[key])
elif isinstance(kwargs[key], dict):
new_kwargs[key] = FrozenSampleDict(kwargs[key])
result = wrapper(sender, *new_args, **new_kwargs)
return result
return frozen_wrapper
return frozen(raw_decorating_function(wrapped_user_function))
return decorating_function
def tolist(a):
    """Convert a given input to the list format.
Parameters
----------
a : list or other
Returns
-------
list
Examples
--------
>>> tolist(2)
[2]
>>> tolist([1, 2])
[1, 2]
>>> tolist([])
[]
"""
if type(a) is list:
return a
return [a]
def sum_samples(samples, sum_dims=None):
"""Sum a given sample across the axes.
Parameters
----------
samples : torch.Tensor
Input sample.
sum_dims : torch.Size or list of int or None
Dimensions to reduce. If it is None, all dimensions are summed except for the first dimension.
Returns
-------
torch.Tensor
        Summed sample.
Examples
--------
>>> a = torch.ones([2])
>>> sum_samples(a).size()
torch.Size([2])
>>> a = torch.ones([2, 3])
>>> sum_samples(a).size()
torch.Size([2])
>>> a = torch.ones([2, 3, 4])
>>> sum_samples(a).size()
torch.Size([2])
"""
if sum_dims is not None:
if len(sum_dims) == 0:
return samples
return torch.sum(samples, dim=sum_dims)
dim = samples.dim()
if dim == 1:
return samples
dim_list = list(torch.arange(samples.dim()))
samples = torch.sum(samples, dim=dim_list[1:])
return samples
def print_latex(obj):
"""Print formulas in latex format.
Parameters
----------
    obj : pixyz.distributions.distributions.Distribution, pixyz.distributions.distributions.DistGraph, pixyz.losses.losses.Loss, or pixyz.models.model.Model.
"""
if isinstance(obj, pixyz.distributions.distributions.Distribution):
latex_text = obj.prob_joint_factorized_and_text
elif isinstance(obj, pixyz.distributions.distributions.DistGraph):
latex_text = obj.prob_joint_factorized_and_text
elif isinstance(obj, pixyz.losses.losses.Loss):
latex_text = obj.loss_text
    elif isinstance(obj, pixyz.models.model.Model):
        latex_text = obj.loss_cls.loss_text
    else:
        raise TypeError(f"print_latex does not support objects of type {type(obj)}.")
    return Math(latex_text)
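# Usage sketch (assumes a Jupyter/IPython frontend that renders IPython.display.Math):
# >>> import pixyz.distributions as pd
# >>> p = pd.Normal(var=["x"], loc=0., scale=1.)
# >>> print_latex(p)  # renders p(x) as LaTeX in the notebook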
def convert_latex_name(name):
return sympy.latex(sympy.Symbol(name))
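# Illustrative values (based on sympy's LaTeX printing; added for clarity, not from the original file):
# convert_latex_name("x") -> 'x'
# convert_latex_name("x_1") -> 'x_{1}'
# convert_latex_name("sigma") -> '\\sigma'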
| 10,567 | 24.965602 | 111 | py |
pixyz | pixyz-main/pixyz/distributions/distributions.py | from __future__ import print_function
import torch
import re
import networkx as nx
from torch import nn
from ..utils import get_dict_values, replace_dict_keys, delete_dict_values,\
tolist, sum_samples, convert_latex_name, lru_cache_for_sample_dict
from ..losses import LogProb, Prob
def _make_prob_text(dist_name, var, cond_var):
var_text = ','.join(convert_latex_name(var_name) for var_name in var)
cond_text = '' if len(cond_var) == 0 else \
'|' + ','.join(convert_latex_name(var_name) for var_name in cond_var)
return f"{dist_name}({var_text}{cond_text})"
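# Illustration (added for clarity): _make_prob_text("p", ["x"], ["y"]) returns "p(x|y)",
# and with no conditional variables _make_prob_text("q", ["z"], []) returns "q(z)".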
def _make_prob_equality_text(prob_text, prob_factorized_text):
if prob_factorized_text == prob_text:
return prob_text
else:
return f"{prob_text} = {prob_factorized_text}"
def _make_distribution_text(prob_joint_factorized_and_text, network_text):
# Distribution
text = f"Distribution:\n {prob_joint_factorized_and_text}\n"
# Network architecture (`repr`)
network_text = re.sub('^', ' ' * 2, str(network_text), flags=re.MULTILINE)
text += f"Network architecture:\n{network_text}"
return text
class Factor:
"""
This class wraps an atomic distribution as a factor node of a DistGraph.
    It allocates a new instance even if the same atomic distribution is specified.
    This class assumes that its lifespan is covered by the lifespan of the DistGraph.
"""
def __init__(self, atom_dist):
self.dist = atom_dist
self.name_dict = {}
self.option = {}
def copy(self):
inst = Factor(self.dist)
inst.name_dict = dict(self.name_dict)
inst.option = dict(self.option)
return inst
def rename_var(self, replace_dict):
name_dict = self.name_dict
# name_dict:global->local + replace:global->new_global = name_dict:new_global->local
for var_name, new_var_name in replace_dict.items():
if var_name in name_dict:
local_var = name_dict[var_name]
del name_dict[var_name]
name_dict[new_var_name] = local_var
else:
name_dict[new_var_name] = var_name
@property
def _reversed_name_dict(self):
return {value: key for key, value in self.name_dict.items()}
@staticmethod
def __apply_dict(dict, var):
return [dict[var_name] if var_name in dict else var_name for var_name in var]
def _get_local_input_dict(self, values, input_var=None):
if not input_var:
input_var = self.dist.input_var
global_input_var = self.__apply_dict(self._reversed_name_dict, input_var)
if any(var_name not in values for var_name in global_input_var):
raise ValueError("lack of some variables")
input_dict = get_dict_values(values, global_input_var, return_dict=True)
local_input_dict = replace_dict_keys(input_dict, self.name_dict)
return local_input_dict
def sample(self, values, sample_option):
local_input_dict = self._get_local_input_dict(values)
        # Overwrite sample_option with self.option to give priority to local settings such as batch_n
option = dict(sample_option)
option.update(self.option)
local_output_dict = self.dist.sample(local_input_dict, **option)
        # TODO: the return_hidden option changes the set of returned variables (i.e. the graphical model); this behaviour should be reworked.
ignore_hidden = ('return_hidden' in sample_option and sample_option['return_hidden'])
ignore_hidden |= ('return_hidden' in self.option and self.option['return_hidden'])
if not ignore_hidden and set(local_output_dict) != set(self.dist.var):
raise Exception(f"The sample method of {self.dist.distribution_name} returns different variables."
f" Expected:{list(self.dist.var)}, Got:{list(local_output_dict)}")
sample = replace_dict_keys(local_output_dict, self._reversed_name_dict)
return sample
def get_log_prob(self, values, log_prob_option):
local_input_dict = self._get_local_input_dict(values, list(self.dist.var) + list(self.dist.cond_var))
# Overwrite log_prob_option with self.option to give priority to local settings such as batch_n
option = dict(log_prob_option)
option.update(self.option)
log_prob = self.dist.get_log_prob(local_input_dict, **option)
return log_prob
def get_params(self, params_dict={}, **kwargs):
orig_params_dict = self._get_local_input_dict(params_dict)
params = self.dist.get_params(orig_params_dict, **kwargs)
return params
def sample_mean(self, values={}):
local_input_dict = self._get_local_input_dict(values)
result = self.dist.sample_mean(local_input_dict)
return result
def sample_variance(self, values={}):
local_input_dict = self._get_local_input_dict(values)
result = self.dist.sample_variance(local_input_dict)
return result
def get_entropy(self, values={}, sum_features=True, feature_dims=None):
local_input_dict = self._get_local_input_dict(values)
result = self.dist.get_entropy(local_input_dict, sum_features, feature_dims)
return result
@property
def input_var(self):
return self.__apply_dict(self._reversed_name_dict, self.dist.input_var)
@property
def var(self):
return self.__apply_dict(self._reversed_name_dict, self.dist.var)
@property
def cond_var(self):
return self.__apply_dict(self._reversed_name_dict, self.dist.cond_var)
@property
def prob_text(self):
return _make_prob_text(self.dist.name, self.var, self.cond_var)
def __str__(self):
prob_node_text = self.prob_text
factorized_text = self.dist.prob_factorized_text
if prob_node_text == factorized_text:
header_text = f"{prob_node_text}:\n"
else:
header_text = f"{prob_node_text} -> {self.dist.prob_joint_factorized_and_text}:\n"
return header_text + repr(self.dist)
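# Usage sketch of the Factor wrapper (illustration only; Factor is normally created
# internally by DistGraph.appended()):
# >>> import pixyz.distributions as pd
# >>> f = Factor(pd.Normal(var=['x'], loc=0., scale=1.))
# >>> f.rename_var({'x': 'z'})  # map the graph-level name 'z' to the local name 'x'
# >>> f.var
# ['z']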
class DistGraph(nn.Module):
"""
    Graphical model class. This manages the graph structure of a distribution's graphical model.
    It is used from the Distribution class.
"""
def __init__(self, original=None):
super().__init__()
self.graph = nx.DiGraph()
self.global_option = {}
self.marginalize_list = set()
self.name = ''
if original:
self._override_module(original)
self.graph = nx.relabel_nodes(original.graph,
{factor: factor.copy() for factor in original.factors()})
self.global_option.update(original.global_option)
self.marginalize_list.update(original.marginalize_list)
self.name = original.name
def _override_module(self, original: nn.Module):
name_offset = len(list(self.named_children()))
for i, (_, module) in enumerate(original.named_children()):
self.add_module(str(name_offset + i), module)
    def appended(self, atom_dist):
        """ Return a new graph with one node appended.
Parameters
----------
atom_dist : Distribution
Returns
-------
DistGraph
"""
new_instance = DistGraph(self)
if not new_instance.name:
new_instance.name = atom_dist.name
# factor node of an atomic distribution
factor = Factor(atom_dist)
new_instance.add_module(str(len(list(new_instance.factors()))), atom_dist)
new_instance.graph.add_node(factor)
for var_name in atom_dist.var:
if var_name in new_instance.graph:
raise ValueError(f"A new variable name '{var_name}' is already used in this graph.")
new_instance.graph.add_edge(factor, var_name)
for cond in atom_dist.cond_var:
new_instance.graph.add_edge(cond, factor)
return new_instance
    def set_option(self, option_dict, var=[]):
        """ Set option arguments which are used when the `sample` or `get_log_prob` methods are called.
Parameters
----------
option_dict: dict of str and any object
var: list of string
Examples
--------
>>> from pixyz.distributions import Normal
>>> dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
>>> # Set options only on the sampling start node
>>> dist.graph.set_option(dict(batch_n=4, sample_shape=(2, 3)), ['y'])
>>> sample = dist.sample()
>>> sample['y'].shape
torch.Size([2, 3, 4])
>>> sample['x'].shape
torch.Size([2, 3, 4])
"""
if not var:
self.global_option = option_dict
else:
for var_name in var:
for factor in self._factors_from_variable(var_name):
factor.option = option_dict
def united(self, other):
if not set(self.var + list(self.marginalize_list)).isdisjoint(set(other.var + list(other.marginalize_list))):
raise ValueError("There is var-name conflicts between two graphs.")
if not set(self.factors()).isdisjoint(set(other.factors())):
raise ValueError("The same instances of a distribution are used between two graphs.")
scg = DistGraph(self)
scg._override_module(other)
scg.graph.update(other.graph)
scg.global_option.update(other.global_option)
scg.marginalize_list.update(other.marginalize_list)
return scg
    def marginalized(self, marginalize_list):
        """ Return a new graph with some variables marginalized out.
Parameters
----------
marginalize_list : iterative of str
Returns
-------
DistGraph
Examples
--------
>>> import pixyz.distributions as pd
>>> dist = pd.Normal(var=['x']).marginalize_var(['x'])
Traceback (most recent call last):
...
ValueError: marginalize_list has unknown variables or it has all of variables of `p`.
>>> dist = (pd.Normal(var=['x'])*pd.Normal(var=['y'])).marginalize_var(['x'])
>>> dist.graph.marginalize_list
{'x'}
>>> dist.var
['y']
>>> dist.cond_var
[]
"""
marginalize_list = set(marginalize_list)
if len(marginalize_list) == 0:
raise ValueError("Length of `marginalize_list` must be at least 1, got 0.")
if not marginalize_list < set(self.var):
raise ValueError("marginalize_list has unknown variables or it has all of variables of `p`.")
new_graph = DistGraph(self)
new_graph.marginalize_list.update(marginalize_list)
return new_graph
def var_replaced(self, replace_dict):
r""" Returns new graph whose variables are replaced.
Parameters
----------
replace_dict: dict of str and str
Returns
-------
DistGraph
Examples
--------
>>> from pixyz.distributions.distributions import DistGraph
>>> import pixyz.distributions as pd
>>> normal = pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1))
>>> normal2 = pd.Normal(var=['y'], loc=torch.zeros(1), scale=torch.ones(1))
>>> multi_dist = normal * normal2
>>> normal3 = pd.Normal(var=['z'], cond_var=['y'], loc='y', scale=torch.ones(1))
>>> multi_dist2 = multi_dist * normal3
        >>> # Renaming a variable to the name of a marginalized variable is not allowed
>>> dist3 = multi_dist2.marginalize_var(['y']).replace_var(z='y')
Traceback (most recent call last):
...
ValueError: ['y', 'z'] are conflicted after replaced.
>>> dist3 = multi_dist2.marginalize_var(['y']).replace_var(z='w', x='z')
>>> sample = dist3.sample()
>>> sample # doctest: +SKIP
{'w': tensor([[2.3206]]), 'z': tensor([[-0.5381]])}
>>> dist4 = multi_dist2.marginalize_var(['y']).replace_var(z='w', x='z').replace_var(z='a')
>>> print(dist4)
Distribution:
p(w,a) = \int p(a)p(w|y)p(y)dy
Network architecture:
p(y):
Normal(
name=p, distribution_name=Normal,
var=['y'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
p(w|y) -> p(z|y):
Normal(
name=p, distribution_name=Normal,
var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([1])
(scale): torch.Size([1, 1])
)
p(a) -> p(x):
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
>>> print(repr(dist4))
DistGraph(
(0): Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
(1): Normal(
name=p, distribution_name=Normal,
var=['y'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
(2): Normal(
name=p, distribution_name=Normal,
var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([1])
(scale): torch.Size([1, 1])
)
)
"""
# check replace_dict
if not (set(replace_dict) <= set(self.all_var)):
unknown_var = [var_name for var_name in replace_dict.keys() if var_name not in self.all_var]
raise ValueError(f"replace_dict has unknown variables: {unknown_var}")
replaced_vars = [replace_dict[var_name] if var_name in replace_dict else var_name for var_name in self.all_var]
if len(self.all_var) != len(set(replaced_vars)):
duplicated_vars = [var_name for var_name in self.all_var
if replaced_vars.count(replace_dict[var_name]
if var_name in replace_dict else var_name) > 1]
raise ValueError(f"{duplicated_vars} are conflicted after replaced.")
result = DistGraph(original=self)
result.graph = nx.relabel_nodes(result.graph, replace_dict, copy=False)
result.marginalize_list = {replace_dict[var] if var in replace_dict else var for var in self.marginalize_list}
result.global_option = dict(self.global_option)
for factor in result.factors():
if set(replace_dict.values()).isdisjoint(list(result.graph.pred[factor]) + list(result.graph.succ[factor])):
continue
factor.rename_var(replace_dict)
return result
def _factors_from_variable(self, var_name):
return list(self.graph.pred[var_name])
def factors(self, sorted=False):
""" get factors of the DistGraph.
Parameters
----------
sorted: bool
            whether the factors are returned in topologically sorted order.
Returns
-------
iter of Factor
"""
nodes = nx.topological_sort(self.graph) if sorted else self.graph
for node in nodes:
if isinstance(node, Factor):
yield node
def distribution(self, var_name):
""" An atomic distribution of the specified variable.
Parameters
----------
var_name: str
Returns
-------
Distribution
"""
factors = self._factors_from_variable(var_name)
if len(factors) == 0:
            raise ValueError(f"There is no distribution for {var_name}.")
if len(factors) != 1:
raise NotImplementedError("multiple factors are not supported now.")
return factors[0].dist
@property
def all_var(self):
""" All variables in the DistGraph.
Returns
-------
list of str
"""
return [var_name for var_name in self.graph if isinstance(var_name, str)]
@property
def input_var(self):
""" conditional variables and observation variables in the DistGraph.
Returns
-------
list of str
"""
def is_input_var_node(var_name):
if not isinstance(var_name, str):
return False
if not self.graph.pred[var_name]:
return True
if var_name in self._factors_from_variable(var_name)[0].input_var:
return True
else:
return False
return [var_name for var_name in self.graph if is_input_var_node(var_name)]
@property
def cond_var(self):
""" conditional variables in the DistGraph.
Returns
-------
list of str
"""
return [var_name for var_name in self.graph if isinstance(var_name, str) and not self.graph.pred[var_name]]
@property
    def var(self):
        """ Variables generated by the factors of this DistGraph, excluding marginalized ones.
Returns
-------
list of str
"""
def is_var_node(var_name):
if not isinstance(var_name, str):
return False
if self.graph.pred[var_name] and var_name not in self.marginalize_list:
return True
else:
return False
return [var_name for var_name in self.graph if is_var_node(var_name)]
def forward(self, mode, kwargs):
if mode == 'sample':
return self._sample(**kwargs)
elif mode == 'get_log_prob':
return self._get_log_prob(**kwargs)
else:
raise ValueError()
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
_kwargs = dict(x_dict=x_dict, batch_n=batch_n, sample_shape=sample_shape,
return_all=return_all, reparam=reparam, sample_mean=sample_mean)
_kwargs.update(kwargs)
return self('sample', kwargs=_kwargs)
def _sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
"""
Sample variables of this distribution.
If :attr:`cond_var` is not empty, you should set inputs as :obj:`dict`.
Parameters
----------
x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {}
Input variables.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
Shape of generating samples.
return_all : :obj:`bool`, defaults to True
Choose whether the output contains input variables.
reparam : :obj:`bool`, defaults to False.
Choose whether we sample variables with re-parameterized trick.
Returns
-------
output : dict
Samples of this distribution.
Examples
--------
>>> from pixyz.distributions.distributions import DistGraph
>>> import pixyz.distributions as pd
        >>> # The graph is not used when calling an atomic distribution directly.
>>> normal = pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1))
>>> normal.sample(batch_n=2, sample_shape=torch.Size((3, 4)),
... return_all=True, reparam=True)['x'].shape
torch.Size([3, 4, 2, 1])
>>> normal2 = pd.Normal(var=['y'], loc=torch.zeros(1), scale=torch.ones(1))
>>> multi_dist = normal * normal2
>>> sample = multi_dist.sample()
>>> sample # doctest: +SKIP
{'y': tensor([[0.6635]]), 'x': tensor([[0.3966]])}
>>> sample = multi_dist.sample(batch_n=2)
>>> normal3 = pd.Normal(var=['z'], cond_var=['y'], loc='y', scale=torch.ones(1))
>>> wrong_dist = multi_dist * normal2
Traceback (most recent call last):
...
ValueError: There is var-name conflicts between two graphs.
>>> multi_dist2 = multi_dist * normal3
        >>> # TODO: this issue will be addressed in another pull request; distributions with cond_var currently have this problem.
>>> multi_dist2.sample(batch_n=2, sample_shape=(3, 4))
Traceback (most recent call last):
...
ValueError: Batch shape mismatch. batch_shape from parameters: torch.Size([3, 4, 2, 1])
specified batch size:2
>>> sample = multi_dist2.sample(batch_n=2)
>>> sample # doctest: +SKIP
{'y': tensor([[1.6723], [0.1929]]), 'z': tensor([[ 0.8572], [-0.5933]]), 'x': tensor([[-0.4255], [-0.4793]])}
>>> sample = multi_dist2.sample(sample_shape=(1,))
>>> sample # doctest: +SKIP
{'y': tensor([[[-0.8537]]]), 'z': tensor([[[[-2.1819]]]]), 'x': tensor([[[-0.0797]]])}
        >>> # With return_all=False, the returned dict excludes conditioned and unused variables
>>> normal4 = pd.Normal(var=['a'], cond_var=['b'], loc='b', scale=torch.ones(1))
>>> dist3 = multi_dist2.marginalize_var(['y']).replace_var(z='w').replace_var(x='z').replace_var(z='x')*normal4
>>> sample = dist3.sample(x_dict={'b': torch.ones(2, 1), 'c': torch.zeros(1)}, return_all=False)
>>> sample.keys()
dict_keys(['a', 'w', 'x'])
>>> from pixyz.distributions import Normal, Categorical
>>> from pixyz.distributions.mixture_distributions import MixtureModel
>>> z_dim = 3 # the number of mixture
>>> x_dim = 2 # the input dimension.
>>> distributions = [] # the list of distributions
>>> for i in range(z_dim):
... loc = torch.randn(x_dim) # initialize the value of location (mean)
... scale = torch.empty(x_dim).fill_(1.) # initialize the value of scale (variance)
... distributions.append(Normal(loc=loc, scale=scale, var=["y"], name="p_%d" %i))
>>> probs = torch.empty(z_dim).fill_(1. / z_dim) # initialize the value of probabilities
>>> prior = Categorical(probs=probs, var=["z"], name="prior")
>>> p = MixtureModel(distributions=distributions, prior=prior)
>>> dist = normal*p
>>> dist.graph.set_option({'return_hidden': True}, var=['y'])
>>> list(dist.sample().keys())
['y', 'z', 'x']
"""
sample_option = dict(self.global_option)
sample_option.update(dict(batch_n=batch_n, sample_shape=sample_shape,
return_all=False, reparam=reparam, sample_mean=sample_mean))
sample_option.update(kwargs)
        # return_all is forced to False for each factor; the graph-level return_all is handled below.
if not(set(x_dict) >= set(self.input_var)):
raise ValueError(f"Input keys are not valid, expected {set(self.input_var)} but got {set(x_dict)}.")
values = get_dict_values(x_dict, self.input_var, return_dict=True)
for factor in self.factors(sorted=True):
sample = factor.sample(values, sample_option)
values.update(sample)
result_dict = delete_dict_values(values, self.marginalize_list)
if return_all:
output_dict = dict(delete_dict_values(x_dict, self.input_var))
output_dict.update(result_dict)
return output_dict
else:
return delete_dict_values(result_dict, self.input_var)
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
return self(mode='get_log_prob', kwargs={'x_dict': x_dict, 'sum_features': sum_features,
'feature_dims': feature_dims})
def _get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
""" Giving variables, this method returns values of log-pdf.
Parameters
----------
x_dict : dict
Input variables.
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some dimensions which are specified by `feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output.
Returns
-------
log_prob : torch.Tensor
Values of log-probability density/mass function.
Examples
--------
>>> from pixyz.distributions.distributions import DistGraph
>>> import torch
>>> import pixyz.distributions as pd
        >>> # The graph is not used when calling an atomic distribution directly.
>>> pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1)).get_log_prob({'x': torch.zeros(1, 1)})
tensor([-0.9189])
        >>> # DistGraph is used for joint distributions and the like
>>> dist = pd.Normal(var=['x'], loc=torch.zeros(1), scale=torch.ones(1))
>>> dist *= pd.Normal(var=['y'], loc=torch.zeros(1), scale=torch.ones(1))
>>> dist = dist.replace_var(y='z')
>>> dist.get_log_prob({'x': torch.zeros(1, 1), 'z': torch.zeros(1, 1)})
tensor([-1.8379])
        >>> # When variables are marginalized, the log-likelihood is not computed.
>>> m_dist = dist.marginalize_var(['z'])
>>> m_dist.get_log_prob({'x': torch.zeros(1, 1)})
Traceback (most recent call last):
...
NotImplementedError
"""
# """
        # >>> # When stochastic variables are marginalized, the log-likelihood cannot be computed.
# >>> m_dist = dist.marginalize_var(['z'])
# >>> m_dist.get_log_prob({'x': torch.zeros(1, 1)})
# Traceback (most recent call last):
# ...
        # ValueError: This distribution is marginalized by the stochastic variables '['z']'. Log probability of it cannot be calculated.
        # >>> # When deterministic variables are marginalized, the log-likelihood is computed under the assumption that the deterministic variables match.
# >>> class MyDeterministic(pd.Deterministic):
# ... def forward(self):
# ... return {'x': torch.zeros(1, 1)}
# >>> dist = MyDeterministic(var=['x'])
# >>> dist *= pd.Normal(var=['y'], cond_var=['x'], loc='x', scale=torch.ones(1))
# >>> dist.get_log_prob({'y': torch.zeros(1, 1), 'x': torch.zeros(1, 1)})
# Traceback (most recent call last):
# ...
# NotImplementedError: Log probability of deterministic distribution is not defined.
# >>> m_dist = dist.marginalize_var(['x'])
# >>> m_dist.get_log_prob({'y': torch.zeros(1, 1)})
# tensor([-0.9189])
# """
sample_option = dict(self.global_option)
# sample_option.update(dict(batch_n=batch_n, sample_shape=sample_shape, return_all=False))
if len(self.marginalize_list) != 0:
raise NotImplementedError()
log_prob_option = dict(self.global_option)
log_prob_option.update(dict(sum_features=sum_features, feature_dims=feature_dims))
log_prob_option.update(kwargs)
require_var = self.var + self.cond_var
if not(set(x_dict) >= set(require_var)):
raise ValueError(f"Input keys are not valid, expected {set(require_var)}"
f" but got {set(x_dict)}.")
values = get_dict_values(x_dict, require_var, return_dict=True)
log_prob = None
prev_dist = None
for factor in self.factors(sorted=True):
local_var = self.graph.succ[factor]
local_marginalized_var = [var_name for var_name in local_var if var_name in self.marginalize_list]
if len(local_marginalized_var) != 0:
if any(var_name in values for var_name in local_marginalized_var):
raise ValueError(f"The marginalized variables '{local_marginalized_var}'"
f" appears in the dictionary: {x_dict}.")
if factor.dist.distribution_name != "Deterministic":
                    raise ValueError(f"This distribution is marginalized by the stochastic variables '{local_marginalized_var}'."
                                     f" Log probability of it cannot be calculated.")
if set(local_var) != set(local_marginalized_var):
raise ValueError("Some deterministic variables are not marginalized.")
                # batch_n can be inferred from the values given to subsequent variables; sample_shape could be resolved with named shapes.
sample = factor.sample(values, sample_option)
values.update(sample)
continue
new_log_prob = factor.get_log_prob(values, log_prob_option)
if log_prob is None:
log_prob = new_log_prob
else:
if log_prob.size() != new_log_prob.size():
raise ValueError(f"Two PDFs, {prev_dist.prob_text} and {factor.dist.prob_text}, have different sizes,"
" so you must modify these tensor sizes.")
log_prob += new_log_prob
prev_dist = factor.dist
if log_prob is None:
return 0
return log_prob
def get_params(self, params_dict={}, **kwargs):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
result = factor.get_params(params_dict, **kwargs)
return result
def sample_mean(self, x_dict={}):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
            result = factor.sample_mean(x_dict)
return result
def sample_variance(self, x_dict={}):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
result = factor.sample_variance(x_dict)
return result
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
if len(self.var) != 1:
raise NotImplementedError()
for factor in self.factors():
result = factor.get_entropy(x_dict, sum_features, feature_dims)
return result
@property
def has_reparam(self):
return all(factor.dist.has_reparam for factor in self.factors())
def __str__(self):
network_text = "\n".join(str(factor) for factor in self.factors(sorted=True))
return _make_distribution_text(self.prob_joint_factorized_and_text, network_text)
@property
def prob_text(self):
return _make_prob_text(self.name, self.var, self.cond_var)
@property
def prob_factorized_text(self):
text = ""
for factor in self.factors(sorted=True):
text = factor.prob_text + text
if self.marginalize_list:
integral_symbol = len(self.marginalize_list) * "\\int "
integral_variables = ["d" + convert_latex_name(var) for var in self.marginalize_list]
integral_variables = "".join(integral_variables)
return f"{integral_symbol}{text}{integral_variables}"
return text
@property
def prob_joint_factorized_and_text(self):
return _make_prob_equality_text(self.prob_text, self.prob_factorized_text)
def visible_graph(self, dotmode=False):
visible_graph = nx.DiGraph()
def dont_esc(name: str):
return f"${name}$"
for factor in self.factors():
for var_name in factor.var:
for cond_var_name in factor.cond_var:
if dotmode:
visible_graph.add_edge(cond_var_name, var_name)
else:
visible_graph.add_edge(dont_esc(cond_var_name), dont_esc(var_name))
if dotmode:
for var_name in visible_graph:
visible_graph.add_node(var_name, texlbl=dont_esc(var_name))
return visible_graph
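# Usage sketch for visible_graph (illustration only, not from the original file):
# >>> import pixyz.distributions as pd
# >>> p = pd.Normal(var=['x'], cond_var=['y'], loc='y', scale=1.) * pd.Normal(var=['y'], loc=0., scale=1.)
# >>> sorted(p.graph.visible_graph(dotmode=True).edges())
# [('y', 'x')]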
class Distribution(nn.Module):
"""Distribution class. In Pixyz, all distributions are required to inherit this class.
Examples
--------
>>> import torch
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[64], name="p1")
>>> print(p1)
Distribution:
p_{1}(x)
Network architecture:
Normal(
name=p_{1}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([64])
(loc): torch.Size([1, 64])
(scale): torch.Size([1, 64])
)
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[64], name="p2")
>>> print(p2)
Distribution:
p_{2}(x|y)
Network architecture:
Normal(
name=p_{2}, distribution_name=Normal,
var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([64])
(scale): torch.Size([1, 64])
)
>>> # Conditional distribution (by neural networks)
>>> class P(Normal):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["y"],name="p3")
... self.model_loc = nn.Linear(128, 64)
... self.model_scale = nn.Linear(128, 64)
... def forward(self, y):
... return {"loc": self.model_loc(y), "scale": F.softplus(self.model_scale(y))}
>>> p3 = P()
>>> print(p3)
Distribution:
p_{3}(x|y)
Network architecture:
P(
name=p_{3}, distribution_name=Normal,
var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
(model_loc): Linear(in_features=128, out_features=64, bias=True)
(model_scale): Linear(in_features=128, out_features=64, bias=True)
)
"""
def __init__(self, var, cond_var=[], name="p", features_shape=torch.Size(), atomic=True):
"""
Parameters
----------
var : :obj:`list` of :obj:`str`
Variables of this distribution.
cond_var : :obj:`list` of :obj:`str`, defaults to []
Conditional variables of this distribution.
            If cond_var is not empty, the corresponding inputs must be given in order to sample variables.
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in :attr:`prob_text` and :attr:`prob_factorized_text`.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size())
Shape of dimensions (features) of this distribution.
"""
super().__init__()
_vars = cond_var + var
if len(_vars) != len(set(_vars)):
raise ValueError("There are conflicted variables.")
self._cond_var = cond_var
self._var = var
self._name = convert_latex_name(name)
self._atomic = atomic
if atomic and len(var) == 0:
raise ValueError("At least one variable is required for an atomic distribution.")
self._graph = None
self._features_shape = torch.Size(features_shape)
@property
def graph(self):
if self._atomic:
if not self._graph:
                # wrapped in a tuple so that nn.Module does not register the DistGraph as a submodule
self._graph = (DistGraph().appended(atom_dist=self),)
return self._graph[0]
else:
return self._graph
@property
def distribution_name(self):
"""str: Name of this distribution class."""
return ""
@property
def name(self):
"""str: Name of this distribution displayed in :obj:`prob_text` and :obj:`prob_factorized_text`."""
return self._name
@name.setter
def name(self, name):
if type(name) is str:
self._name = name
if self._atomic:
self.graph.name = name
return
raise ValueError("Name of the distribution class must be a string type.")
@property
def var(self):
"""list: Variables of this distribution."""
return self._var if self._atomic else self.graph.var
@property
def cond_var(self):
"""list: Conditional variables of this distribution."""
return self._cond_var if self._atomic else self.graph.cond_var
@property
def input_var(self):
"""list: Input variables of this distribution.
        Normally, it has the same values as :attr:`cond_var`.
"""
return self._cond_var if self._atomic else self.graph.input_var
@property
def prob_text(self):
"""str: Return a formula of the (joint) probability distribution."""
if not self._atomic:
return self.graph.prob_text
return _make_prob_text(self._name, self.var, self.cond_var)
@property
def prob_factorized_text(self):
"""str: Return a formula of the factorized probability distribution."""
if not self._atomic:
return self.graph.prob_factorized_text
return self.prob_text
@property
def prob_joint_factorized_and_text(self):
"""str: Return a formula of the factorized and the (joint) probability distributions."""
if not self._atomic:
return self.graph.prob_joint_factorized_and_text
return _make_prob_equality_text(self.prob_text, self.prob_factorized_text)
@property
def features_shape(self):
"""torch.Size or list: Shape of features of this distribution."""
return self._features_shape
def _get_input_dict(self, input, var=None):
"""Check the type of given input.
        If the input type is :obj:`dict`, this method checks whether the input keys contain the :attr:`var` list.
In case that its type is :obj:`list` or :obj:`tensor`, it returns the output formatted in :obj:`dict`.
Parameters
----------
input : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`
Input variables.
var : :obj:`list` or :obj:`NoneType`, defaults to None
Variables to check if given input contains them.
This is set to None by default.
Returns
-------
input_dict : dict
Variables checked in this method.
Raises
------
ValueError
            Raises `ValueError` if the type of input is neither :obj:`torch.Tensor`, :obj:`list`, nor :obj:`dict`.
"""
if var is None:
var = self.input_var
if type(input) is torch.Tensor:
input_dict = {var[0]: input}
elif type(input) is list:
# TODO: we need to check if all the elements contained in this list are torch.Tensor.
input_dict = dict(zip(var, input))
elif type(input) is dict:
if not (set(input) >= set(var)):
raise ValueError(f"Input keys are not valid, expected {set(var)} but got {set(input)}.")
input_dict = get_dict_values(input, var, return_dict=True)
else:
raise ValueError("The type of input is not valid, got %s." % type(input))
return input_dict
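    # Illustration of the three accepted input types (informal, added for clarity);
    # here `d` is any distribution whose input_var is ['y'] and `t`, `u` are tensors:
    #   d._get_input_dict(t)                 -> {'y': t}
    #   d._get_input_dict([t])               -> {'y': t}
    #   d._get_input_dict({'y': t, 'z': u})  -> {'y': t}   (redundant keys are dropped)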
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True,
reparam=False, sample_mean=False, **kwargs):
"""Sample variables of this distribution.
If :attr:`cond_var` is not empty, you should set inputs as :obj:`dict`.
Parameters
----------
x_dict : :obj:`torch.Tensor`, :obj:`list`, or :obj:`dict`, defaults to {}
Input variables.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
Shape of generating samples.
return_all : :obj:`bool`, defaults to True
Choose whether the output contains input variables.
reparam : :obj:`bool`, defaults to False.
Choose whether we sample variables with re-parameterized trick.
Returns
-------
output : dict
Samples of this distribution.
Examples
--------
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p = Normal(loc=0, scale=1, var=["x"], features_shape=[10, 2])
>>> print(p)
Distribution:
p(x)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([10, 2])
(loc): torch.Size([1, 10, 2])
(scale): torch.Size([1, 10, 2])
)
>>> p.sample()["x"].shape # (batch_n=1, features_shape)
torch.Size([1, 10, 2])
>>> p.sample(batch_n=20)["x"].shape # (batch_n, features_shape)
torch.Size([20, 10, 2])
>>> p.sample(batch_n=20, sample_shape=[40, 30])["x"].shape # (sample_shape, batch_n, features_shape)
torch.Size([40, 30, 20, 10, 2])
>>> # Conditional distribution
>>> p = Normal(loc="y", scale=1., var=["x"], cond_var=["y"], features_shape=[10])
>>> print(p)
Distribution:
p(x|y)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([10])
(scale): torch.Size([1, 10])
)
        >>> sample_y = torch.randn(1, 10) # Pseudo data
        >>> sample_a = torch.randn(1, 10) # Pseudo data
>>> sample = p.sample({"y": sample_y})
>>> print(sample) # input_var + var # doctest: +SKIP
{'y': tensor([[-0.5182, 0.3484, 0.9042, 0.1914, 0.6905,
-1.0859, -0.4433, -0.0255, 0.8198, 0.4571]]),
'x': tensor([[-0.7205, -1.3996, 0.5528, -0.3059, 0.5384,
-1.4976, -0.1480, 0.0841,0.3321, 0.5561]])}
>>> sample = p.sample({"y": sample_y, "a": sample_a}) # Redundant input ("a")
>>> print(sample) # input_var + var + "a" (redundant input) # doctest: +SKIP
{'y': tensor([[ 1.3582, -1.1151, -0.8111, 1.0630, 1.1633,
0.3855, 2.6324, -0.9357, -0.8649, -0.6015]]),
'a': tensor([[-0.1874, 1.7958, -1.4084, -2.5646, 1.0868,
-0.7523, -0.0852, -2.4222, -0.3914, -0.9755]]),
'x': tensor([[-0.3272, -0.5222, -1.3659, 1.8386, 2.3204,
0.3686, 0.6311, -1.1208, 0.3656, -0.6683]])}
"""
if self.graph:
return self.graph.sample(x_dict, batch_n, sample_shape, return_all, reparam, sample_mean, **kwargs)
raise NotImplementedError()
@property
def has_reparam(self):
if self.graph:
return self.graph.has_reparam
raise NotImplementedError()
def sample_mean(self, x_dict={}):
"""Return the mean of the distribution.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}
Parameters of this distribution.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> mean = p1.sample_mean()
>>> print(mean)
tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Pseudo data
>>> mean = p2.sample_mean({"y": sample_y})
>>> print(mean) # doctest: +SKIP
tensor([[-0.2189, -1.0310, -0.1917, -0.3085, 1.5190, -0.9037, 1.2559, 0.1410,
1.2810, -0.6681]])
"""
if self.graph:
return self.graph.sample_mean(x_dict)
raise NotImplementedError()
def sample_variance(self, x_dict={}):
"""Return the variance of the distribution.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}
Parameters of this distribution.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> var = p1.sample_variance()
>>> print(var)
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Pseudo data
>>> var = p2.sample_variance({"y": sample_y})
>>> print(var) # doctest: +SKIP
tensor([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]])
"""
if self.graph:
return self.graph.sample_variance(x_dict)
raise NotImplementedError()
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
"""Giving variables, this method returns values of log-pdf.
Parameters
----------
x_dict : dict
Input variables.
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some dimensions which are specified by `feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output.
Returns
-------
log_prob : torch.Tensor
Values of log-probability density/mass function.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Pseudo data
>>> log_prob = p1.log_prob({"x": sample_x})
>>> print(log_prob) # doctest: +SKIP
tensor([-16.1153])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Pseudo data
>>> log_prob = p2.log_prob({"x": sample_x, "y": sample_y})
>>> print(log_prob) # doctest: +SKIP
tensor([-21.5251])
"""
if self.graph:
return self.graph.get_log_prob(x_dict, sum_features, feature_dims, **kwargs)
raise NotImplementedError()
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
"""Giving variables, this method returns values of entropy.
Parameters
----------
x_dict : dict, defaults to {}
Input variables.
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some dimensions which are specified by :attr:`feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output.
Returns
-------
entropy : torch.Tensor
Values of entropy.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
>>> entropy = p1.get_entropy()
>>> print(entropy)
tensor([14.1894])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Pseudo data
>>> entropy = p2.get_entropy({"y": sample_y})
>>> print(entropy)
tensor([14.1894])
"""
if self.graph:
return self.graph.get_entropy(x_dict, sum_features, feature_dims)
raise NotImplementedError()
def get_params(self, params_dict={}, **kwargs):
if self.graph:
return self.graph.get_params(params_dict, **kwargs)
raise NotImplementedError()
def log_prob(self, sum_features=True, feature_dims=None):
"""Return an instance of :class:`pixyz.losses.LogProb`.
Parameters
----------
sum_features : :obj:`bool`, defaults to True
Whether the output is summed across some axes (dimensions) which are specified by :attr:`feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set axes to sum across the output.
Returns
-------
pixyz.losses.LogProb
An instance of :class:`pixyz.losses.LogProb`
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Pseudo data
>>> log_prob = p1.log_prob().eval({"x": sample_x})
>>> print(log_prob) # doctest: +SKIP
tensor([-16.1153])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Pseudo data
>>> log_prob = p2.log_prob().eval({"x": sample_x, "y": sample_y})
>>> print(log_prob) # doctest: +SKIP
tensor([-21.5251])
"""
return LogProb(self, sum_features=sum_features, feature_dims=feature_dims)
def prob(self, sum_features=True, feature_dims=None):
"""Return an instance of :class:`pixyz.losses.Prob`.
Parameters
----------
sum_features : :obj:`bool`, defaults to True
Choose whether the output is summed across some axes (dimensions)
which are specified by :attr:`feature_dims`.
feature_dims : :obj:`list` or :obj:`NoneType`, defaults to None
Set dimensions to sum across the output. (Note: this parameter is not used for now.)
Returns
-------
pixyz.losses.Prob
An instance of :class:`pixyz.losses.Prob`
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> # Marginal distribution
>>> p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10], name="p1")
        >>> sample_x = torch.randn(1, 10) # Pseudo data
>>> prob = p1.prob().eval({"x": sample_x})
>>> print(prob) # doctest: +SKIP
tensor([4.0933e-07])
>>> # Conditional distribution
>>> p2 = Normal(loc="y", scale=torch.tensor(1.), var=["x"], cond_var=["y"],
... features_shape=[10], name="p2")
        >>> sample_y = torch.randn(1, 10) # Pseudo data
>>> prob = p2.prob().eval({"x": sample_x, "y": sample_y})
>>> print(prob) # doctest: +SKIP
tensor([2.9628e-09])
"""
return Prob(self, sum_features=sum_features, feature_dims=feature_dims)
def forward(self, *args, **kwargs):
"""When this class is inherited by DNNs, this method should be overrided."""
raise NotImplementedError()
def replace_var(self, **replace_dict):
"""Return an instance of :class:`pixyz.distributions.ReplaceVarDistribution`.
Parameters
----------
replace_dict : dict
Dictionary.
Returns
-------
pixyz.distributions.ReplaceVarDistribution
An instance of :class:`pixyz.distributions.ReplaceVarDistribution`
"""
return ReplaceVarDistribution(self, replace_dict)
def marginalize_var(self, marginalize_list):
"""Return an instance of :class:`pixyz.distributions.MarginalizeVarDistribution`.
Parameters
----------
marginalize_list : :obj:`list` or other
Variables to marginalize.
Returns
-------
pixyz.distributions.MarginalizeVarDistribution
An instance of :class:`pixyz.distributions.MarginalizeVarDistribution`
"""
marginalize_list = tolist(marginalize_list)
return MarginalizeVarDistribution(self, marginalize_list)
def __mul__(self, other):
return MultiplyDistribution(self, other)
def __str__(self):
if not self._atomic:
return str(self.graph)
network_text = self.__repr__()
return _make_distribution_text(self.prob_joint_factorized_and_text, network_text)
def extra_repr(self):
# parameters
parameters_text = f'name={self.name}, distribution_name={self.distribution_name},\n' \
f'var={self.var}, cond_var={self.cond_var}, input_var={self.input_var}, ' \
f'features_shape={self.features_shape}'
if len(self._buffers) != 0:
# add buffers to repr
buffers = [f"({key}): {value.shape}" for key, value in self._buffers.items()]
return parameters_text + "\n" + "\n".join(buffers)
return parameters_text
class DistributionBase(Distribution):
"""Distribution class with PyTorch. In Pixyz, all distributions are required to inherit this class."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), **kwargs):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape)
self._set_buffers(**kwargs)
self._dist = None
def _set_buffers(self, **params_dict):
"""Format constant parameters of this distribution as buffers.
Parameters
----------
params_dict : dict
Constant parameters of this distribution set at initialization.
            If a value in this dictionary is given as a string, the parameter is treated as a `variable`;
            the correspondence between that variable name and the parameter name is stored as a
            :obj:`dict` in :attr:`replace_params_dict`.
"""
self.replace_params_dict = {}
for key, value in params_dict.items():
if type(value) is str:
if value in self._cond_var:
if value not in self.replace_params_dict:
self.replace_params_dict[value] = []
self.replace_params_dict[value].append(key)
else:
                    raise ValueError(f"parameter setting {key}:{value} is not valid"
                                     f" because cond_var does not contain {value}.")
elif isinstance(value, torch.Tensor) \
or isinstance(value, float) or isinstance(value, int):
if not isinstance(value, torch.Tensor):
features = torch.tensor(value, dtype=torch.float)
else:
features = value
features_checked = self._check_features_shape(features)
# clone features to make it contiguous & to make it independent.
self.register_buffer(key, features_checked.clone())
else:
raise ValueError(f"The types that can be specified as parameters of distribution"
f" are limited to str & torch.Tensor. Got: {type(value)}")
def _check_features_shape(self, features):
# scalar
if features.size() == torch.Size():
features = features.expand(self.features_shape)
if self.features_shape == torch.Size():
self._features_shape = features.shape
if features.size() == self.features_shape:
batches = features.unsqueeze(0)
return batches
raise ValueError(f"the shape of a given parameter {features.size()}"
f" and features_shape {self.features_shape} do not match.")
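    # Shape illustration (informal): with features_shape == torch.Size([10, 2]),
    # a scalar parameter is first expanded to (10, 2) and then returned with a
    # leading batch axis as (1, 10, 2); a (10, 2) tensor is likewise returned as
    # (1, 10, 2); any other shape raises ValueError.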
@property
def params_keys(self):
"""list: Return the list of parameter names for this distribution."""
raise NotImplementedError()
@property
def distribution_torch_class(self):
"""Return the class of PyTorch distribution."""
raise NotImplementedError()
@property
def dist(self):
"""Return the instance of PyTorch distribution."""
return self._dist
def set_dist(self, x_dict={}, batch_n=None, **kwargs):
"""Set :attr:`dist` as PyTorch distributions given parameters.
This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}.
Parameters of this distribution.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
params = self.get_params(x_dict, **kwargs)
if set(self.params_keys) != set(params.keys()):
raise ValueError(f"{type(self)} class requires following parameters: {set(self.params_keys)}\n"
f"but got {set(params.keys())}")
self._dist = self.distribution_torch_class(**params)
# expand batch_n
if batch_n:
batch_shape = self._dist.batch_shape
if batch_shape[0] == 1:
self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
elif batch_shape[0] == batch_n:
return
else:
raise ValueError(f"Batch shape mismatch. batch_shape from parameters: {batch_shape}\n"
f" specified batch size:{batch_n}")
def get_sample(self, reparam=False, sample_shape=torch.Size()):
"""Get a sample_shape shaped sample from :attr:`dist`.
Parameters
----------
        reparam : :obj:`bool`, defaults to False.
            Choose whether to sample using the re-parameterization trick.
sample_shape : :obj:`tuple` or :obj:`torch.Size`, defaults to torch.Size().
Set the shape of a generated sample.
Returns
-------
samples_dict : dict
Generated sample formatted by :obj:`dict`.
"""
if reparam and self.dist.has_rsample:
_samples = self.dist.rsample(sample_shape=sample_shape)
else:
_samples = self.dist.sample(sample_shape=sample_shape)
samples_dict = {self._var[0]: _samples}
return samples_dict
@property
def has_reparam(self):
raise NotImplementedError()
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
_x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
self.set_dist(_x_dict)
x_targets = get_dict_values(x_dict, self._var)
if len(x_targets) == 0:
raise ValueError(f"x_dict has no value of the stochastic variable. x_dict: {x_dict}")
log_prob = self.dist.log_prob(*x_targets)
if sum_features:
log_prob = sum_samples(log_prob, feature_dims)
return log_prob
@lru_cache_for_sample_dict()
def get_params(self, params_dict={}, **kwargs):
"""This method aims to get parameters of this distributions from constant parameters set in initialization
and outputs of DNNs.
Parameters
----------
params_dict : :obj:`dict`, defaults to {}
Input parameters.
Returns
-------
output_dict : dict
Output parameters.
Examples
--------
>>> from pixyz.distributions import Normal
>>> dist_1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[1])
>>> print(dist_1)
Distribution:
p(x)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([1])
(loc): torch.Size([1, 1])
(scale): torch.Size([1, 1])
)
>>> dist_1.get_params()
{'loc': tensor([[0.]]), 'scale': tensor([[1.]])}
>>> dist_2 = Normal(loc=torch.tensor(0.), scale="z", cond_var=["z"], var=["x"])
>>> print(dist_2)
Distribution:
p(x|z)
Network architecture:
Normal(
name=p, distribution_name=Normal,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(loc): torch.Size([1])
)
>>> dist_2.get_params({"z": torch.tensor(1.)})
{'scale': tensor(1.), 'loc': tensor([0.])}
"""
replaced_params_dict = {}
for key, value in params_dict.items():
if key in self.replace_params_dict:
for replaced_key in self.replace_params_dict[key]:
replaced_params_dict[replaced_key] = value
vars_dict = {key: value for key, value in params_dict.items() if key not in self.replace_params_dict}
output_dict = self(**vars_dict)
output_dict.update(replaced_params_dict)
# append constant parameters to output_dict
constant_params_dict = get_dict_values(dict(self.named_buffers()), self.params_keys,
return_dict=True)
output_dict.update(constant_params_dict)
return output_dict
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
_x_dict = get_dict_values(x_dict, self._cond_var, return_dict=True)
self.set_dist(_x_dict)
entropy = self.dist.entropy()
if sum_features:
entropy = sum_samples(entropy, feature_dims)
return entropy
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
# check whether the input is valid or convert it to valid dictionary.
input_dict = self._get_input_dict(x_dict)
self.set_dist(input_dict, batch_n=batch_n)
if sample_mean:
mean = self.dist.mean
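# tile the mean over sample_shape so the result has shape sample_shape + mean.shape, mirroring dist.sample(sample_shape)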
if sample_shape != torch.Size():
unsqueeze_shape = torch.Size([1] * len(sample_shape))
unrepeat_shape = torch.Size([1] * mean.ndim)
mean = mean.reshape(unsqueeze_shape + mean.shape).repeat(sample_shape + unrepeat_shape)
output_dict = {self._var[0]: mean}
else:
output_dict = self.get_sample(reparam=reparam, sample_shape=sample_shape)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
def sample_mean(self, x_dict={}):
self.set_dist(x_dict)
return self.dist.mean
def sample_variance(self, x_dict={}):
self.set_dist(x_dict)
return self.dist.variance
def forward(self, **params):
return params
@property
def prob_factorized_text(self):
"""str: Return a formula of the factorized probability distribution."""
return self.graph.prob_text
class MultiplyDistribution(Distribution):
"""Multiply by given distributions, e.g, :math:`p(x,y|z) = p(x|z,y)p(y|z)`.
In this class, it is checked if two distributions can be multiplied.
p(x|z)p(z|y) -> Valid
p(x|z)p(y|z) -> Valid
p(x|z)p(y|a) -> Valid
p(x|z)p(z|x) -> Invalid (recursive)
p(x|z)p(x|y) -> Invalid (conflict)
Examples
--------
>>> a = DistributionBase(var=["x"],cond_var=["z"])
>>> b = DistributionBase(var=["z"],cond_var=["y"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,z|y) = p(x|z)p(z|y)
Network architecture:
p(z|y):
DistributionBase(
name=p, distribution_name=,
var=['z'], cond_var=['y'], input_var=['y'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> b = DistributionBase(var=["y"],cond_var=["z"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,y|z) = p(x|z)p(y|z)
Network architecture:
p(y|z):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> b = DistributionBase(var=["y"],cond_var=["a"])
>>> p_multi = MultiplyDistribution(a, b)
>>> print(p_multi)
Distribution:
p(x,y|z,a) = p(x|z)p(y|a)
Network architecture:
p(y|a):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['a'], input_var=['a'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
"""
def __init__(self, a, b):
"""
Parameters
----------
a : pixyz.Distribution
Distribution.
b : pixyz.Distribution
Distribution.
"""
super().__init__(var=[], atomic=False)
self._graph = a.graph.united(b.graph)
def __repr__(self):
return repr(self.graph)
class ReplaceVarDistribution(Distribution):
"""Replace names of variables in Distribution.
Examples
--------
>>> p = DistributionBase(var=["x"],cond_var=["z"])
>>> print(p)
Distribution:
p(x|z)
Network architecture:
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> replace_dict = {'x': 'y'}
>>> p_repl = ReplaceVarDistribution(p, replace_dict)
>>> print(p_repl)
Distribution:
p(y|z)
Network architecture:
p(y|z) -> p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
"""
def __init__(self, p, replace_dict):
"""
Parameters
----------
p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.MultiplyDistribution`)
Distribution.
replace_dict : dict
Dictionary.
"""
super().__init__(var=[], cond_var=[], name=p.name, features_shape=p.features_shape, atomic=False)
self._graph = p.graph.var_replaced(replace_dict)
self.p = p
def __repr__(self):
return repr(self.graph)
def forward(self, *args, **kwargs):
return self.p(*args, **kwargs)
@property
def distribution_name(self):
return self.p.distribution_name
def __getattr__(self, item):
try:
return super().__getattr__(item)
except AttributeError:
import warnings
warnings.warn("this magic method will be deprecated.")
return self.p.__getattribute__(item)
class MarginalizeVarDistribution(Distribution):
r"""Marginalize variables in Distribution.
.. math::
p(x) = \int p(x,z) dz
Examples
--------
>>> a = DistributionBase(var=["x"],cond_var=["z"])
>>> b = DistributionBase(var=["y"],cond_var=["z"])
>>> p_multi = a * b
>>> print(p_multi)
Distribution:
p(x,y|z) = p(x|z)p(y|z)
Network architecture:
p(y|z):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
>>> p_marg = MarginalizeVarDistribution(p_multi, ["y"])
>>> print(p_marg)
Distribution:
p(x|z) = \int p(x|z)p(y|z)dy
Network architecture:
p(y|z):
DistributionBase(
name=p, distribution_name=,
var=['y'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
p(x|z):
DistributionBase(
name=p, distribution_name=,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
)
"""
def __init__(self, p: Distribution, marginalize_list):
"""
Parameters
----------
p : :class:`pixyz.distributions.Distribution` (not :class:`pixyz.distributions.DistributionBase`)
Distribution.
marginalize_list : list
Variables to marginalize.
"""
marginalize_list = tolist(marginalize_list)
super().__init__(var=[], cond_var=[], name=p.name, features_shape=p.features_shape, atomic=False)
self._graph = p.graph.marginalized(marginalize_list)
self.p = p
def __repr__(self):
return repr(self.graph)
def forward(self, *args, **kwargs):
return self.p(*args, **kwargs)
def sample_mean(self, x_dict={}):
return self.p.sample_mean(x_dict)
def sample_variance(self, x_dict={}):
return self.p.sample_variance(x_dict)
def get_entropy(self, x_dict={}, sum_features=True, feature_dims=None):
return self.p.get_entropy(x_dict, sum_features, feature_dims)
@property
def distribution_name(self):
return self.p.distribution_name
def __getattr__(self, item):
try:
return super().__getattr__(item)
except AttributeError:
import warnings
warnings.warn("this magic method will be deprecated.")
return self.p.__getattribute__(item)
| 70,384 | 36.800752 | 137 | py |
pixyz | pixyz-main/pixyz/distributions/exponential_distributions.py | import torch
from torch.distributions import Normal as NormalTorch
from torch.distributions import Bernoulli as BernoulliTorch
from torch.distributions import RelaxedBernoulli as RelaxedBernoulliTorch
from torch.distributions import RelaxedOneHotCategorical as RelaxedOneHotCategoricalTorch
from torch.distributions.one_hot_categorical import OneHotCategorical as CategoricalTorch
from torch.distributions import Multinomial as MultinomialTorch
from torch.distributions import Dirichlet as DirichletTorch
from torch.distributions import Beta as BetaTorch
from torch.distributions import Laplace as LaplaceTorch
from torch.distributions import Gamma as GammaTorch
from torch.distributions.utils import broadcast_all
from torch.nn.functional import binary_cross_entropy_with_logits
from ..utils import get_dict_values, sum_samples
from .distributions import DistributionBase
def _valid_param_dict(raw_dict):
return {var_name: value for var_name, value in raw_dict.items() if value is not None}
class Normal(DistributionBase):
"""Normal distribution parameterized by :attr:`loc` and :attr:`scale`. """
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), loc=None, scale=None):
super().__init__(var, cond_var, name, features_shape, **_valid_param_dict({'loc': loc, 'scale': scale}))
@property
def params_keys(self):
return ["loc", "scale"]
@property
def distribution_torch_class(self):
return NormalTorch
@property
def distribution_name(self):
return "Normal"
@property
def has_reparam(self):
return True
class BernoulliTorchOld(BernoulliTorch):
def log_prob(self, value):
logits, value = broadcast_all(self.logits, value)
return -binary_cross_entropy_with_logits(logits, value, reduction='none')
class Bernoulli(DistributionBase):
"""Bernoulli distribution parameterized by :attr:`probs`."""
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), probs=None):
super().__init__(var, cond_var, name, features_shape, **_valid_param_dict({'probs': probs}))
@property
def params_keys(self):
return ["probs"]
@property
def distribution_torch_class(self):
return BernoulliTorchOld
@property
def distribution_name(self):
return "Bernoulli"
@property
def has_reparam(self):
return False
class RelaxedBernoulli(Bernoulli):
"""Relaxed (re-parameterizable) Bernoulli distribution parameterized by :attr:`probs` and :attr:`temperature`."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), temperature=torch.tensor(0.1),
probs=None):
super(Bernoulli, self).__init__(var, cond_var, name, features_shape, **_valid_param_dict({
'probs': probs, 'temperature': temperature}))
@property
def params_keys(self):
return ["probs", "temperature"]
@property
def distribution_torch_class(self):
"""Use relaxed version only when sampling"""
return RelaxedBernoulliTorch
@property
def distribution_name(self):
return "RelaxedBernoulli"
def set_dist(self, x_dict={}, batch_n=None, sampling=False, **kwargs):
"""Set :attr:`dist` as PyTorch distributions given parameters.
This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}.
Parameters of this distribution.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sampling : :obj:`bool` defaults to False.
If it is false, the distribution will not be relaxed to compute log_prob.
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
params = self.get_params(x_dict, **kwargs)
if set(self.params_keys) != set(params.keys()):
raise ValueError("{} class requires following parameters: {}\n"
"but got {}".format(type(self), set(self.params_keys), set(params.keys())))
if sampling:
self._dist = self.distribution_torch_class(**params)
else:
hard_params_keys = ["probs"]
self._dist = BernoulliTorchOld(**get_dict_values(params, hard_params_keys, return_dict=True))
# expand batch_n
if batch_n:
batch_shape = self._dist.batch_shape
if batch_shape[0] == 1:
self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
elif batch_shape[0] == batch_n:
return
else:
raise ValueError()
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
# check whether the input is valid or convert it to valid dictionary.
input_dict = self._get_input_dict(x_dict)
self.set_dist(input_dict, batch_n=batch_n, sampling=True)
if sample_mean:
mean = self.dist.mean
if sample_shape != torch.Size():
unsqueeze_shape = torch.Size([1] * len(sample_shape))
unrepeat_shape = torch.Size([1] * mean.ndim)
mean = mean.reshape(unsqueeze_shape + mean.shape).repeat(sample_shape + unrepeat_shape)
output_dict = {self._var[0]: mean}
else:
output_dict = self.get_sample(reparam=reparam, sample_shape=sample_shape)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return True
class FactorizedBernoulli(Bernoulli):
"""
Factorized Bernoulli distribution parameterized by :attr:`probs`.
References
----------
[Vedantam+ 2017] Generative Models of Visually Grounded Imagination
"""
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), probs=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape, probs=probs)
@property
def distribution_name(self):
return "FactorizedBernoulli"
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
log_prob = super().get_log_prob(x_dict, sum_features=False, **kwargs)
[_x] = get_dict_values(x_dict, self._var)
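# following [Vedantam+ 2017], zero-valued (unobserved) elements contribute nothing to the log-likelihood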
log_prob[_x == 0] = 0
if sum_features:
log_prob = sum_samples(log_prob, feature_dims)
return log_prob
class CategoricalTorchOld(CategoricalTorch):
def log_prob(self, value):
indices = value.max(-1)[1]
return self._categorical.log_prob(indices)
class Categorical(DistributionBase):
"""Categorical distribution parameterized by :attr:`probs`."""
def __init__(self, var=['x'], cond_var=[], name='p', features_shape=torch.Size(), probs=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'probs': probs}))
@property
def params_keys(self):
return ["probs"]
@property
def distribution_torch_class(self):
return CategoricalTorchOld
@property
def distribution_name(self):
return "Categorical"
@property
def has_reparam(self):
return False
class RelaxedCategorical(Categorical):
"""
Relaxed (re-parameterizable) categorical distribution parameterized by :attr:`probs` and :attr:`temperature`.
Note: the shape of `temperature` should contain the event shape of this Categorical distribution.
"""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), temperature=torch.tensor(0.1),
probs=None):
super(Categorical, self).__init__(var, cond_var, name, features_shape,
**_valid_param_dict({'probs': probs, 'temperature': temperature}))
@property
def params_keys(self):
return ['probs', 'temperature']
@property
def distribution_torch_class(self):
"""Use relaxed version only when sampling"""
return RelaxedOneHotCategoricalTorch
@property
def distribution_name(self):
return "RelaxedCategorical"
def set_dist(self, x_dict={}, batch_n=None, sampling=False, **kwargs):
"""Set :attr:`dist` as PyTorch distributions given parameters.
This requires that :attr:`params_keys` and :attr:`distribution_torch_class` are set.
Parameters
----------
x_dict : :obj:`dict`, defaults to {}.
Parameters of this distribution.
batch_n : :obj:`int`, defaults to None.
Set batch size of parameters.
sampling : :obj:`bool` defaults to False.
If it is false, the distribution will not be relaxed to compute log_prob.
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
params = self.get_params(x_dict, **kwargs)
if set(self.params_keys) != set(params.keys()):
raise ValueError("{} class requires following parameters: {}\n"
"but got {}".format(type(self), set(self.params_keys), set(params.keys())))
if sampling:
self._dist = self.distribution_torch_class(**params)
else:
hard_params_keys = ["probs"]
self._dist = CategoricalTorchOld(**get_dict_values(params, hard_params_keys, return_dict=True))
# expand batch_n
if batch_n:
batch_shape = self._dist.batch_shape
if batch_shape[0] == 1:
self._dist = self._dist.expand(torch.Size([batch_n]) + batch_shape[1:])
elif batch_shape[0] == batch_n:
return
else:
raise ValueError()
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
sample_mean=False, **kwargs):
# check whether the input is valid or convert it to valid dictionary.
input_dict = self._get_input_dict(x_dict)
self.set_dist(input_dict, batch_n=batch_n, sampling=True)
if sample_mean:
mean = self.dist.mean
if sample_shape != torch.Size():
unsqueeze_shape = torch.Size([1] * len(sample_shape))
unrepeat_shape = torch.Size([1] * mean.ndim)
mean = mean.reshape(unsqueeze_shape + mean.shape).repeat(sample_shape + unrepeat_shape)
output_dict = {self._var[0]: mean}
else:
output_dict = self.get_sample(reparam=reparam, sample_shape=sample_shape)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return True
class Multinomial(DistributionBase):
"""Multinomial distribution parameterized by :attr:`total_count` and :attr:`probs`."""
def __init__(self, total_count=1, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), probs=None):
self._total_count = total_count
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'probs': probs}))
@property
def total_count(self):
return self._total_count
@property
def params_keys(self):
return ["probs"]
@property
def distribution_torch_class(self):
return MultinomialTorch
@property
def distribution_name(self):
return "Multinomial"
@property
def has_reparam(self):
return False
class Dirichlet(DistributionBase):
"""Dirichlet distribution parameterized by :attr:`concentration`."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), concentration=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'concentration': concentration}))
@property
def params_keys(self):
return ["concentration"]
@property
def distribution_torch_class(self):
return DirichletTorch
@property
def distribution_name(self):
return "Dirichlet"
@property
def has_reparam(self):
return True
class Beta(DistributionBase):
"""Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`."""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), concentration1=None,
concentration0=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'concentration1': concentration1, 'concentration0': concentration0}))
@property
def params_keys(self):
return ["concentration1", "concentration0"]
@property
def distribution_torch_class(self):
return BetaTorch
@property
def distribution_name(self):
return "Beta"
@property
def has_reparam(self):
return True
class Laplace(DistributionBase):
"""
Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.
"""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), loc=None, scale=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'loc': loc, 'scale': scale}))
@property
def params_keys(self):
return ["loc", "scale"]
@property
def distribution_torch_class(self):
return LaplaceTorch
@property
def distribution_name(self):
return "Laplace"
@property
def has_reparam(self):
return True
class Gamma(DistributionBase):
"""
Gamma distribution parameterized by :attr:`concentration` and :attr:`rate`.
"""
def __init__(self, var=["x"], cond_var=[], name="p", features_shape=torch.Size(), concentration=None, rate=None):
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape,
**_valid_param_dict({'concentration': concentration, 'rate': rate}))
@property
def params_keys(self):
return ["concentration", "rate"]
@property
def distribution_torch_class(self):
return GammaTorch
@property
def distribution_name(self):
return "Gamma"
@property
def has_reparam(self):
return True
| 14,788 | 33.154734 | 117 | py |
pixyz | pixyz-main/pixyz/distributions/poe.py | from __future__ import print_function
import torch
from torch import nn
from ..utils import tolist, get_dict_values
from ..distributions import Normal
class ProductOfNormal(Normal):
r"""Product of normal distributions.
.. math::
p(z|x,y) \propto p(z)p(z|x)p(z|y)
In this model, :math:`p(z|x)` and :math:`p(z|y)` act as `experts` and :math:`p(z)` corresponds
to the prior of the `experts`.
References
----------
[Vedantam+ 2017] Generative Models of Visually Grounded Imagination
[Wu+ 2018] Multimodal Generative Models for Scalable Weakly-Supervised Learning
Examples
--------
>>> pon = ProductOfNormal([p_x, p_y]) # doctest: +SKIP
>>> pon.sample({"x": x, "y": y}) # doctest: +SKIP
{'x': tensor([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]],),
'y': tensor([[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 1., ..., 0., 0., 0.],
[0., 1., 0., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 0., 1., 0.],
[1., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 1.]]),
'z': tensor([[ 0.6611, 0.3811, 0.7778, ..., -0.0468, -0.3615, -0.6569],
[-0.0071, -0.9178, 0.6620, ..., -0.1472, 0.6023, 0.5903],
[-0.3723, -0.7758, 0.0195, ..., 0.8239, -0.3537, 0.3854],
...,
[ 0.7820, -0.4761, 0.1804, ..., -0.5701, -0.0714, -0.5485],
[-0.1873, -0.2105, -0.1861, ..., -0.5372, 0.0752, 0.2777],
[-0.2563, -0.0828, 0.1605, ..., 0.2767, -0.8456, 0.7364]])}
>>> pon.sample({"y": y}) # doctest: +SKIP
{'y': tensor([[0., 0., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 1.],
[0., 0., 0., ..., 1., 0., 0.],
...,
[0., 0., 0., ..., 0., 0., 0.],
[0., 1., 0., ..., 0., 0., 0.],
[0., 0., 0., ..., 0., 0., 0.]]),
'z': tensor([[-0.3264, -0.4448, 0.3610, ..., -0.7378, 0.3002, 0.4370],
[ 0.0928, -0.1830, 1.1768, ..., 1.1808, -0.7226, -0.4152],
[ 0.6999, 0.2222, -0.2901, ..., 0.5706, 0.7091, 0.5179],
...,
[ 0.5688, -1.6612, -0.0713, ..., -0.1400, -0.3903, 0.2533],
[ 0.5412, -0.0289, 0.6365, ..., 0.7407, 0.7838, 0.9218],
[ 0.0299, 0.5148, -0.1001, ..., 0.9938, 1.0689, -1.1902]])}
>>> pon.sample() # same as sampling from unit Gaussian. # doctest: +SKIP
{'z': tensor(-0.4494)}
"""
def __init__(self, p=[], weight_modalities=None, name="p", features_shape=torch.Size()):
"""
Parameters
----------
p : :obj:`list` of :class:`pixyz.distributions.Normal`.
List of experts.
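weight_modalities : :obj:`list` of :obj:`float`, defaults to None
Weights for the prior and each expert (length ``len(p) + 1``). If None, every weight is set to 1.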
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in prob_text and prob_factorized_text.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size())
Shape of dimensions (features) of this distribution.
Examples
--------
>>> p_x = Normal(cond_var=['z'], loc='z', scale=torch.ones(1, 1))
>>> pon = ProductOfNormal([p_x])
>>> sample = pon.sample({'z': torch.zeros(1, 1)})
>>> sample # doctest: +SKIP
"""
p = tolist(p)
if len(p) == 0:
raise ValueError()
if weight_modalities is not None:
if len(weight_modalities) != len(p) + 1:
raise ValueError()
var = p[0].var
cond_var = []
for _p in p:
if _p.var != var:
raise ValueError()
if _p.distribution_name != "Normal":
raise ValueError()
cond_var += _p.cond_var
self.input_ids = [[] for _ in p]
self.save_output_dict = 0
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape)
self.p = nn.ModuleList(p)
if weight_modalities is None:
self.weight_modalities = [1. for _ in range(len(self.p) + 1)]
else:
self.weight_modalities = weight_modalities
@property
def prob_factorized_text(self):
prob_text = "p({})".format(
','.join(self._var)
)
if len(self._cond_var) != 0:
prob_text += "".join([p.prob_text for p in self.p])
return prob_text
@property
def prob_joint_factorized_and_text(self):
"""str: Return a formula of the factorized probability distribution."""
if self.prob_factorized_text == self.prob_text:
prob_text = self.prob_text
else:
prob_text = "{} \\propto {}".format(self.prob_text, self.prob_factorized_text)
return prob_text
def _get_expert_params(self, params_dict={}, **kwargs):
"""Get the output parameters of all experts.
Parameters
----------
params_dict : dict
**kwargs
Arbitrary keyword arguments.
Returns
-------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
weight : np.array
(n_expert, )
"""
loc = []
scale = []
weight = [self.weight_modalities[0]]
for i, _p in enumerate(self.p):
inputs_dict = get_dict_values(params_dict, _p.cond_var, True)
if len(inputs_dict) != 0:
outputs = _p.get_params(inputs_dict, **kwargs)
loc.append(outputs["loc"])
scale.append(outputs["scale"])
weight.append(self.weight_modalities[i + 1])
loc = torch.stack(loc)
scale = torch.stack(scale)
weight = torch.Tensor(weight).to(scale.device)
# expand weight
for i in range(len(loc.shape) - 1):
weight = weight.unsqueeze(-1)
return loc, scale, weight
def get_params(self, params_dict={}, **kwargs):
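# memoization by tensor identity: if get_params is called again with exactly the same input tensors, reuse the cached expert outputs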
_input_ids = [id(v) for v in list(params_dict.values())]
if _input_ids == self.input_ids:
return self.save_output_dict
else:
# experts
if len(params_dict) > 0:
loc, scale, weight = self._get_expert_params(params_dict, **kwargs) # (n_expert, n_batch, output_dim)
else:
loc = torch.zeros(1)
scale = torch.zeros(1)
weight = torch.ones(1).to(scale.device)
output_loc, output_scale = self._compute_expert_params(loc, scale, weight)
output_dict = {"loc": output_loc, "scale": output_scale}
self.save_output_dict = output_dict
self.input_ids = _input_ids
return output_dict
@staticmethod
def _compute_expert_params(loc, scale, weight):
"""Compute parameters for the product of experts.
It is assumed that unspecified experts are excluded from the inputs.
Parameters
----------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
Returns
-------
output_loc : torch.Tensor
Mean vectors for this distribution. (n_batch, output_dim)
output_scale : torch.Tensor
The square root of diagonal covariance matrices for this distribution. (n_batch, output_dim)
"""
variance = scale ** 2
# parameter for prior
prior_prec = 1 # prior_loc is not specified because it is equal to 0.
# compute the diagonal precision matrix.
prec = torch.zeros_like(variance).type(scale.dtype)
prec[variance != 0] = 1. / variance[variance != 0]
# compute the square root of a diagonal covariance matrix for the product of distributions.
output_prec = torch.sum(weight[1:] * prec, dim=0) + weight[0] * prior_prec
output_variance = 1. / output_prec # (n_batch, output_dim)
# compute the mean vectors for the product of normal distributions.
output_loc = torch.sum(weight[1:] * prec * loc, dim=0) # (n_batch, output_dim)
output_loc = output_loc * output_variance
return output_loc, torch.sqrt(output_variance)
def _get_input_dict(self, x, var=None):
if var is None:
var = self.input_var
if type(x) is torch.Tensor:
checked_x = {var[0]: x}
elif type(x) is list:
# TODO: we need to check if all the elements contained in this list are torch.Tensor.
checked_x = dict(zip(var, x))
elif type(x) is dict:
# point of modification
checked_x = x
else:
raise ValueError("The type of input is not valid, got %s." % type(x))
return get_dict_values(checked_x, var, return_dict=True)
def log_prob(self, sum_features=True, feature_dims=None):
raise NotImplementedError()
def prob(self, sum_features=True, feature_dims=None):
raise NotImplementedError()
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
raise NotImplementedError()
class ElementWiseProductOfNormal(ProductOfNormal):
r"""Product of normal distributions.
In this distribution, each element of the input vector on the given distribution is considered as
a different expert.
.. math::
p(z|x) = p(z|x_1, x_2) \propto p(z)p(z|x_1)p(z|x_2)
Examples
--------
>>> pon = ElementWiseProductOfNormal(p) # doctest: +SKIP
>>> pon.sample({"x": x}) # doctest: +SKIP
{'x': tensor([[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 0., 0., 0., 0., 0.]]),
'z': tensor([[-0.3572, -0.0632, 0.4872, 0.2269, -0.1693, -0.0160, -0.0429, 0.2017,
-0.1589, -0.3380, -0.9598, 0.6216, -0.4296, -1.1349, 0.0901, 0.3994,
0.2313, -0.5227, -0.7973, 0.3968, 0.7137, -0.5639, -0.4891, -0.1249,
0.8256, 0.1463, 0.0801, -1.2202, 0.6984, -0.4036, 0.4960, -0.4376,
0.3310, -0.2243, -0.2381, -0.2200, 0.8969, 0.2674, 0.4681, 1.6764,
0.8127, 0.2722, -0.2048, 0.1903, -0.1398, 0.0099, 0.4382, -0.8016,
0.9947, 0.7556, -0.2017, -0.3920, 1.4212, -1.2529, -0.1002, -0.0031,
0.1876, 0.4267, 0.3622, 0.2648, 0.4752, 0.0843, -0.3065, -0.4922],
[ 0.3770, -0.0413, 0.9102, 0.2897, -0.0567, 0.5211, 1.5233, -0.3539,
0.5163, -0.2271, -0.1027, 0.0294, -1.4617, 0.1640, 0.2025, -0.2190,
0.0555, 0.5779, -0.2930, -0.2161, 0.2835, -0.0354, -0.2569, -0.7171,
0.0164, -0.4080, 1.1088, 0.3947, 0.2720, -0.0600, -0.9295, -0.0234,
0.5624, 0.4866, 0.5285, 1.1827, 0.2494, 0.0777, 0.7585, 0.5127,
0.7500, -0.3253, 0.0250, 0.0888, 1.0340, -0.1405, -0.8114, 0.4492,
0.2725, -0.0270, 0.6379, -0.8096, 0.4259, 0.3179, -0.1681, 0.3365,
0.6305, 0.5203, 0.2384, 0.0572, 0.4804, 0.9553, -0.3244, 1.5373]])}
>>> pon.sample({"x": torch.zeros_like(x)}) # same as sampling from unit Gaussian. # doctest: +SKIP
{'x': tensor([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]),
'z': tensor([[-0.7777, -0.5908, -1.5498, -0.7505, 0.6201, 0.7218, 1.0045, 0.8923,
-0.8030, -0.3569, 0.2932, 0.2122, 0.1640, 0.7893, -0.3500, -1.0537,
-1.2769, 0.6122, -1.0083, -0.2915, -0.1928, -0.7486, 0.2418, -1.9013,
1.2514, 1.3035, -0.3029, -0.3098, -0.5415, 1.1970, -0.4443, 2.2393,
-0.6980, 0.2820, 1.6972, 0.6322, 0.4308, 0.8953, 0.7248, 0.4440,
2.2770, 1.7791, 0.7563, -1.1781, -0.8331, 0.1825, 1.5447, 0.1385,
-1.1348, 0.0257, 0.3374, 0.5889, 1.1231, -1.2476, -0.3801, -1.4404,
-1.3066, -1.2653, 0.5958, -1.7423, 0.7189, -0.7236, 0.2330, 0.3117],
[ 0.5495, 0.7210, -0.4708, -2.0631, -0.6170, 0.2436, -0.0133, -0.4616,
-0.8091, -0.1592, 1.3117, 0.0276, 0.6625, -0.3748, -0.5049, 1.8260,
-0.3631, 1.1546, -1.0913, 0.2712, 1.5493, 1.4294, -2.1245, -2.0422,
0.4976, -1.2785, 0.5028, 1.4240, 1.1983, 0.2468, 1.1682, -0.6725,
-1.1198, -1.4942, -0.3629, 0.1325, -0.2256, 0.4280, 0.9830, -1.9427,
-0.2181, 1.1850, -0.7514, -0.8172, 2.1031, -0.1698, -0.3777, -0.7863,
1.0936, -1.3720, 0.9999, 1.3302, -0.8954, -0.5999, 2.3305, 0.5702,
-1.0767, -0.2750, -0.3741, -0.7026, -1.5408, 0.0667, 1.2550, -0.5117]])}
"""
def __init__(self, p, name="p", features_shape=torch.Size()):
r"""
Parameters
----------
p : pixyz.distributions.Normal
Each element of this input vector is considered as a different expert.
When some elements are 0, experts corresponding to these elements are considered not to be specified.
:math:`p(z|x) = p(z|x_1, x_2=0) \propto p(z)p(z|x_1)`
name : str, defaults to "p"
Name of this distribution.
This name is displayed in prob_text and prob_factorized_text.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size())
Shape of dimensions (features) of this distribution.
"""
if len(p.cond_var) != 1:
raise ValueError()
super().__init__(p=p, name=name, features_shape=features_shape)
def _get_input_dict(self, x, var=None):
return super(ProductOfNormal, self)._get_input_dict(x, var)
@staticmethod
def _get_mask(inputs, index):
"""Get a mask to the input to specify an expert identified by index.
Parameters
----------
inputs : torch.Tensor
index : int
Returns
-------
torch.Tensor
"""
mask = torch.zeros_like(inputs).type(inputs.dtype)
mask[:, index] = 1
return mask
def _get_params_with_masking(self, inputs, index, **kwargs):
"""Get the output parameters of the index-specified expert.
Parameters
----------
inputs : torch.Tensor
index : int
**kwargs
Arbitrary keyword arguments.
Returns
-------
outputs : torch.Tensor
Examples
--------
>>> # pon = ElementWiseProductOfNormal(p)
>>> # a = torch.tensor([[1, 0, 0], [0, 1, 0]])
>>> # pon._get_params_with_masking(a, 0)
tensor([[[0.01, 0.0131],
[0, 0]], # loc
[[0.42, 0.39],
[1, 1]], # scale
])
>>> # pon._get_params_with_masking(a, 1)
tensor([[[0, 0],
[0.021, 0.11]], # loc
[[1, 1],
[0.293, 0.415]], # scale
])
>>> # self._get_params_with_masking(a, 2)
tensor([[[0, 0],
[0, 0]], # loc
[[1, 1],
[1, 1]], # scale
])
"""
mask = self._get_mask(inputs, index) # (n_batch, n_expert)
outputs_dict = self.p.get_params({self.cond_var[0]: inputs * mask}, **kwargs)
outputs = torch.stack([outputs_dict["loc"], outputs_dict["scale"]]) # (2, n_batch, output_dim)
# When the index-th expert in the output examples is not specified, set zero to them.
outputs[:, inputs[:, index] == 0, :] = 0
return outputs
def _get_expert_params(self, params_dict={}, **kwargs):
"""Get the output parameters of all experts.
Parameters
----------
params_dict : dict
**kwargs
Arbitrary keyword arguments.
Returns
-------
torch.Tensor
torch.Tensor
"""
inputs = get_dict_values(params_dict, self.cond_var)[0] # (n_batch, n_expert=input_dim)
n_expert = inputs.size()[1]
outputs = [self._get_params_with_masking(inputs, i) for i in range(n_expert)]
outputs = torch.stack(outputs) # (n_expert, 2, n_batch, output_dim)
return outputs[:, 0, :, :], outputs[:, 1, :, :] # (n_expert, n_batch, output_dim)
| 16,619 | 38.856115 | 118 | py |
pixyz | pixyz-main/pixyz/distributions/mixture_distributions.py | import torch
from torch import nn
from ..distributions.distributions import Distribution
from ..utils import convert_latex_name
class MixtureModel(Distribution):
r"""Mixture models.
.. math::
p(x) = \sum_i p(x|z=i)p(z=i)
Examples
--------
>>> from pixyz.distributions import Normal, Categorical
>>> from pixyz.distributions.mixture_distributions import MixtureModel
>>> z_dim = 3 # the number of mixture
>>> x_dim = 2 # the input dimension.
>>> distributions = [] # the list of distributions
>>> for i in range(z_dim):
... loc = torch.randn(x_dim) # initialize the value of location (mean)
... scale = torch.empty(x_dim).fill_(1.) # initialize the value of scale (variance)
... distributions.append(Normal(loc=loc, scale=scale, var=["x"], name="p_%d" %i))
>>> probs = torch.empty(z_dim).fill_(1. / z_dim) # initialize the value of probabilities
>>> prior = Categorical(probs=probs, var=["z"], name="prior")
>>> p = MixtureModel(distributions=distributions, prior=prior)
>>> print(p)
Distribution:
p(x) = p_{0}(x|z=0)prior(z=0) + p_{1}(x|z=1)prior(z=1) + p_{2}(x|z=2)prior(z=2)
Network architecture:
MixtureModel(
name=p, distribution_name=Mixture Model,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([])
(distributions): ModuleList(
(0): Normal(
name=p_{0}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([2])
(loc): torch.Size([1, 2])
(scale): torch.Size([1, 2])
)
(1): Normal(
name=p_{1}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([2])
(loc): torch.Size([1, 2])
(scale): torch.Size([1, 2])
)
(2): Normal(
name=p_{2}, distribution_name=Normal,
var=['x'], cond_var=[], input_var=[], features_shape=torch.Size([2])
(loc): torch.Size([1, 2])
(scale): torch.Size([1, 2])
)
)
(prior): Categorical(
name=prior, distribution_name=Categorical,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([3])
(probs): torch.Size([1, 3])
)
)
"""
def __init__(self, distributions, prior, name="p"):
"""
Parameters
----------
distributions : list
List of distributions.
prior : pixyz.Distribution.Categorical
Prior distribution of latent variable (i.e., a contribution rate).
This should be a categorical distribution and
the number of its categories should be the same as the length of :attr:`distributions`.
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in :attr:`prob_text` and :attr:`prob_factorized_text`.
"""
if not isinstance(distributions, list):
raise ValueError()
else:
distributions = nn.ModuleList(distributions)
if prior.distribution_name != "Categorical":
raise ValueError("The prior must be the categorical distribution.")
# check the number of mixture
if prior.get_params()["probs"].shape[-1] != len(distributions):
raise ValueError("The number of its category must be the same as the length of the distribution list.")
# check whether all distributions have the same variable.
var_list = []
for d in distributions:
var_list += d.var
var_list = list(set(var_list))
if len(var_list) != 1:
raise ValueError("All distributions must have the same variable.")
hidden_var = prior.var
super().__init__(var=var_list, name=name)
self.distributions = distributions
self.prior = prior
self._hidden_var = hidden_var
@property
def hidden_var(self):
"""list: Hidden variables of this distribution."""
return self._hidden_var
@property
def prob_factorized_text(self):
_mixture_prob_text = []
for i, d in enumerate(self.distributions):
_mixture_prob_text.append("{}({}|{}={}){}({}={})".format(
d.name, self.var[0], self._hidden_var[0], i,
self.prior.name, self._hidden_var[0], i
))
_prob_text = ' + '.join(_mixture_prob_text)
return _prob_text
@property
def distribution_name(self):
return "Mixture Model"
def posterior(self, name=None):
return PosteriorMixtureModel(self, name=name)
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, return_hidden=False,
sample_mean=False, **kwargs):
input_dict = self._get_input_dict(x_dict)
# sample from prior
hidden_output = self.prior.sample(input_dict, batch_n=batch_n,
sample_mean=sample_mean, return_all=False, **kwargs)[self._hidden_var[0]]
var_output = []
for _hidden_output in hidden_output:
var_output.append(self.distributions[_hidden_output.argmax(dim=-1)].sample(
input_dict, sample_mean=sample_mean, return_all=False, **kwargs)[self._var[0]])
var_output = torch.cat(var_output, dim=0)
output_dict = {self._var[0]: var_output}
if return_hidden:
output_dict.update({self._hidden_var[0]: hidden_output})
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return False
def get_log_prob(self, x_dict, return_hidden=False, **kwargs):
"""Evaluate log-pdf, log p(x) (if return_hidden=False) or log p(x, z) (if return_hidden=True).
Parameters
----------
x_dict : dict
Input variables (including `var`).
return_hidden : :obj:`bool`, defaults to False
Returns
-------
log_prob : torch.Tensor
The log-pdf value of x.
return_hidden = 0 :
dim=0 : the size of batch
return_hidden = 1 :
dim=0 : the number of mixture
dim=1 : the size of batch
"""
log_prob_all = []
_device = x_dict[self._var[0]].device
eye_tensor = torch.eye(len(self.distributions)).to(_device) # for prior
for i, d in enumerate(self.distributions):
# p(z=i)
prior_log_prob = self.prior.log_prob().eval({self._hidden_var[0]: eye_tensor[i]})
# p(x|z=i)
log_prob = d.log_prob().eval(x_dict)
# p(x, z=i)
log_prob_all.append(log_prob + prior_log_prob)
log_prob_all = torch.stack(log_prob_all, dim=0) # (num_mix, batch_size)
if return_hidden:
return log_prob_all
return torch.logsumexp(log_prob_all, 0)
class PosteriorMixtureModel(Distribution):
def __init__(self, p, name=None):
if name is None:
name = p.name
super().__init__(var=p.var, name=name)
self.p = p
self._hidden_var = p.hidden_var
@property
def hidden_var(self):
"""list: Hidden variables of this distribution."""
return self._hidden_var
@property
def prob_text(self):
_prob_text = "{}({}|{})".format(
self._name, convert_latex_name(self._hidden_var[0]), convert_latex_name(self._var[0])
)
return _prob_text
@property
def prob_factorized_text(self):
numerator = "{" + "{}({},{})".format(self._name, self._hidden_var[0], self._var[0]) + "}"
denominator = "{" + "{}({})".format(self._name, self._var[0]) + "}"
_prob_text = "\\frac{}{}".format(numerator, denominator)
return _prob_text
@property
def distribution_name(self):
return "Mixture Model (Posterior)"
def sample(self, *args, **kwargs):
raise NotImplementedError()
@property
def has_reparam(self):
return False
def get_log_prob(self, x_dict, **kwargs):
# log p(z|x) = log p(x, z) - log p(x)
log_prob = self.p.get_log_prob(x_dict, return_hidden=True, **kwargs) - self.p.get_log_prob(x_dict, **kwargs)
return log_prob # (num_mix, batch_size)
| 8,520 | 32.415686 | 116 | py |
pixyz | pixyz-main/pixyz/distributions/custom_distributions.py | from ..utils import get_dict_values, sum_samples
from .distributions import Distribution
class CustomProb(Distribution):
"""This distribution is constructed by user-defined probability density/mass function.
Note that this distribution cannot perform sampling.
Examples
--------
>>> import torch
>>> # banana shaped distribution
>>> def log_prob(z):
... z1, z2 = torch.chunk(z, chunks=2, dim=1)
... norm = torch.sqrt(z1 ** 2 + z2 ** 2)
... exp1 = torch.exp(-0.5 * ((z1 - 2) / 0.6) ** 2)
... exp2 = torch.exp(-0.5 * ((z1 + 2) / 0.6) ** 2)
... u = 0.5 * ((norm - 2) / 0.4) ** 2 - torch.log(exp1 + exp2)
... return -u
...
>>> p = CustomProb(log_prob, var=["z"])
>>> loss = p.log_prob().eval({"z": torch.randn(10, 2)})
"""
def __init__(self, log_prob_function, var, distribution_name="Custom PDF", **kwargs):
"""
Parameters
----------
log_prob_function : function
User-defined log-probability density/mass function.
var : list
Variables of this distribution.
distribution_name : :obj:`str`, optional
Name of this distribution.
**kwargs :
Arbitrary keyword arguments.
"""
self._log_prob_function = log_prob_function
self._distribution_name = distribution_name
super().__init__(var=var, **kwargs)
@property
def log_prob_function(self):
"""User-defined log-probability density/mass function."""
return self._log_prob_function
@property
def input_var(self):
return self.var
@property
def distribution_name(self):
return self._distribution_name
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
x_dict = get_dict_values(x_dict, self._var, return_dict=True)
log_prob = self.log_prob_function(**x_dict)
if sum_features:
log_prob = sum_samples(log_prob, feature_dims)
return log_prob
def sample(self, x_dict={}, return_all=True, **kwargs):
raise NotImplementedError()
@property
def has_reparam(self):
return False
| 2,210 | 29.708333 | 90 | py |
pixyz | pixyz-main/pixyz/distributions/moe.py | from __future__ import print_function
import torch
from torch import nn
import numpy as np
from ..utils import tolist, get_dict_values
from ..distributions import Normal
class MixtureOfNormal(Normal):
r"""Mixture of normal distributions.
.. math::
p(z|x,y) = p(z|x) + p(z|y)
In this model, :math:`p(z|x)` and :math:`p(z|y)` act as `experts`.
References
----------
[Shi+ 2019] Variational Mixture-of-Experts Autoencoders for Multi-Modal Deep Generative Models
"""
def __init__(self, p=[], weight_modalities=None, name="p", features_shape=torch.Size()):
"""
Parameters
----------
p : :obj:`list` of :class:`pixyz.distributions.Normal`.
List of experts.
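weight_modalities : :obj:`list` or :obj:`torch.Tensor` of :obj:`float`, defaults to None
Mixture weight of each expert (length ``len(p)``). If None, uniform weights ``1 / len(p)`` are used.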
name : :obj:`str`, defaults to "p"
Name of this distribution.
This name is displayed in prob_text and prob_factorized_text.
features_shape : :obj:`torch.Size` or :obj:`list`, defaults to torch.Size())
Shape of dimensions (features) of this distribution.
"""
p = tolist(p)
if len(p) == 0:
raise ValueError()
if weight_modalities is None:
weight_modalities = torch.ones(len(p)) / float(len(p))
elif len(weight_modalities) != len(p):
raise ValueError()
var = p[0].var
cond_var = []
for _p in p:
if _p.var != var:
raise ValueError()
cond_var += _p.cond_var
cond_var = list(set(cond_var))
super().__init__(var=var, cond_var=cond_var, name=name, features_shape=features_shape)
self.p = nn.ModuleList(p)
self.weight_modalities = weight_modalities
def _get_expert_params(self, params_dict={}, **kwargs):
"""Get the output parameters of all experts.
Parameters
----------
params_dict : dict
**kwargs
Arbitrary keyword arguments.
Returns
-------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
weight : np.array
(n_expert, )
"""
loc = []
scale = []
for i, _p in enumerate(self.p):
inputs_dict = get_dict_values(params_dict, _p.cond_var, True)
if len(inputs_dict) != 0:
outputs = _p.get_params(inputs_dict, **kwargs)
loc.append(outputs["loc"])
scale.append(outputs["scale"])
loc = torch.stack(loc)
scale = torch.stack(scale)
return loc, scale
def get_params(self, params_dict={}, **kwargs):
# experts
if len(params_dict) > 0:
loc, scale = self._get_expert_params(params_dict, **kwargs) # (n_expert, n_batch, output_dim)
else:
raise ValueError()
output_loc, output_scale = self._compute_expert_params(loc, scale)
output_dict = {"loc": output_loc, "scale": output_scale}
return output_dict
def _compute_expert_params(self, loc, scale):
"""Compute parameters for the product of experts.
Is is assumed that unspecified experts are excluded from inputs.
Parameters
----------
loc : torch.Tensor
Concatenation of mean vectors for specified experts. (n_expert, n_batch, output_dim)
scale : torch.Tensor
Concatenation of the square root of a diagonal covariance matrix for specified experts.
(n_expert, n_batch, output_dim)
Returns
-------
output_loc : torch.Tensor
Mean vectors for this distribution. (n_batch, output_dim)
output_scale : torch.Tensor
The square root of diagonal covariance matrices for this distribution. (n_batch, output_dim)
"""
num_samples = loc.shape[1]
idx_start = []
idx_end = []
for k in range(0, len(self.weight_modalities)):
if k == 0:
i_start = 0
else:
i_start = int(idx_end[k - 1])
if k == len(self.weight_modalities) - 1:
i_end = num_samples
else:
i_end = i_start + int(np.floor(num_samples * self.weight_modalities[k]))
idx_start.append(i_start)
idx_end.append(i_end)
idx_end[-1] = num_samples
output_loc = torch.cat([loc[k, idx_start[k]:idx_end[k], :] for k in range(len(self.weight_modalities))])
output_scale = torch.cat([scale[k, idx_start[k]:idx_end[k], :] for k in range(len(self.weight_modalities))])
return output_loc, output_scale
def _get_input_dict(self, x, var=None):
if var is None:
var = self.input_var
if type(x) is torch.Tensor:
checked_x = {var[0]: x}
elif type(x) is list:
# TODO: we need to check if all the elements contained in this list are torch.Tensor.
checked_x = dict(zip(var, x))
elif type(x) is dict:
# point of modification
checked_x = x
else:
raise ValueError("The type of input is not valid, got %s." % type(x))
return get_dict_values(checked_x, var, return_dict=True)
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None):
# mixture log-density: logsumexp_k( log w_k + log p_k(x) )
log_prob = torch.stack([torch.as_tensor(w, dtype=torch.float32).log() + p.get_log_prob(x_dict, sum_features=sum_features, feature_dims=feature_dims) for p, w in zip(self.p, self.weight_modalities)])
log_prob = torch.logsumexp(log_prob, dim=0)
return log_prob
| 5,758 | 32.876471 | 162 | py |
pixyz | pixyz-main/pixyz/distributions/special_distributions.py | from __future__ import print_function
from .distributions import Distribution
class Deterministic(Distribution):
"""
Deterministic distribution (or degeneration distribution)
Examples
--------
>>> import torch
>>> class Generator(Deterministic):
... def __init__(self):
... super().__init__(var=["x"], cond_var=["z"])
... self.model = torch.nn.Linear(64, 512)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p = Generator()
>>> print(p)
Distribution:
p(x|z)
Network architecture:
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=512, bias=True)
)
>>> sample = p.sample({"z": torch.randn(1, 64)})
>>> p.log_prob().eval(sample) # log_prob is not defined.
Traceback (most recent call last):
...
NotImplementedError: Log probability of deterministic distribution is not defined.
"""
def __init__(self, var, cond_var=[], name='p', **kwargs):
super().__init__(var=var, cond_var=cond_var, name=name, **kwargs)
@property
def distribution_name(self):
return "Deterministic"
def sample(self, x_dict={}, return_all=True, **kwargs):
input_dict = self._get_input_dict(x_dict)
output_dict = self.forward(**input_dict)
if set(output_dict.keys()) != set(self._var):
raise ValueError("Output variables are not the same as `var`.")
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
def sample_mean(self, x_dict):
return self.sample(x_dict, return_all=False)[self._var[0]]
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
raise NotImplementedError("Log probability of deterministic distribution is not defined.")
@property
def has_reparam(self):
return True
class EmpiricalDistribution(Distribution):
"""
Data distribution.
Samples from this distribution equal given inputs.
Examples
--------
>>> import torch
>>> p = EmpiricalDistribution(var=["x"])
>>> print(p)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> sample = p.sample({"x": torch.randn(1, 64)})
"""
def __init__(self, var, name="p_{data}"):
super().__init__(var=var, cond_var=[], name=name)
@property
def distribution_name(self):
return "Data distribution"
def sample(self, x_dict={}, return_all=True, **kwargs):
output_dict = self._get_input_dict(x_dict)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
def sample_mean(self, x_dict):
return self.sample(x_dict, return_all=False)[self._var[0]]
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
raise NotImplementedError()
@property
def input_var(self):
"""
In EmpiricalDistribution, `input_var` is the same as `var`.
"""
return self.var
@property
def has_reparam(self):
return True
| 3,517 | 27.836066 | 98 | py |
pixyz | pixyz-main/pixyz/distributions/flow_distribution.py | import torch
from ..distributions import Distribution
from ..utils import get_dict_values
class TransformedDistribution(Distribution):
r"""
Convert flow transformations to distributions.
.. math::
p(z=f_{flow}(x)),
where :math:`x \sim p_{prior}(x)`.
Once initialized, it can be handled as a distribution module.
"""
def __init__(self, prior, flow, var, name="p"):
if flow.in_features:
features_shape = [flow.in_features]
else:
features_shape = torch.Size()
super().__init__(var=var,
cond_var=prior.cond_var, name=name, features_shape=features_shape)
self.prior = prior
self.flow = flow # FlowList
self._flow_input_var = list(prior.var)
self.stored_x = {}
@property
def distribution_name(self):
return "TransformedDistribution"
@property
def flow_input_var(self):
"""list: Input variables of the flow module."""
return self._flow_input_var
@property
def prob_factorized_text(self):
flow_text = "{}=f_{{flow}}({})".format(self.var[0], self.flow_input_var[0])
prob_text = "{}({})".format(self._name, flow_text)
return prob_text
@property
def logdet_jacobian(self):
"""
Get log-determinant Jacobian.
Before calling this, you should run :attr:`forward` or :attr:`update_jacobian` methods to calculate and
store log-determinant Jacobian.
"""
return self.flow.logdet_jacobian
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
compute_jacobian=True, **kwargs):
# sample from the prior
sample_dict = self.prior.sample(x_dict, batch_n=batch_n, sample_shape=sample_shape, return_all=False, **kwargs)
# flow transformation
_x = get_dict_values(sample_dict, self.flow_input_var)[0]
z = self.forward(_x, compute_jacobian=compute_jacobian)
output_dict = {self.var[0]: z}
output_dict.update(sample_dict)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return self.prior.has_reparam
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, compute_jacobian=False, **kwargs):
"""
It calculates the log-likelihood for a given z.
If a flow module has no inverse method, it only supports the previously sampled z-values.
"""
inf_dict = self._inference(x_dict, compute_jacobian=compute_jacobian)
# prior
log_prob_prior = self.prior.get_log_prob(inf_dict, sum_features=sum_features, feature_dims=feature_dims,
**kwargs)
return log_prob_prior - self.logdet_jacobian
def _inference(self, x_dict, return_all=True, compute_jacobian=False):
# flow transformation
_z = get_dict_values(x_dict, self.var)
_y = get_dict_values(x_dict, self.cond_var, return_dict=True)
try:
x = self.inverse(_z[0])
except NotImplementedError:
hash_z = hash(_z[0])
if hash_z not in self.stored_x:
raise Exception("Cannot calculate x because it is not z used in the previous sample.")
x = self.stored_x[hash_z]
self.stored_x.pop(hash_z)
output_dict = {self._flow_input_var[0]: x,
self.var[0]: _z}
output_dict.update(_y)
# flow
if compute_jacobian:
self(x, compute_jacobian=True)
if return_all:
output_dict.update(x_dict)
return output_dict
def forward(self, x, y=None, compute_jacobian=True):
"""
Forward propagation of flow layers.
Parameters
----------
x : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
compute_jacobian : bool, defaults to True
Whether to calculate and store log-determinant Jacobian.
If true, calculated Jacobian values are stored in :attr:`logdet_jacobian`.
Returns
-------
z : torch.Tensor
"""
# hotfix: Suppress warnings from pytorch about mixed memory operations
z = self.flow.forward(x=x, y=y, compute_jacobian=compute_jacobian).contiguous()
self.stored_x.clear()
self.stored_x[hash(z)] = x
return z
def inverse(self, z, y=None):
"""
Backward (inverse) propagation of flow layers.
In this method, log-determinant Jacobian is not calculated.
Parameters
----------
z : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
Returns
-------
x : torch.Tensor
"""
return self.flow.inverse(z=z, y=y)
class InverseTransformedDistribution(Distribution):
r"""
Convert inverse flow transformations to distributions.
.. math::
p(x=f^{-1}_{flow}(z)),
where :math:`z \sim p_{prior}(z)`.
Once initialized, it can be handled as a distribution module.
Moreover, this distribution can take a conditional variable.
.. math::
p(x=f^{-1}_{flow}(z, y)),
where :math:`z \sim p_{prior}(z)` and :math:`y` is given.
"""
def __init__(self, prior, flow, var, cond_var=[], name="p"):
if flow.in_features:
features_shape = [flow.in_features]
else:
features_shape = torch.Size()
super().__init__(var, cond_var=cond_var, name=name, features_shape=features_shape)
self.prior = prior
self.flow = flow # FlowList
self._flow_output_var = list(prior.var)
@property
def distribution_name(self):
return "InverseTransformedDistribution"
@property
def flow_output_var(self):
return self._flow_output_var
@property
def prob_factorized_text(self):
var_text = ','.join(self.flow_output_var + self.cond_var)
flow_text = "{}=f^{{-1}}_{{flow}}({})".format(self.var[0], var_text)
prob_text = "{}({})".format(self._name, flow_text)
return prob_text
@property
def logdet_jacobian(self):
"""
Get log-determinant Jacobian.
Before calling this, you should run :attr:`forward` or :attr:`update_jacobian` methods to calculate and
store log-determinant Jacobian.
"""
return self.flow.logdet_jacobian
def sample(self, x_dict={}, batch_n=None, sample_shape=torch.Size(), return_all=True, reparam=False,
return_hidden=True, sample_mean=False, **kwargs):
# sample from the prior
sample_dict = self.prior.sample(x_dict, batch_n=batch_n, sample_shape=sample_shape, return_all=False,
reparam=reparam, sample_mean=sample_mean, **kwargs)
# inverse flow transformation
_z = get_dict_values(sample_dict, self.flow_output_var)
_y = get_dict_values(x_dict, self.cond_var)
if len(_y) == 0:
x = self.inverse(_z[0])
else:
x = self.inverse(_z[0], y=_y[0])
output_dict = {self.var[0]: x}
if return_hidden:
output_dict.update(sample_dict)
if return_all:
x_dict = x_dict.copy()
x_dict.update(output_dict)
return x_dict
return output_dict
@property
def has_reparam(self):
return self.prior.has_reparam
def inference(self, x_dict, return_all=True, compute_jacobian=False):
# flow transformation
_x = get_dict_values(x_dict, self.var)
_y = get_dict_values(x_dict, self.cond_var)
if len(_y) == 0:
z = self.forward(_x[0], compute_jacobian=compute_jacobian)
else:
z = self.forward(_x[0], y=_y[0], compute_jacobian=compute_jacobian)
output_dict = {self.flow_output_var[0]: z}
if return_all:
output_dict.update(x_dict)
return output_dict
def get_log_prob(self, x_dict, sum_features=True, feature_dims=None, **kwargs):
# flow
output_dict = self.inference(x_dict, return_all=True, compute_jacobian=True)
# prior
log_prob_prior = self.prior.get_log_prob(output_dict, sum_features=sum_features, feature_dims=feature_dims,
**kwargs)
return log_prob_prior + self.logdet_jacobian
def forward(self, x, y=None, compute_jacobian=True):
"""
Forward propagation of flow layers.
Parameters
----------
x : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
compute_jacobian : bool, defaults to True
Whether to calculate and store log-determinant Jacobian.
If true, calculated Jacobian values are stored in :attr:`logdet_jacobian`.
Returns
-------
z : torch.Tensor
"""
# hotfix: Suppress warnings from pytorch about mixed memory operations
return self.flow.forward(x=x, y=y, compute_jacobian=compute_jacobian).contiguous()
def inverse(self, z, y=None):
"""
Backward (inverse) propagation of flow layers.
In this method, log-determinant Jacobian is not calculated.
Parameters
----------
z : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
Returns
-------
x : torch.Tensor
"""
return self.flow.inverse(z=z, y=y)
| 9,870 | 28.912121 | 119 | py |
pixyz | pixyz-main/pixyz/flows/conv.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
import scipy as sp
import scipy.linalg  # the linalg subpackage is not imported automatically; needed for sp.linalg below
from .flows import Flow
class ChannelConv(Flow):
"""
Invertible 1 × 1 convolution.
Notes
-----
This is implemented with reference to the following code.
https://github.com/chaiyujin/glow-pytorch/blob/master/glow/modules.py
"""
def __init__(self, in_channels, decomposed=False):
super().__init__(in_channels)
w_shape = [in_channels, in_channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(np.float32)
if not decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
# LU decomposition
np_p, np_l, np_u = sp.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=np.float32), -1)
eye = np.eye(*w_shape, dtype=np.float32)
self.register_buffer('p', torch.Tensor(np_p.astype(np.float32)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(np.float32)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(np.float32)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(np.float32)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(np.float32)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.decomposed = decomposed
def get_parameters(self, x, inverse):
w_shape = self.w_shape
pixels = np.prod(x.size()[2:])
device = x.device
if not self.decomposed:
logdet_jacobian = torch.slogdet(self.weight.cpu())[1].to(device) * pixels
if not inverse:
weight = self.weight.view(w_shape[0], w_shape[1], 1, 1)
else:
weight = torch.inverse(self.weight.double()).float().view(w_shape[0], w_shape[1], 1, 1)
return weight, logdet_jacobian
else:
self.p = self.p.to(device)
self.sign_s = self.sign_s.to(device)
self.l_mask = self.l_mask.to(device)
self.eye = self.eye.to(device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
logdet_jacobian = torch.sum(self.log_s) * pixels
if not inverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1], 1, 1), logdet_jacobian
def forward(self, x, y=None, compute_jacobian=True):
weight, logdet_jacobian = self.get_parameters(x, inverse=False)
z = F.conv2d(x, weight)
if compute_jacobian:
self._logdet_jacobian = logdet_jacobian
return z
def inverse(self, x, y=None):
weight, _ = self.get_parameters(x, inverse=True)
z = F.conv2d(x, weight)
return z
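# --- Usage sketch (added example, not part of the original module) ---
# A quick sanity check of the invertible 1x1 convolution: forward followed by
# inverse should reconstruct the input, and `logdet_jacobian` is populated by
# the forward pass. Channel count and spatial size below are arbitrary.
# Run with `python -m pixyz.flows.conv` (the relative import above needs package context).
if __name__ == "__main__":
    x = torch.randn(2, 4, 8, 8)
    conv = ChannelConv(in_channels=4, decomposed=False)
    z = conv(x)                       # forward pass stores the log-det Jacobian
    x_rec = conv.inverse(z)           # inverse pass recovers the input
    print(conv.logdet_jacobian)       # log|det W| * (H * W)
    print((x - x_rec).abs().max())    # should be close to zero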
| 3,370 | 36.455556 | 115 | py |
pixyz | pixyz-main/pixyz/flows/normalizations.py | import torch
from torch import nn
import numpy as np
from .flows import Flow
from ..utils import epsilon
class BatchNorm1d(Flow):
"""
A batch normalization with the inverse transformation.
Notes
-----
This is implemented with reference to the following code.
https://github.com/ikostrikov/pytorch-flows/blob/master/flows.py#L205
Examples
--------
>>> x = torch.randn(20, 100)
>>> f = BatchNorm1d(100)
>>> # transformation
>>> z = f(x)
>>> # reconstruction
>>> _x = f.inverse(f(x))
>>> # check this reconstruction
>>> diff = torch.sum(torch.abs(_x-x)).item()
>>> diff < 0.1
True
"""
def __init__(self, in_features, momentum=0.0):
super().__init__(in_features)
self.log_gamma = nn.Parameter(torch.zeros(in_features))
self.beta = nn.Parameter(torch.zeros(in_features))
self.momentum = momentum
self.register_buffer('running_mean', torch.zeros(in_features))
self.register_buffer('running_var', torch.ones(in_features))
def forward(self, x, y=None, compute_jacobian=True):
if self.training:
self.batch_mean = x.mean(0)
self.batch_var = (x - self.batch_mean).pow(2).mean(0) + epsilon()
self.running_mean = self.running_mean * self.momentum
self.running_var = self.running_var * self.momentum
self.running_mean = self.running_mean + (self.batch_mean.data * (1 - self.momentum))
self.running_var = self.running_var + (self.batch_var.data * (1 - self.momentum))
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (x - mean) / var.sqrt()
z = torch.exp(self.log_gamma) * x_hat + self.beta
if compute_jacobian:
self._logdet_jacobian = (self.log_gamma - 0.5 * torch.log(var)).sum(-1)
return z
def inverse(self, z, y=None):
if self.training:
mean = self.batch_mean
var = self.batch_var
else:
mean = self.running_mean
var = self.running_var
x_hat = (z - self.beta) / torch.exp(self.log_gamma)
x = x_hat * var.sqrt() + mean
return x
class BatchNorm2d(BatchNorm1d):
"""
A batch normalization with the inverse transformation.
Notes
-----
This is implemented with reference to the following code.
https://github.com/ikostrikov/pytorch-flows/blob/master/flows.py#L205
Examples
--------
>>> x = torch.randn(20, 100, 35, 45)
>>> f = BatchNorm2d(100)
>>> # transformation
>>> z = f(x)
>>> # reconstruction
>>> _x = f.inverse(f(x))
>>> # check this reconstruction
>>> diff = torch.sum(torch.abs(_x-x)).item()
>>> diff < 0.1
True
"""
def __init__(self, in_features, momentum=0.0):
super().__init__(in_features, momentum)
self.log_gamma = nn.Parameter(self._unsqueeze(self.log_gamma.data))
self.beta = nn.Parameter(self._unsqueeze(self.beta.data))
self.register_buffer('running_mean', self._unsqueeze(self.running_mean))
self.register_buffer('running_var', self._unsqueeze(self.running_var))
def _unsqueeze(self, x):
return x.unsqueeze(1).unsqueeze(2)
class ActNorm2d(Flow):
"""
Activation Normalization
Initialize the bias and scale with a given minibatch,
    so that the per-channel outputs have zero mean and unit variance for that minibatch.
After initialization, `bias` and `logs` will be trained as parameters.
Notes
-----
This is implemented with reference to the following code.
https://github.com/chaiyujin/glow-pytorch/blob/master/glow/modules.py
"""
def __init__(self, in_features, scale=1.):
super().__init__(in_features)
# register mean and scale
size = [1, in_features, 1, 1]
self.register_parameter("bias", nn.Parameter(torch.zeros(*size)))
self.register_parameter("logs", nn.Parameter(torch.zeros(*size)))
self.scale = float(scale)
self.inited = False
def initialize_parameters(self, x):
if not self.training:
return
assert x.device == self.bias.device
with torch.no_grad():
bias = torch.mean(x.clone(), dim=[0, 2, 3], keepdim=True) * -1.0
vars = torch.mean((x.clone() + bias) ** 2, dim=[0, 2, 3], keepdim=True)
logs = torch.log(self.scale / (torch.sqrt(vars) + epsilon()))
self.bias.data.copy_(bias.data)
self.logs.data.copy_(logs.data)
self.inited = True
def _center(self, x, inverse=False):
if not inverse:
return x + self.bias
else:
return x - self.bias
def _scale(self, x, compute_jacobian=True, inverse=False):
logs = self.logs
if not inverse:
x = x * torch.exp(logs)
else:
x = x * torch.exp(-logs)
if compute_jacobian:
"""
            logs holds one log-scale value per channel,
            so the log-det must be multiplied by the number of pixels (H * W).
"""
pixels = np.prod(x.size()[2:])
logdet_jacobian = torch.sum(logs) * pixels
return x, logdet_jacobian
return x, None
def forward(self, x, y=None, compute_jacobian=True):
if not self.inited:
self.initialize_parameters(x)
# center and scale
x = self._center(x, inverse=False)
x, logdet_jacobian = self._scale(x, compute_jacobian, inverse=False)
if compute_jacobian:
self._logdet_jacobian = logdet_jacobian
return x
def inverse(self, x, y=None):
if not self.inited:
self.initialize_parameters(x)
# scale and center
x, _ = self._scale(x, compute_jacobian=False, inverse=True)
x = self._center(x, inverse=True)
return x
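# --- Usage sketch (added example, not part of the original module) ---
# ActNorm2d is data-dependent: the first forward call in training mode
# initializes `bias` and `logs` from the batch statistics. Sizes are arbitrary.
# Run with `python -m pixyz.flows.normalizations`.
if __name__ == "__main__":
    x = torch.randn(16, 3, 8, 8)
    f = ActNorm2d(3)
    z = f(x)                          # first call also initializes the parameters
    x_rec = f.inverse(z)
    print(f.logdet_jacobian)          # sum(logs) * (H * W)
    print((x - x_rec).abs().max())    # should be close to zero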
| 5,961 | 29.731959 | 96 | py |
pixyz | pixyz-main/pixyz/flows/coupling.py | import torch
import numpy as np
from .flows import Flow
class AffineCoupling(Flow):
r"""
Affine coupling layer
.. math::
:nowrap:
\begin{eqnarray*}
\mathbf{y}_{1:d} &=& \mathbf{x}_{1:d} \\
        \mathbf{y}_{d+1:D} &=& \mathbf{x}_{d+1:D} \odot \exp(s(\mathbf{x}_{1:d})) + t(\mathbf{x}_{1:d})
\end{eqnarray*}
"""
def __init__(self, in_features, mask_type="channel_wise",
scale_net=None, translate_net=None, scale_translate_net=None,
inverse_mask=False):
super().__init__(in_features)
# mask initializations
if mask_type in ["checkerboard", "channel_wise"]:
self.mask_type = mask_type
else:
raise ValueError
self.inverse_mask = inverse_mask
self.scale_net = None
self.translate_net = None
self.scale_translate_net = None
if scale_net and translate_net:
self.scale_net = scale_net
self.translate_net = translate_net
elif scale_translate_net:
self.scale_translate_net = scale_translate_net
else:
raise ValueError
def build_mask(self, x):
"""
Parameters
----------
x : torch.Tensor
Returns
-------
mask : torch.tensor
Examples
--------
>>> scale_translate_net = lambda x: (x, x)
>>> f1 = AffineCoupling(4, mask_type="channel_wise", scale_translate_net=scale_translate_net,
... inverse_mask=False)
>>> x1 = torch.randn([1,4,3,3])
>>> f1.build_mask(x1)
tensor([[[[1.]],
<BLANKLINE>
[[1.]],
<BLANKLINE>
[[0.]],
<BLANKLINE>
[[0.]]]])
>>> f2 = AffineCoupling(2, mask_type="checkerboard", scale_translate_net=scale_translate_net,
... inverse_mask=True)
>>> x2 = torch.randn([1,2,5,5])
>>> f2.build_mask(x2)
tensor([[[[0., 1., 0., 1., 0.],
[1., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.],
[1., 0., 1., 0., 1.],
[0., 1., 0., 1., 0.]]]])
"""
if x.dim() == 4:
[_, channels, height, width] = x.shape
if self.mask_type == "checkerboard":
mask = checkerboard_mask(height, width, self.inverse_mask)
return torch.from_numpy(mask).view(1, 1, height, width).to(x.device)
else:
mask = channel_wise_mask(channels, self.inverse_mask)
return torch.from_numpy(mask).view(1, channels, 1, 1).to(x.device)
elif x.dim() == 2:
[_, n_features] = x.shape
if self.mask_type != "checkerboard":
mask = channel_wise_mask(n_features, self.inverse_mask)
return torch.from_numpy(mask).view(1, n_features).to(x.device)
raise ValueError
def get_parameters(self, x, y=None):
r"""
Parameters
----------
x : torch.tensor
y : torch.tensor
Returns
-------
s : torch.tensor
t : torch.tensor
Examples
--------
>>> # In case of using scale_translate_net
>>> scale_translate_net = lambda x: (x, x)
>>> f1 = AffineCoupling(4, mask_type="channel_wise", scale_translate_net=scale_translate_net,
... inverse_mask=False)
>>> x1 = torch.randn([1,4,3,3])
>>> log_s, t = f1.get_parameters(x1)
>>> # In case of using scale_net and translate_net
>>> scale_net = lambda x: x
>>> translate_net = lambda x: x
>>> f2 = AffineCoupling(4, mask_type="channel_wise", scale_net=scale_net, translate_net=translate_net,
... inverse_mask=False)
>>> x2 = torch.randn([1,4,3,3])
>>> log_s, t = f2.get_parameters(x2)
"""
if self.scale_translate_net:
if y is None:
log_s, t = self.scale_translate_net(x)
else:
log_s, t = self.scale_translate_net(x, y)
else:
if y is None:
log_s = self.scale_net(x)
t = self.translate_net(x)
else:
log_s = self.scale_net(x, y)
t = self.translate_net(x, y)
return log_s, t
def forward(self, x, y=None, compute_jacobian=True):
mask = self.build_mask(x)
x_masked = mask * x
x_inv_masked = (1 - mask) * x
log_s, t = self.get_parameters(x_masked, y)
log_s = log_s * (1 - mask)
t = t * (1 - mask)
x = x_masked + x_inv_masked * torch.exp(log_s) + t
if compute_jacobian:
self._logdet_jacobian = log_s.contiguous().view(log_s.size(0), -1).sum(-1)
return x
def inverse(self, z, y=None):
mask = self.build_mask(z)
z_masked = mask * z
z_inv_masked = (1 - mask) * z
log_s, t = self.get_parameters(z_masked, y)
log_s = log_s * (1 - mask)
t = t * (1 - mask)
z = z_masked + (z_inv_masked - t) * torch.exp(-log_s)
return z
def extra_repr(self):
return 'in_features={}, mask_type={}, inverse_mask={}'.format(
self.in_features, self.mask_type, self.inverse_mask
)
def checkerboard_mask(height, width, inverse_mask=False):
r"""
Parameters
----------
height : int
width : int
inverse_mask : bool
Returns
-------
mask : np.array
Examples
--------
>>> checkerboard_mask(5, 4, False)
array([[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.]], dtype=float32)
>>> checkerboard_mask(5, 4, True)
array([[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.],
[1., 0., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)
"""
mask = np.arange(height).reshape(-1, 1) + np.arange(width)
mask = np.mod((inverse_mask is False) + mask, 2)
return mask.astype(np.float32)
def channel_wise_mask(channels, inverse_mask=False):
r"""
Parameters
----------
channels : int
inverse_mask : bool
Returns
-------
mask : np.array
Examples
--------
>>> channel_wise_mask(6, False)
array([1., 1., 1., 0., 0., 0.], dtype=float32)
>>> channel_wise_mask(6, True)
array([0., 0., 0., 1., 1., 1.], dtype=float32)
"""
mask = np.zeros(channels).astype(np.float32)
if inverse_mask:
mask[channels // 2:] = 1
else:
mask[:channels // 2] = 1
return mask
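# --- Usage sketch (added example, not part of the original module) ---
# The lambda below is only a stand-in for a real network: any callable mapping
# the masked input to a (log_s, t) pair of the same shape can serve as
# `scale_translate_net`. Sizes are arbitrary.
# Run with `python -m pixyz.flows.coupling`.
if __name__ == "__main__":
    st_net = lambda h: (torch.tanh(h), h)   # toy (log_s, t) network
    f = AffineCoupling(4, mask_type="channel_wise", scale_translate_net=st_net)
    x = torch.randn(2, 4, 8, 8)
    z = f(x)
    x_rec = f.inverse(z)
    print(f.logdet_jacobian.shape)    # one log-det value per batch element
    print((x - x_rec).abs().max())    # should be close to zero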
| 6,754 | 27.263598 | 110 | py |
pixyz | pixyz-main/pixyz/flows/normalizing_flows.py | import math
import torch
from torch import nn
from torch.nn import functional as F
from ..utils import epsilon
from .flows import Flow
class PlanarFlow(Flow):
r"""
Planar flow.
.. math::
f(\mathbf{x}) = \mathbf{x} + \mathbf{u} h( \mathbf{w}^T \mathbf{x} + \mathbf{b})
"""
def __init__(self, in_features, constraint_u=False):
super().__init__(in_features)
self.w = nn.Parameter(torch.Tensor(1, in_features))
self.b = nn.Parameter(torch.Tensor(1))
self.u = nn.Parameter(torch.Tensor(1, in_features))
self.reset_parameters()
self.constraint_u = constraint_u
def deriv_tanh(self, x):
return 1 - torch.tanh(x) ** 2
def reset_parameters(self):
std = 1. / math.sqrt(self.w.size(1))
self.w.data.uniform_(-std, std)
self.b.data.uniform_(-std, std)
self.u.data.uniform_(-std, std)
def forward(self, x, y=None, compute_jacobian=True):
if self.constraint_u:
# modify :attr:`u` so that this flow can be invertible.
wu = torch.mm(self.w, self.u.t()) # (1, 1)
m_wu = -1. + F.softplus(wu)
w_normalized = self.w / torch.norm(self.w, keepdim=True)
u_hat = self.u + ((m_wu - wu) * w_normalized) # (1, in_features)
else:
u_hat = self.u
# compute the flow transformation
linear_output = F.linear(x, self.w, self.b) # (n_batch, 1)
z = x + u_hat * torch.tanh(linear_output)
if compute_jacobian:
# compute the log-det Jacobian (logdet|dz/dx|)
psi = self.deriv_tanh(linear_output) * self.w # (n_batch, in_features)
det_jacobian = 1. + torch.mm(psi, u_hat.t()).squeeze() # (n_batch, 1) -> (n_batch)
logdet_jacobian = torch.log(torch.abs(det_jacobian) + epsilon())
self._logdet_jacobian = logdet_jacobian
return z
def inverse(self, z, y=None):
raise NotImplementedError()
def extra_repr(self):
return 'in_features={}, constraint_u={}'.format(
self.in_features, self.constraint_u
)
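# --- Usage sketch (added example, not part of the original module) ---
# Planar flow only supports the forward direction (`inverse` raises
# NotImplementedError), so it is typically used to enrich a variational
# posterior. Shapes below are arbitrary.
# Run with `python -m pixyz.flows.normalizing_flows`.
if __name__ == "__main__":
    f = PlanarFlow(in_features=8)
    x = torch.randn(5, 8)
    z = f(x)                              # also computes the log-det Jacobian
    print(z.shape)                        # torch.Size([5, 8])
    print(f.logdet_jacobian.shape)        # torch.Size([5])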
| 2,136 | 29.971014 | 95 | py |
pixyz | pixyz-main/pixyz/flows/flows.py | from torch import nn
class Flow(nn.Module):
"""Flow class. In Pixyz, all flows are required to inherit this class."""
def __init__(self, in_features):
"""
Parameters
----------
in_features : int
Size of input data.
"""
super().__init__()
self._in_features = in_features
self._logdet_jacobian = None
@property
def in_features(self):
return self._in_features
def forward(self, x, y=None, compute_jacobian=True):
"""
Forward propagation of flow layers.
Parameters
----------
x : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
compute_jacobian : bool, defaults to True
Whether to calculate and store log-determinant Jacobian.
If true, calculated Jacobian values are stored in :attr:`logdet_jacobian`.
Returns
-------
z : torch.Tensor
"""
z = x
return z
def inverse(self, z, y=None):
"""
Backward (inverse) propagation of flow layers.
In this method, log-determinant Jacobian is not calculated.
Parameters
----------
z : torch.Tensor
Input data.
y : torch.Tensor, defaults to None
Data for conditioning.
Returns
-------
x : torch.Tensor
"""
x = z
return x
@property
def logdet_jacobian(self):
"""
Get log-determinant Jacobian.
Before calling this, you should run :attr:`forward` or :attr:`update_jacobian` methods to calculate and
store log-determinant Jacobian.
"""
return self._logdet_jacobian
class FlowList(Flow):
def __init__(self, flow_list):
"""
Hold flow modules in a list.
Once initializing, it can be handled as a single flow module.
Notes
-----
Indexing is not supported for now.
Parameters
----------
flow_list : list
"""
super().__init__(flow_list[0].in_features)
self.flow_list = nn.ModuleList(flow_list)
def forward(self, x, y=None, compute_jacobian=True):
logdet_jacobian = 0
for flow in self.flow_list:
x = flow.forward(x, y, compute_jacobian)
if compute_jacobian:
logdet_jacobian = logdet_jacobian + flow.logdet_jacobian
if compute_jacobian:
self._logdet_jacobian = logdet_jacobian
return x
def inverse(self, z, y=None):
for flow in self.flow_list[::-1]:
z = flow.inverse(z, y)
return z
def __repr__(self):
# rename "ModuleList" to "FlowList"
flow_list_repr = self.flow_list.__repr__().replace("ModuleList", "FlowList")
return flow_list_repr
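# --- Usage sketch (added example, not part of the original module) ---
# A minimal custom Flow subclass and its composition with FlowList. The
# `Scale` flow below is a toy transformation (z = 2x) defined only for this
# demonstration; real flows implement forward/inverse and store the
# log-det Jacobian in the same way.
if __name__ == "__main__":
    import math
    import torch
    class Scale(Flow):
        def forward(self, x, y=None, compute_jacobian=True):
            if compute_jacobian:
                # log|det(2 * I)| = in_features * log(2) for every sample
                self._logdet_jacobian = torch.full((x.size(0),), self.in_features * math.log(2.))
            return 2 * x
        def inverse(self, z, y=None):
            return z / 2
    f = FlowList([Scale(3), Scale(3)])
    x = torch.randn(5, 3)
    z = f(x)
    print((f.inverse(z) - x).abs().max())   # exactly zero for this toy flow
    print(f.logdet_jacobian)                # 2 * 3 * log(2) ~ 4.159 for every sample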
| 2,914 | 23.291667 | 111 | py |
pixyz | pixyz-main/pixyz/flows/operations.py | import torch
import torch.nn.functional as F
import numpy as np
from .flows import Flow
from ..utils import sum_samples
class Squeeze(Flow):
"""
Squeeze operation.
c * s * s -> 4c * s/2 * s/2
Examples
--------
>>> import torch
>>> a = torch.tensor([i+1 for i in range(16)]).view(1,1,4,4)
>>> print(a)
tensor([[[[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]]]])
>>> f = Squeeze()
>>> print(f(a))
tensor([[[[ 1, 3],
[ 9, 11]],
<BLANKLINE>
[[ 2, 4],
[10, 12]],
<BLANKLINE>
[[ 5, 7],
[13, 15]],
<BLANKLINE>
[[ 6, 8],
[14, 16]]]])
>>> print(f.inverse(f(a)))
tensor([[[[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12],
[13, 14, 15, 16]]]])
"""
def __init__(self):
super().__init__(None)
self._logdet_jacobian = 0
def forward(self, x, y=None, compute_jacobian=True):
[_, channels, height, width] = x.shape
if height % 2 != 0 or width % 2 != 0:
raise ValueError
x = x.permute(0, 2, 3, 1)
x = x.view(-1, height // 2, 2, width // 2, 2, channels)
x = x.permute(0, 1, 3, 5, 2, 4)
x = x.contiguous().view(-1, height // 2, width // 2, channels * 4)
z = x.permute(0, 3, 1, 2)
return z
def inverse(self, z, y=None):
[_, channels, height, width] = z.shape
if channels % 4 != 0:
raise ValueError
z = z.permute(0, 2, 3, 1)
z = z.view(-1, height, width, channels // 4, 2, 2)
z = z.permute(0, 1, 4, 2, 5, 3)
z = z.contiguous().view(-1, 2 * height, 2 * width, channels // 4)
x = z.permute(0, 3, 1, 2)
return x
class Unsqueeze(Squeeze):
"""
Unsqueeze operation.
c * s * s -> c/4 * 2s * 2s
Examples
--------
>>> import torch
>>> a = torch.tensor([i+1 for i in range(16)]).view(1,4,2,2)
>>> print(a)
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
>>> f = Unsqueeze()
>>> print(f(a))
tensor([[[[ 1, 5, 2, 6],
[ 9, 13, 10, 14],
[ 3, 7, 4, 8],
[11, 15, 12, 16]]]])
>>> print(f.inverse(f(a)))
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
"""
def forward(self, x, y=None, compute_jacobian=True):
return super().inverse(x)
def inverse(self, z, y=None):
return super().forward(z)
class Permutation(Flow):
"""
Examples
--------
>>> import torch
>>> a = torch.tensor([i+1 for i in range(16)]).view(1,4,2,2)
>>> print(a)
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
>>> perm = [0,3,1,2]
>>> f = Permutation(perm)
>>> f(a)
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[13, 14],
[15, 16]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]]]])
>>> f.inverse(f(a))
tensor([[[[ 1, 2],
[ 3, 4]],
<BLANKLINE>
[[ 5, 6],
[ 7, 8]],
<BLANKLINE>
[[ 9, 10],
[11, 12]],
<BLANKLINE>
[[13, 14],
[15, 16]]]])
"""
def __init__(self, permute_indices):
super().__init__(len(permute_indices))
self.permute_indices = permute_indices
self.inv_permute_indices = np.argsort(self.permute_indices)
self._logdet_jacobian = 0
def forward(self, x, y=None, compute_jacobian=True):
if x.dim() == 2:
return x[:, self.permute_indices]
elif x.dim() == 4:
return x[:, self.permute_indices, :, :]
raise ValueError
def inverse(self, z, y=None):
if z.dim() == 2:
return z[:, self.inv_permute_indices]
elif z.dim() == 4:
return z[:, self.inv_permute_indices, :, :]
raise ValueError
class Shuffle(Permutation):
def __init__(self, in_features):
permute_indices = np.random.permutation(in_features)
super().__init__(permute_indices)
class Reverse(Permutation):
def __init__(self, in_features):
permute_indices = np.array(np.arange(0, in_features)[::-1])
super().__init__(permute_indices)
class Flatten(Flow):
def __init__(self, in_size=None):
super().__init__(None)
self.in_size = in_size
self._logdet_jacobian = 0
def forward(self, x, y=None, compute_jacobian=True):
self.in_size = x.shape[1:]
return x.view(x.size(0), -1)
def inverse(self, z, y=None):
if self.in_size is None:
raise ValueError
return z.view(z.size(0), self.in_size[0], self.in_size[1], self.in_size[2])
class Preprocess(Flow):
def __init__(self):
super().__init__(None)
self.register_buffer('data_constraint', torch.tensor([0.05], dtype=torch.float32))
@staticmethod
def logit(x):
return x.log() - (1. - x).log()
def forward(self, x, y=None, compute_jacobian=True):
# 1. transform the domain of x from [0, 1] to [0, 255]
x = x * 255
        # 2-1. add noise to pixels to dequantize them and transform their domain ([0, 255]->[0, 1]).
x = (x + torch.rand_like(x)) / 256.
# 2-2. transform pixel values with logit to be unconstrained ([0, 1]->(0, 1)).
x = (1 + (2 * x - 1) * (1 - self.data_constraint)) / 2.
# 2-3. apply the logit function ((0, 1)->(-inf, inf)).
z = self.logit(x)
if compute_jacobian:
# log-det Jacobian of transformation
logdet_jacobian = F.softplus(z) + F.softplus(-z) \
- F.softplus(self.data_constraint.log() - (1. - self.data_constraint).log())
logdet_jacobian = sum_samples(logdet_jacobian)
self._logdet_jacobian = logdet_jacobian
return z
def inverse(self, z, y=None):
# transform the domain of z from (-inf, inf) to (0, 1).
return torch.sigmoid(z)
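# --- Usage sketch (added example, not part of the original module) ---
# Preprocess dequantizes pixel data and maps it to an unconstrained space with
# a logit transform, as commonly done before training flow models on images.
# Because of the added uniform noise, inverse() only approximately recovers
# the input. Run with `python -m pixyz.flows.operations`.
if __name__ == "__main__":
    f = Preprocess()
    x = torch.rand(2, 3, 4, 4)             # pixel values in [0, 1]
    z = f(x)                               # dequantize + logit
    x_rec = f.inverse(z)                   # back to (0, 1)
    print(z.shape, f.logdet_jacobian.shape)
    print((x - x_rec).abs().max())         # small, but not exactly zero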
| 6,742 | 24.541667 | 98 | py |
pixyz | pixyz-main/pixyz/models/vi.py | from torch import optim
from ..models.model import Model
from ..utils import tolist
from ..losses import ELBO
class VI(Model):
"""
Variational Inference (Amortized inference)
    The negative ELBO for the given distributions (p, approximate_dist) is set as the loss class of this model.
"""
def __init__(self, p, approximate_dist,
other_distributions=[],
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
        p : pixyz.distributions.Distribution
            Generative model (distribution).
        approximate_dist : pixyz.distributions.Distribution
            Approximate posterior distribution.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p, approximate_dist] + tolist(other_distributions)
# set losses
elbo = ELBO(p, approximate_dist)
loss = -elbo.mean()
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, **kwargs):
return super().train(train_x_dict, **kwargs)
def test(self, test_x_dict={}, **kwargs):
return super().test(test_x_dict, **kwargs)
| 1,794 | 31.636364 | 98 | py |
pixyz | pixyz-main/pixyz/models/model.py | from torch import optim, nn
import torch
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
import re
from ..utils import tolist
from ..distributions.distributions import Distribution
class Model(object):
"""
This class is for training and testing a loss class.
It requires a defined loss class, distributions to train, and optimizer for initialization.
Examples
--------
>>> import torch
>>> from torch import optim
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Bernoulli, Normal
>>> from pixyz.losses import KullbackLeibler
...
>>> # Set distributions (Distribution API)
>>> class Inference(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x"],name="q")
... self.model_loc = torch.nn.Linear(128, 64)
... self.model_scale = torch.nn.Linear(128, 64)
... def forward(self, x):
... return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}
...
>>> class Generator(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z"],name="p")
... self.model = torch.nn.Linear(64, 128)
... def forward(self, z):
... return {"probs": torch.sigmoid(self.model(z))}
...
>>> p = Generator()
>>> q = Inference()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[64], name="p_{prior}")
...
>>> # Define a loss function (Loss API)
>>> reconst = -p.log_prob().expectation(q)
>>> kl = KullbackLeibler(q,prior)
>>> loss_cls = (reconst - kl).mean()
>>> print(loss_cls)
mean \\left(- D_{KL} \\left[q(z|x)||p_{prior}(z) \\right] - \\mathbb{E}_{q(z|x)} \\left[\\log p(x|z) \\right] \\right)
>>>
>>> # Set a model (Model API)
>>> model = Model(loss=loss_cls, distributions=[p, q],
... optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
>>> # Train and test the model
>>> data = torch.randn(1, 128) # Pseudo data
>>> train_loss = model.train({"x": data})
>>> test_loss = model.test({"x": data})
"""
def __init__(self, loss,
test_loss=None,
distributions=[],
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None,
retain_graph=False):
"""
Parameters
----------
loss : pixyz.losses.Loss
Loss class for training.
test_loss : pixyz.losses.Loss
Loss class for testing.
distributions : list
List of :class:`pixyz.distributions.Distribution`.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set losses
self.loss_cls = None
self.test_loss_cls = None
self.set_loss(loss, test_loss)
# set distributions (for training)
self.distributions = nn.ModuleList(tolist(distributions))
# set params and optim
params = self.distributions.parameters()
self.optimizer = optimizer(params, **optimizer_params)
self.clip_norm = clip_grad_norm
self.clip_value = clip_grad_value
self.retain_graph = retain_graph
def __str__(self):
prob_text = []
func_text = []
for prob in self.distributions._modules.values():
if isinstance(prob, Distribution):
prob_text.append(prob.prob_text)
else:
func_text.append(prob.__str__())
text = "Distributions (for training):\n {}\n".format(", ".join(prob_text))
if len(func_text) > 0:
text += "Deterministic functions (for training):\n {}\n".format(", ".join(func_text))
text += "Loss function:\n {}\n".format(str(self.loss_cls))
optimizer_text = re.sub('^', ' ' * 2, str(self.optimizer), flags=re.MULTILINE)
text += "Optimizer:\n{}".format(optimizer_text)
return text
def set_loss(self, loss, test_loss=None):
self.loss_cls = loss
if test_loss:
self.test_loss_cls = test_loss
else:
self.test_loss_cls = loss
def train(self, train_x_dict={}, **kwargs):
"""Train the model.
Parameters
----------
train_x_dict : dict
Input data.
**kwargs
Returns
-------
loss : torch.Tensor
Train loss value
"""
self.distributions.train()
self.optimizer.zero_grad()
loss = self.loss_cls.eval(train_x_dict, **kwargs)
# backprop
loss.backward(retain_graph=self.retain_graph)
if self.clip_norm:
clip_grad_norm_(self.distributions.parameters(), self.clip_norm)
if self.clip_value:
clip_grad_value_(self.distributions.parameters(), self.clip_value)
# update params
self.optimizer.step()
return loss
def test(self, test_x_dict={}, **kwargs):
"""Test the model.
Parameters
----------
test_x_dict : dict
Input data
**kwargs
Returns
-------
loss : torch.Tensor
Test loss value
"""
self.distributions.eval()
with torch.no_grad():
loss = self.test_loss_cls.eval(test_x_dict, **kwargs)
return loss
def save(self, path):
"""Save the model. The only parameters that are saved are those that are included in the distribution.
Parameters such as device, optimizer, placement of clip_grad, etc. are not saved.
Parameters
----------
path : str
Target file path
"""
torch.save({
'distributions': self.distributions.state_dict(),
}, path)
def load(self, path):
"""Load the model.
Parameters
----------
path : str
Target file path
"""
checkpoint = torch.load(path)
self.distributions.load_state_dict(checkpoint['distributions'])
| 6,465 | 29.790476 | 122 | py |
pixyz | pixyz-main/pixyz/models/vae.py | from torch import optim
from ..models.model import Model
from ..utils import tolist
class VAE(Model):
"""
Variational Autoencoder.
    In the VAE class, the reconstruction loss for the given distributions (encoder and decoder) is set as the default loss class.
    However, if you want to add additional terms, e.g., the KL divergence between the encoder and a prior,
    you need to pass them via the `regularizer` argument, which defaults to None.
References
----------
[Kingma+ 2013] Auto-Encoding Variational Bayes
"""
def __init__(self, encoder, decoder,
other_distributions=[],
regularizer=None,
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
        encoder : pixyz.distributions.Distribution
            Encoder distribution.
        decoder : pixyz.distributions.Distribution
            Decoder distribution.
        regularizer : pixyz.losses.Loss, defaults to None
If you want to add additional terms to the loss, set them to this argument.
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [encoder, decoder] + tolist(other_distributions)
# set losses
reconstruction = -decoder.log_prob().expectation(encoder)
loss = (reconstruction + regularizer).mean()
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, **kwargs):
return super().train(train_x_dict, **kwargs)
def test(self, test_x_dict={}, **kwargs):
return super().test(test_x_dict, **kwargs)
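# --- Usage sketch (added example, not part of the original module) ---
# A toy VAE on 8-dimensional binary pseudo data. The encoder/decoder below are
# single linear layers and all sizes are arbitrary; they only illustrate how
# the pieces are wired together. Run with `python -m pixyz.models.vae`.
if __name__ == "__main__":
    import torch
    from torch import nn
    from torch.nn import functional as F
    from pixyz.distributions import Bernoulli, Normal
    from pixyz.losses import KullbackLeibler
    x_dim, z_dim = 8, 2
    class Inference(Normal):
        def __init__(self):
            super().__init__(var=["z"], cond_var=["x"], name="q")
            self.model_loc = nn.Linear(x_dim, z_dim)
            self.model_scale = nn.Linear(x_dim, z_dim)
        def forward(self, x):
            return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}
    class Generator(Bernoulli):
        def __init__(self):
            super().__init__(var=["x"], cond_var=["z"], name="p")
            self.model = nn.Linear(z_dim, x_dim)
        def forward(self, z):
            return {"probs": torch.sigmoid(self.model(z))}
    q, p = Inference(), Generator()
    prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
                   var=["z"], features_shape=[z_dim], name="p_{prior}")
    model = VAE(q, p, regularizer=KullbackLeibler(q, prior), optimizer_params={"lr": 1e-3})
    data = torch.randint(0, 2, (4, x_dim)).float()   # binary pseudo data
    print(model.train({"x": data}))
    print(model.test({"x": data}))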
| 2,227 | 34.935484 | 116 | py |
pixyz | pixyz-main/pixyz/models/gan.py | from torch import optim
from ..models.model import Model
from ..losses import AdversarialJensenShannon
from ..distributions import EmpiricalDistribution
class GAN(Model):
r"""
Generative Adversarial Network
(Adversarial) Jensen-Shannon divergence between given distributions (p_data, p)
is set as the loss class of this model.
Examples
--------
>>> import torch
>>> from torch import nn, optim
>>> from pixyz.distributions import Deterministic
>>> from pixyz.distributions import Normal
>>> from pixyz.models import GAN
>>> from pixyz.utils import print_latex
>>> x_dim = 128
>>> z_dim = 100
...
>>> # Set distributions (Distribution API)
...
>>> # generator model p(x|z)
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Sequential(
... nn.Linear(z_dim, x_dim),
... nn.Sigmoid()
... )
... def forward(self, z):
... x = self.model(z)
... return {"x": x}
...
>>> # prior model p(z)
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[z_dim], name="p_{prior}")
...
>>> # generative model
>>> p_g = Generator()
>>> p = (p_g*prior).marginalize_var("z")
...
>>> # discriminator model p(t|x)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Sequential(
... nn.Linear(x_dim, 1),
... nn.Sigmoid()
... )
... def forward(self, x):
... t = self.model(x)
... return {"t": t}
...
>>> d = Discriminator()
>>> # Set a model (Model API)
>>> model = GAN(p, d, optimizer_params={"lr":0.0002}, d_optimizer_params={"lr":0.0002})
>>> print(model)
Distributions (for training):
p(x)
Loss function:
mean(D_{JS}^{Adv} \left[p_{data}(x)||p(x) \right])
Optimizer:
Adam (
Parameter Group 0
amsgrad: False
betas: (0.9, 0.999)
eps: 1e-08
lr: 0.0002
weight_decay: 0
)
>>> # Train and test the model
>>> data = torch.randn(1, x_dim) # Pseudo data
>>> train_loss = model.train({"x": data})
>>> test_loss = model.test({"x": data})
"""
def __init__(self, p, discriminator,
optimizer=optim.Adam,
optimizer_params={},
d_optimizer=optim.Adam,
d_optimizer_params={},
clip_grad_norm=None,
clip_grad_value=None):
"""
Parameters
----------
        p : pixyz.distributions.Distribution
            Generative model (generator).
        discriminator : pixyz.distributions.Distribution
            Critic (discriminator).
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p]
p_data = EmpiricalDistribution(p.var)
# set losses
loss = AdversarialJensenShannon(p_data, p, discriminator, optimizer=d_optimizer,
optimizer_params=d_optimizer_params)
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, adversarial_loss=True, **kwargs):
"""Train the model.
Parameters
----------
train_x_dict : dict, defaults to {}
Input data.
adversarial_loss : bool, defaults to True
Whether to train the discriminator.
**kwargs
Returns
-------
loss : torch.Tensor
Train loss value.
d_loss : torch.Tensor
Train loss value of the discriminator (if :attr:`adversarial_loss` is True).
"""
if adversarial_loss:
d_loss = self.loss_cls.loss_train(train_x_dict, **kwargs)
loss = super().train(train_x_dict, **kwargs)
if adversarial_loss:
return loss, d_loss
return loss
def test(self, test_x_dict={}, adversarial_loss=True, **kwargs):
"""Train the model.
Parameters
----------
test_x_dict : dict, defaults to {}
Input data.
adversarial_loss : bool, defaults to True
Whether to return the discriminator loss.
**kwargs
Returns
-------
loss : torch.Tensor
Test loss value.
d_loss : torch.Tensor
Test loss value of the discriminator (if :attr:`adversarial_loss` is True).
"""
loss = super().test(test_x_dict, **kwargs)
if adversarial_loss:
d_loss = self.loss_cls.loss_test(test_x_dict, **kwargs)
return loss, d_loss
return loss
| 5,491 | 30.745665 | 91 | py |
pixyz | pixyz-main/pixyz/models/ml.py | from torch import optim
from ..models.model import Model
from ..utils import tolist
class ML(Model):
"""
Maximum Likelihood (log-likelihood)
The negative log-likelihood of a given distribution (p) is set as the loss class of this model.
"""
def __init__(self, p,
other_distributions=[],
optimizer=optim.Adam,
optimizer_params={},
clip_grad_norm=False,
clip_grad_value=False):
"""
Parameters
----------
        p : pixyz.distributions.Distribution
            Distribution to be trained by maximum likelihood (e.g., a classifier).
optimizer : torch.optim
Optimization algorithm.
optimizer_params : dict
Parameters of optimizer
clip_grad_norm : float or int
Maximum allowed norm of the gradients.
clip_grad_value : float or int
Maximum allowed value of the gradients.
"""
# set distributions (for training)
distributions = [p] + tolist(other_distributions)
# set losses
self.nll = -p.log_prob(sum_features=True)
loss = self.nll.mean()
super().__init__(loss, test_loss=loss,
distributions=distributions,
optimizer=optimizer, optimizer_params=optimizer_params,
clip_grad_norm=clip_grad_norm, clip_grad_value=clip_grad_value)
def train(self, train_x_dict={}, **kwargs):
return super().train(train_x_dict, **kwargs)
def test(self, test_x_dict={}, **kwargs):
return super().test(test_x_dict, **kwargs)
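# --- Usage sketch (added example, not part of the original module) ---
# Maximum-likelihood fitting of a conditional Gaussian p(x|y) on pseudo data.
# The single linear layers and the sizes below are arbitrary.
# Run with `python -m pixyz.models.ml`.
if __name__ == "__main__":
    import torch
    from torch import nn
    from torch.nn import functional as F
    from pixyz.distributions import Normal
    class P(Normal):
        def __init__(self):
            super().__init__(var=["x"], cond_var=["y"], name="p")
            self.model_loc = nn.Linear(4, 4)
            self.model_scale = nn.Linear(4, 4)
        def forward(self, y):
            return {"loc": self.model_loc(y), "scale": F.softplus(self.model_scale(y))}
    model = ML(P(), optimizer_params={"lr": 1e-3})
    x, y = torch.randn(8, 4), torch.randn(8, 4)
    print(model.train({"x": x, "y": y}))   # mean negative log-likelihood
    print(model.test({"x": x, "y": y}))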
| 1,624 | 30.862745 | 99 | py |
pixyz | pixyz-main/pixyz/layers/resnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .norm_util import WNConv2d
class ResidualBlock(nn.Module):
"""ResNet basic block with weight norm."""
def __init__(self, in_channels, out_channels):
super().__init__()
self.in_norm = nn.BatchNorm2d(in_channels)
self.in_conv = WNConv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False)
self.out_norm = nn.BatchNorm2d(out_channels)
self.out_conv = WNConv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=True)
def forward(self, x):
skip = x
x = self.in_norm(x)
x = F.relu(x)
x = self.in_conv(x)
x = self.out_norm(x)
x = F.relu(x)
x = self.out_conv(x)
x = x + skip
return x
class ResNet(nn.Module):
"""ResNet for scale and translate factors in Real NVP.
Args:
in_channels (int): Number of channels in the input.
mid_channels (int): Number of channels in the intermediate layers.
out_channels (int): Number of channels in the output.
num_blocks (int): Number of residual blocks in the network.
kernel_size (int): Side length of each filter in convolutional layers.
padding (int): Padding for convolutional layers.
double_after_norm (bool): Double input after input BatchNorm.
"""
def __init__(self, in_channels, mid_channels, out_channels,
num_blocks, kernel_size, padding, double_after_norm):
super().__init__()
self.in_norm = nn.BatchNorm2d(in_channels)
self.double_after_norm = double_after_norm
self.in_conv = WNConv2d(2 * in_channels, mid_channels, kernel_size, padding, bias=True)
self.in_skip = WNConv2d(mid_channels, mid_channels, kernel_size=1, padding=0, bias=True)
self.blocks = nn.ModuleList([ResidualBlock(mid_channels, mid_channels)
for _ in range(num_blocks)])
self.skips = nn.ModuleList([WNConv2d(mid_channels, mid_channels, kernel_size=1, padding=0, bias=True)
for _ in range(num_blocks)])
self.out_norm = nn.BatchNorm2d(mid_channels)
self.out_conv = WNConv2d(mid_channels, out_channels, kernel_size=1, padding=0, bias=True)
def forward(self, x):
x = self.in_norm(x)
if self.double_after_norm:
x *= 2.
x = torch.cat((x, -x), dim=1)
x = F.relu(x)
x = self.in_conv(x)
x_skip = self.in_skip(x)
for block, skip in zip(self.blocks, self.skips):
x = block(x)
x_skip += skip(x)
x = self.out_norm(x_skip)
x = F.relu(x)
x = self.out_conv(x)
return x
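# --- Usage sketch (added example, not part of the original module) ---
# Shape check for the scale/translate network: spatial size is preserved and
# the channel count changes from `in_channels` to `out_channels`. All sizes
# below are arbitrary. Run with `python -m pixyz.layers.resnet`.
if __name__ == "__main__":
    net = ResNet(in_channels=3, mid_channels=8, out_channels=6,
                 num_blocks=2, kernel_size=3, padding=1, double_after_norm=False)
    x = torch.randn(2, 3, 16, 16)
    print(net(x).shape)               # torch.Size([2, 6, 16, 16])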
| 2,757 | 33.475 | 109 | py |
pixyz | pixyz-main/pixyz/layers/norm_util.py | import torch.nn as nn
class WNConv2d(nn.Module):
"""Weight-normalized 2d convolution.
Args:
in_channels (int): Number of channels in the input.
out_channels (int): Number of channels in the output.
kernel_size (int): Side length of each convolutional kernel.
padding (int): Padding to add on edges of input.
bias (bool): Use bias in the convolution operation.
"""
def __init__(self, in_channels, out_channels, kernel_size, padding, bias=True):
super(WNConv2d, self).__init__()
self.conv = nn.utils.weight_norm(
nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias))
def forward(self, x):
x = self.conv(x)
return x
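# --- Usage sketch (added example, not part of the original module) ---
# WNConv2d behaves like a plain Conv2d with weight normalization applied.
# The sizes below are arbitrary.
if __name__ == "__main__":
    import torch
    conv = WNConv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    print(conv(torch.randn(1, 3, 16, 16)).shape)   # torch.Size([1, 8, 16, 16])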
| 746 | 32.954545 | 90 | py |
pixyz | pixyz-main/pixyz/losses/losses.py | import abc
import sympy
import torch
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel
import numbers
from copy import deepcopy
from ..utils import get_dict_values
class Loss(torch.nn.Module, metaclass=abc.ABCMeta):
"""Loss class. In Pixyz, all loss classes are required to inherit this class.
Examples
--------
>>> import torch
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Bernoulli, Normal
>>> from pixyz.losses import KullbackLeibler
...
>>> # Set distributions
>>> class Inference(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x"],name="q")
... self.model_loc = torch.nn.Linear(128, 64)
... self.model_scale = torch.nn.Linear(128, 64)
... def forward(self, x):
... return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}
...
>>> class Generator(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z"],name="p")
... self.model = torch.nn.Linear(64, 128)
... def forward(self, z):
... return {"probs": torch.sigmoid(self.model(z))}
...
>>> p = Generator()
>>> q = Inference()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[64], name="p_{prior}")
...
>>> # Define a loss function (VAE)
>>> reconst = -p.log_prob().expectation(q)
>>> kl = KullbackLeibler(q,prior)
>>> loss_cls = (reconst - kl).mean()
>>> print(loss_cls)
mean \\left(- D_{KL} \\left[q(z|x)||p_{prior}(z) \\right] - \\mathbb{E}_{q(z|x)} \\left[\\log p(x|z) \\right] \\right)
>>> # Evaluate this loss function
>>> data = torch.randn(1, 128) # Pseudo data
>>> loss = loss_cls.eval({"x": data})
>>> print(loss) # doctest: +SKIP
tensor(65.5939, grad_fn=<MeanBackward0>)
"""
def __init__(self, input_var=None):
"""
Parameters
----------
input_var : :obj:`list` of :obj:`str`, defaults to None
Input variables of this loss function.
In general, users do not need to set them explicitly
because these depend on the given distributions and each loss function.
"""
super().__init__()
self._input_var = deepcopy(input_var)
@property
def input_var(self):
"""list: Input variables of this distribution."""
return self._input_var
@property
@abc.abstractmethod
def _symbol(self):
raise NotImplementedError()
@property
def loss_text(self):
return sympy.latex(self._symbol)
def __str__(self):
return self.loss_text
def __repr__(self):
return self.loss_text
def __add__(self, other):
return AddLoss(self, other)
def __radd__(self, other):
return AddLoss(other, self)
def __sub__(self, other):
return SubLoss(self, other)
def __rsub__(self, other):
return SubLoss(other, self)
def __mul__(self, other):
return MulLoss(self, other)
def __rmul__(self, other):
return MulLoss(other, self)
def __truediv__(self, other):
return DivLoss(self, other)
def __rtruediv__(self, other):
return DivLoss(other, self)
def __neg__(self):
return NegLoss(self)
def abs(self):
"""Return an instance of :class:`pixyz.losses.losses.AbsLoss`.
Returns
-------
pixyz.losses.losses.AbsLoss
An instance of :class:`pixyz.losses.losses.AbsLoss`
"""
return AbsLoss(self)
def mean(self):
"""Return an instance of :class:`pixyz.losses.losses.BatchMean`.
Returns
-------
pixyz.losses.losses.BatchMean
An instance of :class:`pixyz.losses.BatchMean`
"""
return BatchMean(self)
def sum(self):
"""Return an instance of :class:`pixyz.losses.losses.BatchSum`.
Returns
-------
pixyz.losses.losses.BatchSum
An instance of :class:`pixyz.losses.losses.BatchSum`
"""
return BatchSum(self)
def detach(self):
"""Return an instance of :class:`pixyz.losses.losses.Detach`.
Returns
-------
pixyz.losses.losses.Detach
An instance of :class:`pixyz.losses.losses.Detach`
"""
return Detach(self)
def expectation(self, p, sample_shape=torch.Size()):
"""Return an instance of :class:`pixyz.losses.Expectation`.
Parameters
----------
p : pixyz.distributions.Distribution
Distribution for sampling.
sample_shape : :obj:`list` or :obj:`NoneType`, defaults to torch.Size()
Shape of generating samples.
Returns
-------
pixyz.losses.Expectation
An instance of :class:`pixyz.losses.Expectation`
"""
return Expectation(p, self, sample_shape=sample_shape)
def constant_var(self, constant_dict):
"""Return an instance of :class:`pixyz.losses.ConstantVar`.
Parameters
----------
constant_dict : dict
constant variables.
Returns
-------
pixyz.losses.ConstantVar
An instance of :class:`pixyz.losses.ConstantVar`
"""
return ConstantVar(self, constant_dict)
def eval(self, x_dict={}, return_dict=False, return_all=True, **kwargs):
"""Evaluate the value of the loss function given inputs (:attr:`x_dict`).
Parameters
----------
x_dict : :obj:`dict`, defaults to {}
Input variables.
return_dict : bool, default to False.
Whether to return samples along with the evaluated value of the loss function.
return_all : bool, default to True.
Whether to return all samples, including those that have not been updated.
Returns
-------
loss : torch.Tensor
the evaluated value of the loss function.
x_dict : :obj:`dict`
All samples generated when evaluating the loss function.
If :attr:`return_dict` is False, it is not returned.
"""
if not(set(list(x_dict.keys())) >= set(self._input_var)):
raise ValueError("Input keys are not valid, expected {} but got {}.".format(self._input_var,
list(x_dict.keys())))
input_dict = get_dict_values(x_dict, self.input_var, return_dict=True)
loss, eval_dict = self(input_dict, **kwargs)
if return_dict:
output_dict = x_dict.copy() if return_all else {}
output_dict.update(eval_dict)
return loss, output_dict
return loss
@abc.abstractmethod
def forward(self, x_dict, **kwargs):
"""
Parameters
----------
x_dict : dict
Input variables.
Returns
-------
        a tuple of :class:`torch.Tensor` and :class:`dict`
            the deterministically calculated loss and the updated samples.
"""
raise NotImplementedError()
class Divergence(Loss, abc.ABC):
def __init__(self, p, q=None):
"""
Parameters
----------
p : pixyz.distributions.Distribution
Distribution.
q : pixyz.distributions.Distribution, defaults to None
Distribution.
"""
_input_var = deepcopy(p.input_var)
if q is not None:
_input_var += deepcopy(q.input_var)
_input_var = sorted(set(_input_var), key=_input_var.index)
super().__init__(_input_var)
self.p = p
self.q = q
class ValueLoss(Loss):
"""
This class contains a scalar as a loss value.
    When a scalar is multiplied by an arbitrary loss class, the scalar is converted to a :class:`ValueLoss`.
Examples
--------
>>> loss_cls = ValueLoss(2)
>>> print(loss_cls)
2
>>> loss = loss_cls.eval()
>>> print(loss)
tensor(2.)
"""
def __init__(self, loss1):
super().__init__()
self.original_value = loss1
self.register_buffer('value', torch.tensor(loss1, dtype=torch.float))
self._input_var = []
def forward(self, x_dict={}, **kwargs):
return self.value, {}
@property
def _symbol(self):
return self.original_value
class Parameter(Loss):
"""
This class defines a single variable as a loss class.
It can be used such as a coefficient parameter of a loss class.
Examples
--------
>>> loss_cls = Parameter("x")
>>> print(loss_cls)
x
>>> loss = loss_cls.eval({"x": 2})
>>> print(loss)
2
"""
def __init__(self, input_var):
if not isinstance(input_var, str):
raise ValueError()
super().__init__([input_var])
def forward(self, x_dict={}, **kwargs):
return x_dict[self._input_var[0]], {}
@property
def _symbol(self):
return sympy.Symbol(self._input_var[0])
class ConstantVar(Loss):
"""
    This loss class fixes the value of a variable to a given constant before evaluation.
    It can be used to fix coefficient parameters of a loss class or to condition random variables.
Examples
--------
>>> loss_cls = Parameter('x').constant_var({'x': 1})
>>> print(loss_cls)
x
>>> loss = loss_cls.eval()
>>> print(loss)
1
"""
def __init__(self, base_loss, constant_dict):
_input_var = set(base_loss.input_var) - set(constant_dict.keys())
super().__init__(_input_var)
self.constant_dict = constant_dict
self.base_loss = base_loss
def forward(self, x_dict={}, **kwargs):
input_dict = dict(x_dict)
input_dict.update(self.constant_dict)
return self.base_loss.eval(input_dict, return_dict=True)
@property
def _symbol(self):
return self.base_loss._symbol
class LossOperator(Loss):
def __init__(self, loss1, loss2):
super().__init__()
_input_var = []
if isinstance(loss1, Loss):
_input_var += deepcopy(loss1.input_var)
elif isinstance(loss1, numbers.Number):
loss1 = ValueLoss(loss1)
elif isinstance(loss2, type(None)):
pass
else:
raise ValueError("{} cannot be operated with {}.".format(type(loss1), type(loss2)))
if isinstance(loss2, Loss):
_input_var += deepcopy(loss2.input_var)
elif isinstance(loss2, numbers.Number):
loss2 = ValueLoss(loss2)
elif isinstance(loss2, type(None)):
pass
else:
raise ValueError("{} cannot be operated with {}.".format(type(loss2), type(loss1)))
_input_var = sorted(set(_input_var), key=_input_var.index)
self._input_var = _input_var
self.loss1 = loss1
self.loss2 = loss2
def forward(self, x_dict={}, **kwargs):
if not isinstance(self.loss1, type(None)):
loss1, x1 = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
else:
loss1 = 0
x1 = {}
if not isinstance(self.loss2, type(None)):
loss2, x2 = self.loss2.eval(x_dict, return_dict=True, return_all=False, **kwargs)
else:
loss2 = 0
x2 = {}
x1.update(x2)
return loss1, loss2, x1
class AddLoss(LossOperator):
"""
Apply the `add` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 + loss_cls_2 # equals to AddLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
x + 2
>>> loss = loss_cls.eval({"x": 3})
>>> print(loss)
tensor(5.)
"""
@property
def _symbol(self):
return self.loss1._symbol + self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 + loss2, x_dict
class SubLoss(LossOperator):
"""
Apply the `sub` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 - loss_cls_2 # equals to SubLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
2 - x
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(-2.)
>>> loss_cls = loss_cls_2 - loss_cls_1 # equals to SubLoss(loss_cls_2, loss_cls_1)
>>> print(loss_cls)
x - 2
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(2.)
"""
@property
def _symbol(self):
return self.loss1._symbol - self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 - loss2, x_dict
class MulLoss(LossOperator):
"""
Apply the `mul` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 * loss_cls_2 # equals to MulLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
2 x
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(8.)
"""
@property
def _symbol(self):
return self.loss1._symbol * self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 * loss2, x_dict
class DivLoss(LossOperator):
"""
Apply the `div` operation to the two losses.
Examples
--------
>>> loss_cls_1 = ValueLoss(2)
>>> loss_cls_2 = Parameter("x")
>>> loss_cls = loss_cls_1 / loss_cls_2 # equals to DivLoss(loss_cls_1, loss_cls_2)
>>> print(loss_cls)
\\frac{2}{x}
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(0.5000)
>>> loss_cls = loss_cls_2 / loss_cls_1 # equals to DivLoss(loss_cls_2, loss_cls_1)
>>> print(loss_cls)
\\frac{x}{2}
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
tensor(2.)
"""
@property
def _symbol(self):
return self.loss1._symbol / self.loss2._symbol
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return loss1 / loss2, x_dict
class MinLoss(LossOperator):
r"""
Apply the `min` operation to the loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses.losses import ValueLoss, Parameter, MinLoss
>>> loss_min= MinLoss(ValueLoss(3), ValueLoss(1))
>>> print(loss_min)
min \left(3, 1\right)
>>> print(loss_min.eval())
tensor(1.)
"""
def __init__(self, loss1, loss2):
super().__init__(loss1, loss2)
@property
def _symbol(self):
return sympy.Symbol("min \\left({}, {}\\right)".format(self.loss1.loss_text, self.loss2.loss_text))
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return torch.min(loss1, loss2), x_dict
class MaxLoss(LossOperator):
r"""
Apply the `max` operation to the loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses.losses import ValueLoss, MaxLoss
>>> loss_max= MaxLoss(ValueLoss(3), ValueLoss(1))
>>> print(loss_max)
max \left(3, 1\right)
>>> print(loss_max.eval())
tensor(3.)
"""
def __init__(self, loss1, loss2):
super().__init__(loss1, loss2)
@property
def _symbol(self):
return sympy.Symbol("max \\left({}, {}\\right)".format(self.loss1.loss_text, self.loss2.loss_text))
def forward(self, x_dict={}, **kwargs):
loss1, loss2, x_dict = super().forward(x_dict, **kwargs)
return torch.max(loss1, loss2), x_dict
class LossSelfOperator(Loss):
def __init__(self, loss1):
super().__init__()
_input_var = []
if isinstance(loss1, type(None)):
raise ValueError()
if isinstance(loss1, Loss):
_input_var = deepcopy(loss1.input_var)
elif isinstance(loss1, numbers.Number):
loss1 = ValueLoss(loss1)
else:
raise ValueError()
self._input_var = _input_var
self.loss1 = loss1
def loss_train(self, x_dict={}, **kwargs):
return self.loss1.loss_train(x_dict, **kwargs)
def loss_test(self, x_dict={}, **kwargs):
return self.loss1.loss_test(x_dict, **kwargs)
class NegLoss(LossSelfOperator):
"""
Apply the `neg` operation to the loss.
Examples
--------
>>> loss_cls_1 = Parameter("x")
>>> loss_cls = -loss_cls_1 # equals to NegLoss(loss_cls_1)
>>> print(loss_cls)
- x
>>> loss = loss_cls.eval({"x": 4})
>>> print(loss)
-4
"""
@property
def _symbol(self):
return -self.loss1._symbol
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return -loss, x_dict
class AbsLoss(LossSelfOperator):
"""
Apply the `abs` operation to the loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses import LogProb
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p).abs() # equals to AbsLoss(LogProb(p))
>>> print(loss_cls)
|\\log p(x)|
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([12.9894, 15.5280])
"""
@property
def _symbol(self):
return sympy.Symbol("|{}|".format(self.loss1.loss_text))
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.abs(), x_dict
class BatchMean(LossSelfOperator):
r"""
Average a loss class over given batch data.
.. math::
\mathbb{E}_{p_{data}(x)}[\mathcal{L}(x)] \approx \frac{1}{N}\sum_{i=1}^N \mathcal{L}(x_i),
where :math:`x_i \sim p_{data}(x)` and :math:`\mathcal{L}` is a loss function.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses import LogProb
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p).mean() # equals to BatchMean(LogProb(p))
>>> print(loss_cls)
mean \left(\log p(x) \right)
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(-14.5038)
"""
@property
def _symbol(self):
return sympy.Symbol("mean \\left({} \\right)".format(self.loss1.loss_text)) # TODO: fix it
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.mean(), x_dict
class BatchSum(LossSelfOperator):
r"""
    Sum a loss class over the given batch data.
.. math::
\sum_{i=1}^N \mathcal{L}(x_i),
where :math:`x_i \sim p_{data}(x)` and :math:`\mathcal{L}` is a loss function.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> from pixyz.losses import LogProb
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p).sum() # equals to BatchSum(LogProb(p))
>>> print(loss_cls)
sum \left(\log p(x) \right)
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(-31.9434)
"""
@property
def _symbol(self):
return sympy.Symbol("sum \\left({} \\right)".format(self.loss1.loss_text)) # TODO: fix it
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.sum(), x_dict
class Detach(LossSelfOperator):
r"""
Apply the `detach` method to the loss.
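    Examples
    --------
    A minimal sketch, assuming the ``ValueLoss`` helper from this module:
    >>> from pixyz.losses.losses import ValueLoss
    >>> loss_cls = ValueLoss(2).detach()  # equals to Detach(ValueLoss(2))
    >>> print(loss_cls)
    detach \left(2 \right)
    >>> print(loss_cls.eval())
    tensor(2.)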
"""
@property
def _symbol(self):
return sympy.Symbol("detach \\left({} \\right)".format(self.loss1.loss_text)) # TODO: fix it?
def forward(self, x_dict={}, **kwargs):
loss, x_dict = self.loss1.eval(x_dict, return_dict=True, return_all=False, **kwargs)
return loss.detach(), x_dict
class Expectation(Loss):
r"""
Expectation of a given function (Monte Carlo approximation).
.. math::
\mathbb{E}_{p(x)}[f(x)] \approx \frac{1}{L}\sum_{l=1}^L f(x_l),
\quad \text{where}\quad x_l \sim p(x).
    Note that it is not necessary to know the distribution of :math:`f(x)` in order to evaluate this expectation;
    this fact is known as the law of the unconscious statistician (LOTUS).
    Therefore, in this class, :math:`f` is assumed to be a :class:`pixyz.losses.Loss` instance.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal, Bernoulli
>>> from pixyz.losses import LogProb
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"],
... features_shape=[10]) # q(z|x)
>>> p = Normal(loc="z", scale=torch.tensor(1.), var=["x"], cond_var=["z"],
... features_shape=[10]) # p(x|z)
>>> loss_cls = LogProb(p).expectation(q) # equals to Expectation(q, LogProb(p))
>>> print(loss_cls)
\mathbb{E}_{p(z|x)} \left[\log p(x|z) \right]
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([-12.8181, -12.6062])
>>> loss_cls = LogProb(p).expectation(q,sample_shape=(5,))
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
>>> q = Bernoulli(probs=torch.tensor(0.5), var=["x"], cond_var=[], features_shape=[10]) # q(x)
>>> p = Bernoulli(probs=torch.tensor(0.3), var=["x"], cond_var=[], features_shape=[10]) # p(x)
>>> loss_cls = p.log_prob().expectation(q,sample_shape=[64])
>>> train_loss = loss_cls.eval()
>>> print(train_loss) # doctest: +SKIP
tensor([46.7559])
>>> eval_loss = loss_cls.eval(test_mode=True)
>>> print(eval_loss) # doctest: +SKIP
tensor([-7.6047])
"""
def __init__(self, p, f, sample_shape=torch.Size([1]), reparam=True):
input_var = list(set(p.input_var) | set(f.input_var) - set(p.var))
super().__init__(input_var=input_var)
self.p = p
self.f = f
self.sample_shape = torch.Size(sample_shape)
self.reparam = reparam
@property
def _symbol(self):
p_text = "{" + self.p.prob_text + "}"
return sympy.Symbol("\\mathbb{{E}}_{} \\left[{} \\right]".format(p_text, self.f.loss_text))
def forward(self, x_dict={}, **kwargs):
samples_dicts = [self.p.sample(x_dict, reparam=self.reparam, return_all=False, **kwargs)
for i in range(self.sample_shape.numel())]
loss_and_dicts = []
for samples_dict in samples_dicts:
input_dict = x_dict.copy()
input_dict.update(samples_dict)
loss_and_dicts.append(self.f.eval(input_dict, return_dict=True, return_all=False, **kwargs))
losses = [loss for loss, loss_sample_dict in loss_and_dicts]
        # average over sample_shape
loss = torch.stack(losses).mean(dim=0)
output_dict = {}
output_dict.update(samples_dicts[0])
output_dict.update(loss_and_dicts[0][1])
return loss, output_dict
def REINFORCE(p, f, b=ValueLoss(0), sample_shape=torch.Size([1]), reparam=True):
r"""
Surrogate Loss for Policy Gradient Method (REINFORCE) with a given reward function :math:`f` and a given baseline :math:`b`.
.. math::
        \mathbb{E}_{p(x)}[\mathrm{detach}(f(x)-b(x))\log p(x)+f(x)-b(x)].
    In this function, :math:`f` and :math:`b` are assumed to be :class:`pixyz.losses.Loss` instances.
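    As a brief reminder (this is the standard score-function / log-derivative identity, not something specific to Pixyz):
    .. math::
        \nabla_\theta \mathbb{E}_{p_\theta(x)}[f(x)] = \mathbb{E}_{p_\theta(x)}[f(x)\nabla_\theta \log p_\theta(x)],
    so the gradient of this surrogate with respect to the parameters of :math:`p` yields a Monte Carlo estimate of the
    policy gradient, with :math:`b` acting as a variance-reducing baseline.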
Parameters
----------
p : :class:`pixyz.distributions.Distribution`
Distribution for expectation.
f : :class:`pixyz.losses.Loss`
reward function
b : :class:`pixyz.losses.Loss` default to pixyz.losses.ValueLoss(0)
baseline function
sample_shape : :class:`torch.Size` default to torch.Size([1])
sample size for expectation
reparam : :obj: bool default to True
using reparameterization in internal sampling
Returns
-------
surrogate_loss : :class:`pixyz.losses.Loss`
        The policy gradient can be calculated from the gradient of this surrogate loss.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal, Bernoulli
>>> from pixyz.losses import LogProb
>>> q = Bernoulli(probs=torch.tensor(0.5), var=["x"], cond_var=[], features_shape=[10]) # q(x)
>>> p = Bernoulli(probs=torch.tensor(0.3), var=["x"], cond_var=[], features_shape=[10]) # p(x)
>>> loss_cls = REINFORCE(q,p.log_prob(),sample_shape=[64])
>>> train_loss = loss_cls.eval(test_mode=True)
>>> print(train_loss) # doctest: +SKIP
tensor([46.7559])
>>> loss_cls = p.log_prob().expectation(q,sample_shape=[64])
>>> test_loss = loss_cls.eval()
>>> print(test_loss) # doctest: +SKIP
tensor([-7.6047])
"""
return Expectation(p, (f - b).detach() * p.log_prob() + (f - b), sample_shape, reparam=reparam)
class DataParalleledLoss(Loss):
r"""
    Loss class wrapper for torch.nn.DataParallel. It can be used in the same way as the wrapped loss class;
    its `eval` and `forward` methods run data-parallel across the given devices.
Examples
--------
>>> import torch
>>> from torch import optim
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Bernoulli, Normal
>>> from pixyz.losses import KullbackLeibler, DataParalleledLoss
>>> from pixyz.models import Model
>>> used_gpu_i = set()
>>> used_gpu_g = set()
>>> # Set distributions (Distribution API)
>>> class Inference(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x"],name="q")
... self.model_loc = torch.nn.Linear(12, 6)
... self.model_scale = torch.nn.Linear(12, 6)
... def forward(self, x):
... used_gpu_i.add(x.device.index)
... return {"loc": self.model_loc(x), "scale": F.softplus(self.model_scale(x))}
>>> class Generator(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z"],name="p")
... self.model = torch.nn.Linear(6, 12)
... def forward(self, z):
... used_gpu_g.add(z.device.index)
... return {"probs": torch.sigmoid(self.model(z))}
>>> p = Generator()
>>> q = Inference()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[6], name="p_{prior}")
>>> # Define a loss function (Loss API)
>>> reconst = -p.log_prob().expectation(q)
>>> kl = KullbackLeibler(q,prior)
>>> batch_loss_cls = (reconst - kl)
>>> # device settings
>>> device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
>>> device_count = torch.cuda.device_count()
>>> expected = set(range(device_count)) if torch.cuda.is_available() else {None}
>>> if device_count > 1:
... loss_cls = DataParalleledLoss(batch_loss_cls, device_ids=list(expected)).mean().to(device)
... else:
... loss_cls = batch_loss_cls.mean().to(device)
>>> # Set a model (Model API)
>>> model = Model(loss=loss_cls, distributions=[p, q],
... optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
>>> # Train and test the model
>>> data = torch.randn(10, 12).to(device) # Pseudo data
>>> train_loss = model.train({"x": data})
>>> assert used_gpu_i==expected
>>> assert used_gpu_g==expected
"""
def __init__(self, loss, distributed=False, **kwargs):
super().__init__(loss.input_var)
if distributed:
self.paralleled = DistributedDataParallel(loss, **kwargs)
else:
self.paralleled = DataParallel(loss, **kwargs)
def forward(self, x_dict, **kwargs):
return self.paralleled.forward(x_dict, **kwargs)
@property
def _symbol(self):
return self.paralleled.module._symbol
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.paralleled.module, name)
| 28,918 | 29.83049 | 128 | py |
pixyz | pixyz-main/pixyz/losses/mmd.py | import torch
import sympy
from .losses import Divergence
from ..utils import get_dict_values
class MMD(Divergence):
r"""
The Maximum Mean Discrepancy (MMD).
.. math::
D_{MMD^2}[p||q] = \mathbb{E}_{p(x), p(x')}[k(x, x')] + \mathbb{E}_{q(x), q(x')}[k(x, x')]
- 2\mathbb{E}_{p(x), q(x')}[k(x, x')]
where :math:`k(x, x')` is any positive definite kernel.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="p")
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="q")
>>> loss_cls = MMD(p, q, kernel="gaussian")
>>> print(loss_cls)
D_{MMD^2} \left[p(z|x)||q(z|x) \right]
>>> loss = loss_cls.eval({"x": torch.randn(1, 64)})
>>> # Use the inverse (multi-)quadric kernel
>>> loss = MMD(p, q, kernel="inv-multiquadratic").eval({"x": torch.randn(10, 64)})
"""
def __init__(self, p, q, kernel="gaussian", **kernel_params):
if set(p.var) != set(q.var):
raise ValueError("The two distribution variables must be the same.")
if len(p.var) != 1:
raise ValueError("A given distribution must have only one variable.")
super().__init__(p, q)
if len(p.input_var) > 0:
self.input_dist = p
elif len(q.input_var) > 0:
self.input_dist = q
else:
raise NotImplementedError()
if kernel == "gaussian":
self.kernel = gaussian_rbf_kernel
elif kernel == "inv-multiquadratic":
self.kernel = inverse_multiquadratic_rbf_kernel
else:
raise NotImplementedError()
self.kernel_params = kernel_params
@property
def _symbol(self):
return sympy.Symbol("D_{{MMD^2}} \\left[{}||{} \\right]".format(self.p.prob_text, self.q.prob_text))
def _get_batch_n(self, x_dict):
return get_dict_values(x_dict, self.input_dist.input_var[0])[0].shape[0]
def forward(self, x_dict={}, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample from distributions
p_x = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.p.var)[0]
q_x = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.q.var)[0]
if p_x.shape != q_x.shape:
raise ValueError("The two distribution variables must have the same shape.")
if len(p_x.shape) != 2:
raise ValueError("The number of axes of a given sample must be 2, got %d" % len(p_x.shape))
p_x_dim = p_x.shape[1]
q_x_dim = q_x.shape[1]
# estimate the squared MMD (unbiased estimator)
p_kernel = self.kernel(p_x, p_x, **self.kernel_params).sum() / (p_x_dim * (p_x_dim - 1))
q_kernel = self.kernel(q_x, q_x, **self.kernel_params).sum() / (q_x_dim * (q_x_dim - 1))
pq_kernel = self.kernel(p_x, q_x, **self.kernel_params).sum() / (p_x_dim * q_x_dim)
mmd_loss = p_kernel + q_kernel - 2 * pq_kernel
return mmd_loss, {}
def pairwise_distance_matrix(x, y, metric="euclidean"):
r"""
    Computes the pairwise distance matrix between x and y (for the "euclidean" metric, squared distances are returned).
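    Examples
    --------
    A minimal sketch of the returned (squared-distance) matrix:
    >>> import torch
    >>> x = torch.tensor([[0., 0.], [1., 1.]])
    >>> pairwise_distance_matrix(x, x)
    tensor([[0., 2.],
            [2., 0.]])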
"""
if metric == "euclidean":
return torch.sum((x[:, None, :] - y[None, :, :]) ** 2, dim=-1)
raise NotImplementedError()
def gaussian_rbf_kernel(x, y, sigma_sqr=2., **kwargs):
r"""
Gaussian radial basis function (RBF) kernel.
.. math::
        k(x, y) = \exp \left(-\frac{||x-y||^2}{\sigma^2}\right)
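    Examples
    --------
    A quick sanity check (identical inputs give a kernel value of 1):
    >>> import torch
    >>> gaussian_rbf_kernel(torch.zeros(1, 3), torch.zeros(1, 3))
    tensor([[1.]])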
"""
return torch.exp(-pairwise_distance_matrix(x, y) / (1. * sigma_sqr))
def inverse_multiquadratic_rbf_kernel(x, y, sigma_sqr=2., **kwargs):
r"""
Inverse multi-quadratic radial basis function (RBF) kernel.
.. math::
k(x, y) = \frac{\sigma^2}{||x-y||^2 + \sigma^2}
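    Examples
    --------
    A quick sanity check with the default :math:`\sigma^2 = 2`:
    >>> import torch
    >>> inverse_multiquadratic_rbf_kernel(torch.zeros(1, 3), torch.ones(1, 3))
    tensor([[0.4000]])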
"""
return sigma_sqr / (pairwise_distance_matrix(x, y) + sigma_sqr)
| 3,976 | 31.598361 | 109 | py |
pixyz | pixyz-main/pixyz/losses/adversarial_loss.py | import sympy
from torch import optim, nn
import torch
from .losses import Divergence
from ..utils import get_dict_values, detach_dict
class AdversarialLoss(Divergence):
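    """Base class for adversarially estimated divergences.
    The ``discriminator`` network (critic) is given its own optimizer and is trained via :meth:`loss_train`
    to distinguish samples drawn from ``p`` and ``q``; subclasses define the discriminator objective
    :meth:`d_loss` and the generator objective :meth:`g_loss`.
    """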
def __init__(self, p, q, discriminator, optimizer=optim.Adam, optimizer_params={}):
if set(p.var) != set(q.var):
raise ValueError("The two distribution variables must be the same.")
super().__init__(p, q)
if len(p.input_var) > 0:
self.input_dist = p
elif len(q.input_var) > 0:
self.input_dist = q
else:
raise NotImplementedError()
self.loss_optimizer = optimizer
self.loss_optimizer_params = optimizer_params
self.d = discriminator
params = discriminator.parameters()
self.d_optimizer = optimizer(params, **optimizer_params)
def _get_batch_n(self, x_dict):
return get_dict_values(x_dict, self.input_dist.input_var)[0].shape[0]
def d_loss(self, y_p, y_q, batch_n):
"""Evaluate a discriminator loss given outputs of the discriminator.
Parameters
----------
y_p : torch.Tensor
Output of discriminator given sample from p.
y_q : torch.Tensor
Output of discriminator given sample from q.
batch_n : int
Batch size of inputs.
Returns
-------
torch.Tensor
"""
raise NotImplementedError()
def g_loss(self, y_p, y_q, batch_n):
"""Evaluate a generator loss given outputs of the discriminator.
Parameters
----------
y_p : torch.Tensor
Output of discriminator given sample from p.
y_q : torch.Tensor
Output of discriminator given sample from q.
batch_n : int
Batch size of inputs.
Returns
-------
torch.Tensor
"""
raise NotImplementedError()
def loss_train(self, train_x_dict, **kwargs):
"""Train the evaluation metric (discriminator).
Parameters
----------
train_x_dict : dict
Input variables.
**kwargs
Arbitrary keyword arguments.
Returns
-------
loss : torch.Tensor
"""
self.d.train()
self.d_optimizer.zero_grad()
loss = self.eval(train_x_dict, discriminator=True)
# backprop
loss.backward()
# update params
self.d_optimizer.step()
return loss
def loss_test(self, test_x_dict, **kwargs):
"""Test the evaluation metric (discriminator).
Parameters
----------
test_x_dict : dict
Input variables.
**kwargs
Arbitrary keyword arguments.
Returns
-------
loss : torch.Tensor
"""
self.d.eval()
with torch.no_grad():
loss = self.eval(test_x_dict, discriminator=True)
return loss
class AdversarialJensenShannon(AdversarialLoss):
r"""
Jensen-Shannon divergence (adversarial training).
.. math::
        2 \cdot D_{JS}[p(x)||q(x)] - 2 \log 2
         = \mathbb{E}_{p(x)}[\log d^*(x)] + \mathbb{E}_{q(x)}[\log (1-d^*(x))],
where :math:`d^*(x) = \arg\max_{d} \mathbb{E}_{p(x)}[\log d(x)] + \mathbb{E}_{q(x)}[\log (1-d(x))]`.
This class acts as a metric that evaluates a given distribution (generator).
    If you want to train this evaluation metric itself, i.e., the discriminator (critic), use the :meth:`loss_train` method.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Deterministic, EmpiricalDistribution, Normal
>>> # Generator
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Linear(32, 64)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p_g = Generator()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[32], name="p_{prior}")
>>> p = (p_g*prior).marginalize_var("z")
>>> print(p)
Distribution:
p(x) = \int p(x|z)p_{prior}(z)dz
Network architecture:
p_{prior}(z):
Normal(
name=p_{prior}, distribution_name=Normal,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([32])
(loc): torch.Size([1, 32])
(scale): torch.Size([1, 32])
)
p(x|z):
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=32, out_features=64, bias=True)
)
>>> # Data distribution (dummy distribution)
>>> p_data = EmpiricalDistribution(["x"])
>>> print(p_data)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> # Discriminator (critic)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Linear(64, 1)
... def forward(self, x):
... return {"t": torch.sigmoid(self.model(x))}
>>> d = Discriminator()
>>> print(d)
Distribution:
d(t|x)
Network architecture:
Discriminator(
name=d, distribution_name=Deterministic,
var=['t'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=1, bias=True)
)
>>>
>>> # Set the loss class
>>> loss_cls = AdversarialJensenShannon(p, p_data, discriminator=d)
>>> print(loss_cls)
mean(D_{JS}^{Adv} \left[p(x)||p_{data}(x) \right])
>>>
    >>> sample_x = torch.randn(2, 64) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(1.3723, grad_fn=<AddBackward0>)
>>> # For evaluating a discriminator loss, set the `discriminator` option to True.
>>> loss_d = loss_cls.eval({"x": sample_x}, discriminator=True)
>>> print(loss_d) # doctest: +SKIP
tensor(1.4990, grad_fn=<AddBackward0>)
    >>> # When training the evaluation metric (discriminator), use the loss_train method.
>>> train_loss = loss_cls.loss_train({"x": sample_x})
References
----------
[Goodfellow+ 2014] Generative Adversarial Networks
"""
def __init__(self, p, q, discriminator, optimizer=optim.Adam, optimizer_params={}, inverse_g_loss=True):
super().__init__(p, q, discriminator, optimizer=optimizer, optimizer_params=optimizer_params)
self.bce_loss = nn.BCELoss()
self._inverse_g_loss = inverse_g_loss
@property
def _symbol(self):
return sympy.Symbol("mean(D_{{JS}}^{{Adv}} \\left[{}||{} \\right])".format(self.p.prob_text,
self.q.prob_text))
def forward(self, x_dict, discriminator=False, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample x_p from p
x_p_dict = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
# sample x_q from q
x_q_dict = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
if discriminator:
# sample y_p from d
y_p = get_dict_values(self.d.sample(detach_dict(x_p_dict), **kwargs), self.d.var)[0]
# sample y_q from d
y_q = get_dict_values(self.d.sample(detach_dict(x_q_dict), **kwargs), self.d.var)[0]
return self.d_loss(y_p, y_q, batch_n), x_dict
# sample y_p from d
y_p_dict = self.d.sample(x_p_dict, **kwargs)
# sample y_q from d
y_q_dict = self.d.sample(x_q_dict, **kwargs)
y_p = get_dict_values(y_p_dict, self.d.var)[0]
y_q = get_dict_values(y_q_dict, self.d.var)[0]
return self.g_loss(y_p, y_q, batch_n), x_dict
def d_loss(self, y_p, y_q, batch_n):
# set labels
t_p = torch.ones(batch_n, 1).to(y_p.device)
t_q = torch.zeros(batch_n, 1).to(y_p.device)
return self.bce_loss(y_p, t_p) + self.bce_loss(y_q, t_q)
def g_loss(self, y_p, y_q, batch_n):
# set labels
t1 = torch.ones(batch_n, 1).to(y_p.device)
t2 = torch.zeros(batch_n, 1).to(y_p.device)
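        # Non-saturating trick: when `_inverse_g_loss` is True, generated and real samples are scored
        # against the swapped labels rather than simply negating the discriminator's BCE loss.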
if self._inverse_g_loss:
y_p_loss = self.bce_loss(y_p, t2)
y_q_loss = self.bce_loss(y_q, t1)
else:
y_p_loss = -self.bce_loss(y_p, t1)
y_q_loss = -self.bce_loss(y_q, t2)
if self.p.distribution_name == "Data distribution":
y_p_loss = y_p_loss.detach()
if self.q.distribution_name == "Data distribution":
y_q_loss = y_q_loss.detach()
return y_p_loss + y_q_loss
def loss_train(self, train_x_dict, **kwargs):
return super().loss_train(train_x_dict, **kwargs)
def loss_test(self, test_x_dict, **kwargs):
return super().loss_test(test_x_dict, **kwargs)
class AdversarialKullbackLeibler(AdversarialLoss):
r"""
Kullback-Leibler divergence (adversarial training).
.. math::
D_{KL}[p(x)||q(x)] = \mathbb{E}_{p(x)}\left[\log \frac{p(x)}{q(x)}\right]
\approx \mathbb{E}_{p(x)}\left[\log \frac{d^*(x)}{1-d^*(x)}\right],
where :math:`d^*(x) = \arg\max_{d} \mathbb{E}_{q(x)}[\log d(x)] + \mathbb{E}_{p(x)}[\log (1-d(x))]`.
    Note that minimizing this divergence brings :math:`p` closer to :math:`q`.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Deterministic, EmpiricalDistribution, Normal
>>> # Generator
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Linear(32, 64)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p_g = Generator()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[32], name="p_{prior}")
>>> p = (p_g*prior).marginalize_var("z")
>>> print(p)
Distribution:
p(x) = \int p(x|z)p_{prior}(z)dz
Network architecture:
p_{prior}(z):
Normal(
name=p_{prior}, distribution_name=Normal,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([32])
(loc): torch.Size([1, 32])
(scale): torch.Size([1, 32])
)
p(x|z):
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=32, out_features=64, bias=True)
)
>>> # Data distribution (dummy distribution)
>>> p_data = EmpiricalDistribution(["x"])
>>> print(p_data)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> # Discriminator (critic)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Linear(64, 1)
... def forward(self, x):
... return {"t": torch.sigmoid(self.model(x))}
>>> d = Discriminator()
>>> print(d)
Distribution:
d(t|x)
Network architecture:
Discriminator(
name=d, distribution_name=Deterministic,
var=['t'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=1, bias=True)
)
>>>
>>> # Set the loss class
>>> loss_cls = AdversarialKullbackLeibler(p, p_data, discriminator=d)
>>> print(loss_cls)
mean(D_{KL}^{Adv} \left[p(x)||p_{data}(x) \right])
>>>
    >>> sample_x = torch.randn(2, 64) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> # The evaluation value might be negative if the discriminator training is incomplete.
>>> print(loss) # doctest: +SKIP
tensor(-0.8377, grad_fn=<AddBackward0>)
>>> # For evaluating a discriminator loss, set the `discriminator` option to True.
>>> loss_d = loss_cls.eval({"x": sample_x}, discriminator=True)
>>> print(loss_d) # doctest: +SKIP
tensor(1.9321, grad_fn=<AddBackward0>)
    >>> # When training the evaluation metric (discriminator), use the loss_train method.
>>> train_loss = loss_cls.loss_train({"x": sample_x})
References
----------
[Kim+ 2018] Disentangling by Factorising
"""
def __init__(self, p, q, discriminator, **kwargs):
super().__init__(p, q, discriminator, **kwargs)
self.bce_loss = nn.BCELoss()
@property
def _symbol(self):
return sympy.Symbol("mean(D_{{KL}}^{{Adv}} \\left[{}||{} \\right])".format(self.p.prob_text,
self.q.prob_text))
def forward(self, x_dict, discriminator=False, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample x_p from p
x_p_dict = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
if discriminator:
# sample x_q from q
x_q_dict = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.d.input_var, True)
# sample y_p from d
y_p = get_dict_values(self.d.sample(detach_dict(x_p_dict), **kwargs), self.d.var)[0]
# sample y_q from d
y_q = get_dict_values(self.d.sample(detach_dict(x_q_dict), **kwargs), self.d.var)[0]
return self.d_loss(y_p, y_q, batch_n), {}
# sample y from d
y_p = get_dict_values(self.d.sample(x_p_dict, **kwargs), self.d.var)[0]
return self.g_loss(y_p, batch_n), {}
def g_loss(self, y_p, batch_n):
"""Evaluate a generator loss given an output of the discriminator.
Parameters
----------
y_p : torch.Tensor
Output of discriminator given sample from p.
batch_n : int
Batch size of inputs.
Returns
-------
torch.Tensor
"""
# set labels
t_p = torch.ones(batch_n, 1).to(y_p.device)
t_q = torch.zeros(batch_n, 1).to(y_p.device)
y_p_loss = -self.bce_loss(y_p, t_p) + self.bce_loss(y_p, t_q) # log (y_p) - log (1 - y_p)
return y_p_loss
def d_loss(self, y_p, y_q, batch_n):
# set labels
t_p = torch.ones(batch_n, 1).to(y_p.device)
t_q = torch.zeros(batch_n, 1).to(y_p.device)
return self.bce_loss(y_p, t_p) + self.bce_loss(y_q, t_q)
def loss_train(self, train_x_dict, **kwargs):
return super().loss_train(train_x_dict, **kwargs)
def loss_test(self, test_x_dict, **kwargs):
return super().loss_test(test_x_dict, **kwargs)
class AdversarialWassersteinDistance(AdversarialJensenShannon):
r"""
Wasserstein distance (adversarial training).
.. math::
W(p, q) = \sup_{||d||_{L} \leq 1} \mathbb{E}_{p(x)}[d(x)] - \mathbb{E}_{q(x)}[d(x)]
Examples
--------
>>> import torch
>>> from pixyz.distributions import Deterministic, EmpiricalDistribution, Normal
>>> # Generator
>>> class Generator(Deterministic):
... def __init__(self):
... super(Generator, self).__init__(var=["x"], cond_var=["z"], name="p")
... self.model = nn.Linear(32, 64)
... def forward(self, z):
... return {"x": self.model(z)}
>>> p_g = Generator()
>>> prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
... var=["z"], features_shape=[32], name="p_{prior}")
>>> p = (p_g*prior).marginalize_var("z")
>>> print(p)
Distribution:
p(x) = \int p(x|z)p_{prior}(z)dz
Network architecture:
p_{prior}(z):
Normal(
name=p_{prior}, distribution_name=Normal,
var=['z'], cond_var=[], input_var=[], features_shape=torch.Size([32])
(loc): torch.Size([1, 32])
(scale): torch.Size([1, 32])
)
p(x|z):
Generator(
name=p, distribution_name=Deterministic,
var=['x'], cond_var=['z'], input_var=['z'], features_shape=torch.Size([])
(model): Linear(in_features=32, out_features=64, bias=True)
)
>>> # Data distribution (dummy distribution)
>>> p_data = EmpiricalDistribution(["x"])
>>> print(p_data)
Distribution:
p_{data}(x)
Network architecture:
EmpiricalDistribution(
name=p_{data}, distribution_name=Data distribution,
var=['x'], cond_var=[], input_var=['x'], features_shape=torch.Size([])
)
>>> # Discriminator (critic)
>>> class Discriminator(Deterministic):
... def __init__(self):
... super(Discriminator, self).__init__(var=["t"], cond_var=["x"], name="d")
... self.model = nn.Linear(64, 1)
... def forward(self, x):
... return {"t": self.model(x)}
>>> d = Discriminator()
>>> print(d)
Distribution:
d(t|x)
Network architecture:
Discriminator(
name=d, distribution_name=Deterministic,
var=['t'], cond_var=['x'], input_var=['x'], features_shape=torch.Size([])
(model): Linear(in_features=64, out_features=1, bias=True)
)
>>>
>>> # Set the loss class
>>> loss_cls = AdversarialWassersteinDistance(p, p_data, discriminator=d)
>>> print(loss_cls)
mean(W^{Adv} \left(p(x), p_{data}(x) \right))
>>>
    >>> sample_x = torch.randn(2, 64) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor(-0.0060, grad_fn=<SubBackward0>)
>>> # For evaluating a discriminator loss, set the `discriminator` option to True.
>>> loss_d = loss_cls.eval({"x": sample_x}, discriminator=True)
>>> print(loss_d) # doctest: +SKIP
tensor(-0.3802, grad_fn=<NegBackward>)
    >>> # When training the evaluation metric (discriminator), use the loss_train method.
>>> train_loss = loss_cls.loss_train({"x": sample_x})
References
----------
[Arjovsky+ 2017] Wasserstein GAN
"""
def __init__(self, p, q, discriminator,
clip_value=0.01, **kwargs):
super().__init__(p, q, discriminator, **kwargs)
self._clip_value = clip_value
@property
def _symbol(self):
return sympy.Symbol("mean(W^{{Adv}} \\left({}, {} \\right))".format(self.p.prob_text, self.q.prob_text))
def d_loss(self, y_p, y_q, *args, **kwargs):
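        # The critic maximizes E_p[d(x)] - E_q[d(x)]; its loss is therefore the negative of that difference.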
return - (torch.mean(y_p) - torch.mean(y_q))
def g_loss(self, y_p, y_q, *args, **kwargs):
if self.p.distribution_name == "Data distribution":
y_p = y_p.detach()
if self.q.distribution_name == "Data distribution":
y_q = y_q.detach()
return torch.mean(y_p) - torch.mean(y_q)
def loss_train(self, train_x_dict, **kwargs):
loss = super().loss_train(train_x_dict, **kwargs)
# Clip weights of discriminator
for params in self.d.parameters():
params.data.clamp_(-self._clip_value, self._clip_value)
return loss
def loss_test(self, test_x_dict, **kwargs):
return super().loss_test(test_x_dict, **kwargs)
| 19,715 | 33.650264 | 116 | py |
pixyz | pixyz-main/pixyz/losses/divergences.py | import sympy
import torch
from torch.distributions import kl_divergence
from ..utils import get_dict_values
from .losses import Divergence
def KullbackLeibler(p, q, dim=None, analytical=True, sample_shape=torch.Size([1])):
r"""
    Kullback-Leibler divergence (analytical or Monte Carlo approximation).
.. math::
D_{KL}[p||q] &= \mathbb{E}_{p(x)}\left[\log \frac{p(x)}{q(x)}\right] \qquad \text{(analytical)}\\
&\approx \frac{1}{L}\sum_{l=1}^L \log\frac{p(x_l)}{q(x_l)},
\quad \text{where} \quad x_l \sim p(x) \quad \text{(MC approximation)}.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal, Beta
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["z"], features_shape=[64], name="p")
>>> q = Normal(loc=torch.tensor(1.), scale=torch.tensor(1.), var=["z"], features_shape=[64], name="q")
>>> loss_cls = KullbackLeibler(p,q,analytical=True)
>>> print(loss_cls)
D_{KL} \left[p(z)||q(z) \right]
>>> loss_cls.eval()
tensor([32.])
>>> loss_cls = KullbackLeibler(p,q,analytical=False,sample_shape=[64])
>>> print(loss_cls)
\mathbb{E}_{p(z)} \left[\log p(z) - \log q(z) \right]
>>> loss_cls.eval() # doctest: +SKIP
tensor([31.4713])
"""
if analytical:
loss = AnalyticalKullbackLeibler(p, q, dim)
else:
loss = (p.log_prob() - q.log_prob()).expectation(p, sample_shape=sample_shape)
return loss
class AnalyticalKullbackLeibler(Divergence):
def __init__(self, p, q, dim=None):
self.dim = dim
super().__init__(p, q)
@property
def _symbol(self):
return sympy.Symbol("D_{{KL}} \\left[{}||{} \\right]".format(self.p.prob_text, self.q.prob_text))
def forward(self, x_dict, **kwargs):
if (not hasattr(self.p, 'distribution_torch_class')) or (not hasattr(self.q, 'distribution_torch_class')):
raise ValueError("Divergence between these two distributions cannot be evaluated, "
"got %s and %s." % (self.p.distribution_name, self.q.distribution_name))
input_dict = get_dict_values(x_dict, self.p.input_var, True)
self.p.set_dist(input_dict)
input_dict = get_dict_values(x_dict, self.q.input_var, True)
self.q.set_dist(input_dict)
divergence = kl_divergence(self.p.dist, self.q.dist)
if self.dim:
divergence = torch.sum(divergence, dim=self.dim)
return divergence, {}
dim_list = list(torch.arange(divergence.dim()))
divergence = torch.sum(divergence, dim=dim_list[1:])
return divergence, {}
"""
if (self._p1.distribution_name == "vonMisesFisher" and \
self._p2.distribution_name == "HypersphericalUniform"):
inputs = get_dict_values(x, self._p1.input_var, True)
params1 = self._p1.get_params(inputs, **kwargs)
hyu_dim = self._p2.dim
return vmf_hyu_kl(params1["loc"], params1["scale"],
hyu_dim, self.device), x
raise Exception("You cannot use these distributions, "
"got %s and %s." % (self._p1.distribution_name,
self._p2.distribution_name))
#inputs = get_dict_values(x, self._p2.input_var, True)
#self._p2.set_dist(inputs)
#divergence = kl_divergence(self._p1.dist, self._p2.dist)
if self.dim:
_kl = torch.sum(divergence, dim=self.dim)
return divergence, x
"""
"""
def vmf_hyu_kl(vmf_loc, vmf_scale, hyu_dim, device):
__m = vmf_loc.shape[-1]
vmf_entropy = vmf_scale * ive(__m/2, vmf_scale) / ive((__m/2)-1, vmf_scale)
vmf_log_norm = ((__m / 2 - 1) * torch.log(vmf_scale) - (__m / 2) * math.log(2 * math.pi) - (
vmf_scale + torch.log(ive(__m / 2 - 1, vmf_scale))))
vmf_log_norm = vmf_log_norm.view(*(vmf_log_norm.shape[:-1]))
vmf_entropy = vmf_entropy.view(*(vmf_entropy.shape[:-1])) + vmf_log_norm
hyu_entropy = math.log(2) + ((hyu_dim + 1) / 2) * math.log(math.pi) - torch.lgamma(
torch.Tensor([(hyu_dim + 1) / 2])).to(device)
return - vmf_entropy + hyu_entropy
"""
| 4,222 | 37.045045 | 114 | py |
pixyz | pixyz-main/pixyz/losses/iteration.py | from copy import deepcopy
import sympy
from .losses import Loss
from ..utils import get_dict_values, replace_dict_keys
class IterativeLoss(Loss):
r"""
Iterative loss.
This class allows implementing an arbitrary model which requires iteration.
.. math::
\mathcal{L} = \sum_{t=0}^{T-1}\mathcal{L}_{step}(x_t, h_t),
where :math:`x_t = f_{slice\_step}(x, t)`.
Examples
--------
>>> import torch
>>> from torch.nn import functional as F
>>> from pixyz.distributions import Normal, Bernoulli, Deterministic
>>>
>>> # Set distributions
>>> x_dim = 128
>>> z_dim = 64
>>> h_dim = 32
>>>
>>> # p(x|z,h_{prev})
>>> class Decoder(Bernoulli):
... def __init__(self):
... super().__init__(var=["x"],cond_var=["z", "h_prev"],name="p")
... self.fc = torch.nn.Linear(z_dim + h_dim, x_dim)
... def forward(self, z, h_prev):
... return {"probs": torch.sigmoid(self.fc(torch.cat((z, h_prev), dim=-1)))}
...
>>> # q(z|x,h_{prev})
>>> class Encoder(Normal):
... def __init__(self):
... super().__init__(var=["z"],cond_var=["x", "h_prev"],name="q")
... self.fc_loc = torch.nn.Linear(x_dim + h_dim, z_dim)
... self.fc_scale = torch.nn.Linear(x_dim + h_dim, z_dim)
... def forward(self, x, h_prev):
... xh = torch.cat((x, h_prev), dim=-1)
... return {"loc": self.fc_loc(xh), "scale": F.softplus(self.fc_scale(xh))}
...
>>> # f(h|x,z,h_{prev}) (update h)
>>> class Recurrence(Deterministic):
... def __init__(self):
... super().__init__(var=["h"], cond_var=["x", "z", "h_prev"], name="f")
... self.rnncell = torch.nn.GRUCell(x_dim + z_dim, h_dim)
... def forward(self, x, z, h_prev):
... return {"h": self.rnncell(torch.cat((z, x), dim=-1), h_prev)}
>>>
>>> p = Decoder()
>>> q = Encoder()
>>> f = Recurrence()
>>>
>>> # Set the loss class
>>> step_loss_cls = p.log_prob().expectation(q * f).mean()
>>> print(step_loss_cls)
mean \left(\mathbb{E}_{q(z,h|x,h_{prev})} \left[\log p(x|z,h_{prev}) \right] \right)
>>> loss_cls = IterativeLoss(step_loss=step_loss_cls,
... series_var=["x"], update_value={"h": "h_prev"})
>>> print(loss_cls)
\sum_{t=0}^{t_{max} - 1} mean \left(\mathbb{E}_{q(z,h|x,h_{prev})} \left[\log p(x|z,h_{prev}) \right] \right)
>>>
>>> # Evaluate
>>> x_sample = torch.randn(30, 2, 128) # (timestep_size, batch_size, feature_size)
>>> h_init = torch.zeros(2, 32) # (batch_size, h_dim)
>>> loss = loss_cls.eval({"x": x_sample, "h_prev": h_init})
>>> print(loss) # doctest: +SKIP
    tensor(-2826.0906, grad_fn=<AddBackward0>)
"""
def __init__(self, step_loss, max_iter=None,
series_var=(), update_value={}, slice_step=None, timestep_var=()):
super().__init__()
self.step_loss = step_loss
self.max_iter = max_iter
self.update_value = update_value
self.timestep_var = timestep_var
if timestep_var:
            self.timestep_symbol = sympy.Symbol(self.timestep_var[0])
        else:
            self.timestep_symbol = sympy.Symbol("t")
if not series_var and (max_iter is None):
            raise ValueError("Either `series_var` or `max_iter` must be set.")
self.slice_step = slice_step
if self.slice_step:
self.step_loss = self.step_loss.expectation(self.slice_step)
_input_var = []
_input_var += deepcopy(self.step_loss.input_var)
_input_var += series_var
_input_var += update_value.values()
self._input_var = sorted(set(_input_var), key=_input_var.index)
if timestep_var:
self._input_var.remove(timestep_var[0]) # delete a time-step variable from input_var
self.series_var = series_var
@property
def _symbol(self):
# TODO: naive implementation
dummy_loss = sympy.Symbol("dummy_loss")
if self.max_iter:
max_iter = self.max_iter
else:
            max_iter = sympy.Symbol(sympy.latex(self.timestep_symbol) + "_{max}")
        _symbol = sympy.Sum(dummy_loss, (self.timestep_symbol, 0, max_iter - 1))
_symbol = _symbol.subs({dummy_loss: self.step_loss._symbol})
return _symbol
def slice_step_fn(self, t, x):
return {k: v[t] for k, v in x.items()}
def forward(self, x_dict, **kwargs):
series_x_dict = get_dict_values(x_dict, self.series_var, return_dict=True)
updated_x_dict = get_dict_values(x_dict, list(self.update_value.values()), return_dict=True)
step_loss_sum = 0
# set max_iter
if self.max_iter:
max_iter = self.max_iter
else:
max_iter = len(series_x_dict[self.series_var[0]])
if "mask" in kwargs.keys():
mask = kwargs["mask"].float()
else:
mask = None
for t in range(max_iter):
if self.timestep_var:
x_dict.update({self.timestep_var[0]: t})
if not self.slice_step:
# update series inputs & use slice_step_fn
x_dict.update(self.slice_step_fn(t, series_x_dict))
# evaluate
step_loss, samples = self.step_loss.eval(x_dict, return_dict=True, return_all=False)
x_dict.update(samples)
if mask is not None:
step_loss *= mask[t]
step_loss_sum += step_loss
# update
x_dict = replace_dict_keys(x_dict, self.update_value)
loss = step_loss_sum
# Restore original values
x_dict.update(series_x_dict)
x_dict.update(updated_x_dict)
        # TODO: x_dict contains non-updated variables.
return loss, x_dict
| 5,840 | 34.186747 | 113 | py |
pixyz | pixyz-main/pixyz/losses/wasserstein.py | from torch.nn.modules.distance import PairwiseDistance
import sympy
from .losses import Divergence
from ..utils import get_dict_values
class WassersteinDistance(Divergence):
r"""
Wasserstein distance.
.. math::
W(p, q) = \inf_{\Gamma \in \mathcal{P}(x_p\sim p, x_q\sim q)} \mathbb{E}_{(x_p, x_q) \sim \Gamma}[d(x_p, x_q)]
However, instead of the above true distance, this class computes the following one.
.. math::
W'(p, q) = \mathbb{E}_{x_p\sim p, x_q \sim q}[d(x_p, x_q)].
    Here, :math:`W'` is an upper bound of :math:`W` (i.e., :math:`W\leq W'`), and the two are equal when both :math:`p`
and :math:`q` are degenerate (deterministic) distributions.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="p")
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64], name="q")
>>> loss_cls = WassersteinDistance(p, q)
>>> print(loss_cls)
W^{upper} \left(p(z|x), q(z|x) \right)
>>> loss = loss_cls.eval({"x": torch.randn(1, 64)})
"""
def __init__(self, p, q, metric=PairwiseDistance(p=2)):
if set(p.var) != set(q.var):
raise ValueError("The two distribution variables must be the same.")
if len(p.var) != 1:
raise ValueError("A given distribution must have only one variable.")
super().__init__(p, q)
if len(p.input_var) > 0:
self.input_dist = p
elif len(q.input_var) > 0:
self.input_dist = q
else:
raise NotImplementedError()
self.metric = metric
@property
def _symbol(self):
return sympy.Symbol("W^{{upper}} \\left({}, {} \\right)".format(self.p.prob_text, self.q.prob_text))
def _get_batch_n(self, x_dict):
return get_dict_values(x_dict, self.input_dist.input_var[0])[0].shape[0]
def forward(self, x_dict, **kwargs):
batch_n = self._get_batch_n(x_dict)
# sample from distributions
p_x = get_dict_values(self.p.sample(x_dict, batch_n=batch_n, **kwargs), self.p.var)[0]
q_x = get_dict_values(self.q.sample(x_dict, batch_n=batch_n, **kwargs), self.q.var)[0]
if p_x.shape != q_x.shape:
raise ValueError("The two distribution variables must have the same shape.")
distance = self.metric(p_x, q_x)
return distance, {}
| 2,504 | 32.4 | 119 | py |
pixyz | pixyz-main/pixyz/losses/pdf.py | import sympy
import torch
from .losses import Loss
class LogProb(Loss):
r"""
The log probability density/mass function.
.. math::
\log p(x)
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = LogProb(p) # or p.log_prob()
>>> print(loss_cls)
\log p(x)
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([12.9894, 15.5280])
"""
def __init__(self, p, sum_features=True, feature_dims=None):
input_var = p.var + p.cond_var
super().__init__(input_var=input_var)
self.sum_features = sum_features
self.feature_dims = feature_dims
self.p = p
@property
def _symbol(self):
return sympy.Symbol("\\log {}".format(self.p.prob_text))
def forward(self, x={}, **kwargs):
log_prob = self.p.get_log_prob(x, sum_features=self.sum_features, feature_dims=self.feature_dims, **kwargs)
return log_prob, {}
class Prob(LogProb):
r"""
The probability density/mass function.
.. math::
p(x) = \exp(\log p(x))
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"],
... features_shape=[10])
>>> loss_cls = Prob(p) # or p.prob()
>>> print(loss_cls)
p(x)
    >>> sample_x = torch.randn(2, 10) # Pseudo data
>>> loss = loss_cls.eval({"x": sample_x})
>>> print(loss) # doctest: +SKIP
tensor([3.2903e-07, 5.5530e-07])
"""
@property
def _symbol(self):
return sympy.Symbol(self.p.prob_text)
def forward(self, x={}, **kwargs):
log_prob, x = super().forward(x, **kwargs)
return torch.exp(log_prob), {}
| 1,978 | 25.039474 | 115 | py |
pixyz | pixyz-main/pixyz/losses/elbo.py | import torch
def ELBO(p, q, sample_shape=torch.Size([1])):
r"""
The evidence lower bound (Monte Carlo approximation).
.. math::
\mathbb{E}_{q(z|x)}\left[\log \frac{p(x,z)}{q(z|x)}\right] \approx \frac{1}{L}\sum_{l=1}^L \log p(x, z_l),
\quad \text{where} \quad z_l \sim q(z|x).
Note:
This class is a special case of the :attr:`Expectation` class.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> q = Normal(loc="x", scale=torch.tensor(1.), var=["z"], cond_var=["x"], features_shape=[64]) # q(z|x)
>>> p = Normal(loc="z", scale=torch.tensor(1.), var=["x"], cond_var=["z"], features_shape=[64]) # p(x|z)
>>> loss_cls = ELBO(p,q)
>>> print(loss_cls)
\mathbb{E}_{p(z|x)} \left[\log p(x|z) - \log p(z|x) \right]
>>> loss = loss_cls.eval({"x": torch.randn(1, 64)})
"""
loss = (p.log_prob() - q.log_prob()).expectation(q, sample_shape=sample_shape)
return loss
| 985 | 33 | 114 | py |
pixyz | pixyz-main/pixyz/losses/entropy.py | import sympy
import torch
from pixyz.losses.losses import Loss
from pixyz.losses.divergences import KullbackLeibler
def Entropy(p, analytical=True, sample_shape=torch.Size([1])):
r"""
Entropy (Analytical or Monte Carlo approximation).
.. math::
H(p) &= -\mathbb{E}_{p(x)}[\log p(x)] \qquad \text{(analytical)}\\
&\approx -\frac{1}{L}\sum_{l=1}^L \log p(x_l), \quad \text{where} \quad x_l \sim p(x) \quad \text{(MC approximation)}.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], features_shape=[64])
>>> loss_cls = Entropy(p,analytical=True)
>>> print(loss_cls)
H \left[ {p(x)} \right]
>>> loss_cls.eval()
tensor([90.8121])
>>> loss_cls = Entropy(p,analytical=False,sample_shape=[10])
>>> print(loss_cls)
- \mathbb{E}_{p(x)} \left[\log p(x) \right]
>>> loss_cls.eval() # doctest: +SKIP
tensor([90.5991])
"""
if analytical:
loss = AnalyticalEntropy(p)
else:
loss = -p.log_prob().expectation(p, sample_shape=sample_shape)
return loss
class AnalyticalEntropy(Loss):
def __init__(self, p):
_input_var = p.input_var.copy()
super().__init__(_input_var)
self.p = p
@property
def _symbol(self):
p_text = "{" + self.p.prob_text + "}"
return sympy.Symbol("H \\left[ {} \\right]".format(p_text))
def forward(self, x_dict, **kwargs):
if not hasattr(self.p, 'distribution_torch_class'):
raise ValueError("Entropy of this distribution cannot be evaluated, "
"got %s." % self.p.distribution_name)
entropy = self.p.get_entropy(x_dict)
return entropy, {}
def CrossEntropy(p, q, analytical=False, sample_shape=torch.Size([1])):
r"""
    Cross entropy, i.e., the negative expected log-likelihood (analytical or Monte Carlo approximation).
.. math::
H(p,q) &= -\mathbb{E}_{p(x)}[\log q(x)] \qquad \text{(analytical)}\\
&\approx -\frac{1}{L}\sum_{l=1}^L \log q(x_l), \quad \text{where} \quad x_l \sim p(x) \quad \text{(MC approximation)}.
Examples
--------
>>> import torch
>>> from pixyz.distributions import Normal
>>> p = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["x"], features_shape=[64], name="p")
>>> q = Normal(loc=torch.tensor(1.), scale=torch.tensor(1.), var=["x"], features_shape=[64], name="q")
>>> loss_cls = CrossEntropy(p,q,analytical=True)
>>> print(loss_cls)
D_{KL} \left[p(x)||q(x) \right] + H \left[ {p(x)} \right]
>>> loss_cls.eval()
tensor([122.8121])
>>> loss_cls = CrossEntropy(p,q,analytical=False,sample_shape=[10])
>>> print(loss_cls)
- \mathbb{E}_{p(x)} \left[\log q(x) \right]
>>> loss_cls.eval() # doctest: +SKIP
tensor([123.2192])
"""
if analytical:
loss = Entropy(p) + KullbackLeibler(p, q)
else:
loss = -q.log_prob().expectation(p, sample_shape=sample_shape)
return loss
class StochasticReconstructionLoss(Loss):
def __init__(self, encoder, decoder, sample_shape=torch.Size([1])):
raise NotImplementedError("This function is obsolete."
" please use `-decoder.log_prob().expectation(encoder)` instead of it.")
| 3,375 | 33.44898 | 126 | py |
pixyz | pixyz-main/tests/test_example_usage.py | # flake8: noqa: F841
from __future__ import print_function
# if you want to run all tests (contains below), type> pytest -m "performance or not performance"
import pytest
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
from pixyz.distributions import Deterministic
from pixyz.models import GAN
from pixyz.distributions import InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, Squeeze, Unsqueeze, Preprocess, ActNorm2d, ChannelConv
from pixyz.layers import ResNet
from pixyz.models import ML
from pixyz.distributions.mixture_distributions import MixtureModel
from pixyz.models import VI
from pixyz.utils import get_dict_values
from pixyz.distributions import Normal, Bernoulli, Categorical, ProductOfNormal
from pixyz.losses import KullbackLeibler
from pixyz.models import VAE
from pixyz.utils import print_latex
seed = 1
torch.manual_seed(seed)
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
batch_size = 2
epochs = 2
mock_mnist = [(torch.zeros(28 * 28), 0), (torch.ones(28 * 28), 1)]
mock_mnist_targets = torch.tensor([0, 1])
mock_cifar10 = [(torch.ones(3, 32, 32), 3), (torch.ones(3, 32, 32), 1)]
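# The mock datasets above stand in for MNIST / CIFAR-10 so these example tests run without downloading data.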
# # Conditional variational autoencoder (using the VAE class)
@pytest.mark.performance
def test_run_cvae():
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x,y)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x", "y"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim + y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x, y):
h = F.relu(self.fc1(torch.cat([x, y], 1)))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z,y)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z", "y"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim + y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z, y):
h = F.relu(self.fc1(torch.cat([z, y], 1)))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# In[8]:
kl = KullbackLeibler(q, prior)
print(kl)
print_latex(kl)
# In[9]:
model = VAE(q, p, regularizer=kl, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x, y):
with torch.no_grad():
z = q.sample({"x": x, "y": y}, return_all=False)
z.update({"y": y})
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
recon = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return recon
def plot_image_from_latent(z, y):
with torch.no_grad():
sample = p.sample_mean({"z": z, "y": y}).view(-1, 1, 28, 28).cpu()
return sample
def plot_reconstrunction_changing_y(x, y):
y_change = torch.eye(10)[range(7)].to(device)
batch_dummy = torch.ones(x.size(0))[:, None].to(device)
recon_all = []
with torch.no_grad():
for _y in y_change:
z = q.sample({"x": x, "y": y}, return_all=False)
z.update({"y": batch_dummy * _y[None, :]})
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
recon_all.append(recon_batch)
recon_changing_y = torch.cat(recon_all)
recon_changing_y = torch.cat([x.view(-1, 1, 28, 28), recon_changing_y]).cpu()
return recon_changing_y
# In[13]:
# writer = SummaryWriter()
plot_number = 1
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
y_sample = torch.eye(10)[[plot_number] * 64].to(device)
    _x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_latent(z_sample, y_sample)
recon_changing_y = plot_reconstrunction_changing_y(_x[:8], _y[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_change_y', recon_changing_y, epoch)
#
# writer.close()
# In[ ]:
# # Examples of creating and operating distributions in Pixyz
@pytest.mark.performance
def test_run_distributions():
# In[1]:
# In[2]:
# In[3]:
x_dim = 20
y_dim = 30
z_dim = 40
a_dim = 50
batch_n = 2
class P1(Normal):
def __init__(self):
super(P1, self).__init__(cond_var=["y", "a"], var=["x"], name="p_{1}")
self.fc1 = nn.Linear(y_dim, 10)
self.fc2 = nn.Linear(a_dim, 10)
self.fc21 = nn.Linear(10 + 10, 20)
self.fc22 = nn.Linear(10 + 10, 20)
def forward(self, a, y):
h1 = F.relu(self.fc1(y))
h2 = F.relu(self.fc2(a))
h12 = torch.cat([h1, h2], 1)
return {"loc": self.fc21(h12), "scale": F.softplus(self.fc22(h12))}
class P2(Normal):
def __init__(self):
super(P2, self).__init__(cond_var=["x", "y"], var=["z"], name="p_{2}")
self.fc3 = nn.Linear(x_dim, 30)
self.fc4 = nn.Linear(30 + y_dim, 400)
self.fc51 = nn.Linear(400, 20)
self.fc52 = nn.Linear(400, 20)
def forward(self, x, y):
h3 = F.relu(self.fc3(x))
h4 = F.relu(self.fc4(torch.cat([h3, y], 1)))
return {"loc": self.fc51(h4), "scale": F.softplus(self.fc52(h4))}
p4 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["a"], features_shape=[a_dim], name="p_{4}")
p6 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["y"], features_shape=[y_dim], name="p_{6}")
x = torch.from_numpy(np.random.random((batch_n, x_dim)).astype("float32"))
y = torch.from_numpy(np.random.random((batch_n, y_dim)).astype("float32"))
a = torch.from_numpy(np.random.random((batch_n, a_dim)).astype("float32"))
# In[4]:
p1 = P1()
p2 = P2()
p3 = p2 * p1
p3.name = "p_{3}"
p5 = p3 * p4
p5.name = "p_{5}"
p_all = p1 * p2 * p4 * p6
p_all.name = "p_{all}"
# In[5]:
print(p1)
print_latex(p1)
# In[6]:
print(p2)
print_latex(p2)
# In[7]:
print(p3)
print_latex(p3)
# In[8]:
print(p4)
print_latex(p4)
# In[9]:
print(p5)
print_latex(p5)
# In[10]:
print(p_all)
print_latex(p_all)
# In[11]:
for param in p3.parameters():
print(type(param.data), param.size())
# In[12]:
p1.sample({"a": a, "y": y}, return_all=False)
# In[13]:
p1.sample({"a": a, "y": y}, sample_shape=[5], return_all=False)
# In[14]:
p1.sample({"a": a, "y": y}, return_all=True)
# In[15]:
p1_log_prob = p1.log_prob()
print(p1_log_prob)
print_latex(p1_log_prob)
# In[16]:
outputs = p1.sample({"y": y, "a": a})
print(p1_log_prob.eval(outputs))
# In[17]:
outputs = p2.sample({"x": x, "y": y})
print(p2.log_prob().eval(outputs))
# In[18]:
outputs = p1.sample({"y": y, "a": a})
print(outputs)
# In[19]:
p2.sample(outputs)
# In[20]:
outputs = p3.sample({"y": y, "a": a}, batch_n=batch_n)
print(p3.log_prob().eval(outputs))
# In[21]:
outputs = p_all.sample(batch_n=batch_n)
print(p_all.log_prob().eval(outputs))
# In[ ]:
# # Generative adversarial network (using the GAN class)
@pytest.mark.performance
def test_run_gan():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
z_dim = 100
# generator model p(x|z)
class Generator(Deterministic):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
def block(in_feat, out_feat, normalize=True):
layers = [nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(nn.BatchNorm1d(out_feat, 0.8))
layers.append(nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = nn.Sequential(
*block(z_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
nn.Linear(1024, x_dim),
nn.Sigmoid()
)
def forward(self, z):
x = self.model(z)
return {"x": x}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# generative model
p_g = Generator()
p = (p_g * prior).marginalize_var("z").to(device)
# In[5]:
print(p)
print_latex(p)
# In[6]:
# discriminator model p(t|x)
class Discriminator(Deterministic):
def __init__(self):
super(Discriminator, self).__init__(cond_var=["x"], var=["t"], name="d")
self.model = nn.Sequential(
nn.Linear(x_dim, 512),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(512, 256),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(256, 1),
nn.Sigmoid()
)
def forward(self, x):
t = self.model(x)
return {"t": t}
d = Discriminator().to(device)
# In[7]:
print(d)
print_latex(d)
# In[8]:
model = GAN(p, d,
optimizer=optim.Adam, optimizer_params={"lr": 0.0002},
d_optimizer=optim.Adam, d_optimizer_params={"lr": 0.0002})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
train_d_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss, d_loss = model.train({"x": x})
train_loss += loss
train_d_loss += d_loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
train_d_loss = train_d_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}, {:.4f}'.format(epoch, train_loss.item(), train_d_loss.item()))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
test_d_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss, d_loss = model.test({"x": x})
test_loss += loss
test_d_loss += d_loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
test_d_loss = test_d_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}, {:.4f}'.format(test_loss, test_d_loss.item()))
return test_loss
# In[11]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p_g.sample({"z": z_sample})["x"].view(-1, 1, 28, 28).cpu()
return sample
# In[12]:
# writer = SummaryWriter()
z_sample = torch.randn(64, z_dim).to(device)
    _x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = _y.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
#
# writer.close()
# In[ ]:
# # Glow (CIFAR10)
@pytest.mark.performance
def test_run_glow():
# In[1]:
# In[2]:
root = '../data'
num_workers = 8
# transform_train = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
# transform_test = transforms.Compose([transforms.ToTensor()])
#
# train_loader = DataLoader(datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train),
# batch_size=batch_size, shuffle=True, num_workers=num_workers)
#
# test_loader = DataLoader(datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test),
# batch_size=batch_size, shuffle=False, num_workers=num_workers)
train_loader = DataLoader(mock_cifar10, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_loader = DataLoader(mock_cifar10, batch_size=batch_size, shuffle=False, num_workers=num_workers)
# In[3]:
# In[4]:
in_channels = 3
mid_channels = 64
num_scales = 2
input_dim = 32
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[in_channels, input_dim, input_dim], name="p_prior")
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_channels, mid_channels):
super().__init__()
self.resnet = ResNet(in_channels=in_channels, mid_channels=mid_channels, out_channels=in_channels * 2,
num_blocks=8, kernel_size=3, padding=1,
double_after_norm=True)
def forward(self, x):
s_t = self.resnet(x)
log_s, t = torch.chunk(s_t, 2, dim=1)
log_s = torch.tanh(log_s)
return log_s, t
# In[7]:
flow_list = []
flow_list.append(Preprocess())
# Squeeze -> 3x coupling (channel-wise)
flow_list.append(Squeeze())
for i in range(3):
flow_list.append(ActNorm2d(in_channels * 4))
flow_list.append(ChannelConv(in_channels * 4))
flow_list.append(AffineCoupling(in_features=in_channels * 4, mask_type="channel_wise",
scale_translate_net=ScaleTranslateNet(in_channels * 4, mid_channels * 2),
inverse_mask=False))
flow_list.append(Unsqueeze())
f = FlowList(flow_list)
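# Each loop iteration above appends one Glow-style "step of flow" on the squeezed
# (4x-channel) representation: activation normalization (ActNorm2d), an invertible 1x1
# convolution (ChannelConv), and an affine coupling layer.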
# In[9]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print(p)
print_latex(p)
# In[10]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).cpu()
return sample
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z)
comparison = torch.cat([x.view(-1, 3, 32, 32), recon_batch]).cpu()
return comparison
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, 3, 32, 32).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# # Gaussian Mixture Model
@pytest.mark.performance
def test_run_gmm():
# In[1]:
# import matplotlib.pyplot as plt
# from matplotlib import cm
# from mpl_toolkits.mplot3d import Axes3D
# ### toy dataset
# In[2]:
# https://angusturner.github.io/generative_models/2017/11/03/pytorch-gaussian-mixture-model.html
def sample(mu, var, nb_samples=500):
"""
Return a tensor of (nb_samples, features), sampled
from the parameterized gaussian.
:param mu: torch.Tensor of the means
:param var: torch.Tensor of variances (NOTE: diagonal covariance, i.e. zero off-diagonal covariances)
"""
out = []
for i in range(nb_samples):
out += [
torch.normal(mu, var.sqrt())
]
return torch.stack(out, dim=0)
# generate some clusters
cluster1 = sample(
torch.Tensor([1.5, 2.5]),
torch.Tensor([1.2, .8]),
nb_samples=150
)
cluster2 = sample(
torch.Tensor([7.5, 7.5]),
torch.Tensor([.75, .5]),
nb_samples=50
)
cluster3 = sample(
torch.Tensor([8, 1.5]),
torch.Tensor([.6, .8]),
nb_samples=100
)
def plot_2d_sample(sample_dict):
x = sample_dict["x"][:, 0].data.numpy()
y = sample_dict["x"][:, 1].data.numpy()
# plt.plot(x, y, 'gx')
# plt.show()
# In[3]:
# create the dummy dataset, by combining the clusters.
samples = torch.cat([cluster1, cluster2, cluster3])
samples = (samples - samples.mean(dim=0)) / samples.std(dim=0)
samples_dict = {"x": samples}
plot_2d_sample(samples_dict)
# ## GMM
# In[4]:
z_dim = 3 # the number of mixture
x_dim = 2
distributions = []
for i in range(z_dim):
loc = torch.randn(x_dim)
scale = torch.empty(x_dim).fill_(0.6)
distributions.append(Normal(loc=loc, scale=scale, var=["x"], name="p_%d" % i))
probs = torch.empty(z_dim).fill_(1. / z_dim)
prior = Categorical(probs=probs, var=["z"], name="p_{prior}")
# In[5]:
p = MixtureModel(distributions=distributions, prior=prior)
print(p)
print_latex(p)
# In[6]:
post = p.posterior()
print(post)
print_latex(post)
# In[7]:
def get_density(N=200, x_range=(-5, 5), y_range=(-5, 5)):
x = np.linspace(*x_range, N)
y = np.linspace(*y_range, N)
x, y = np.meshgrid(x, y)
# get the design matrix
points = np.concatenate([x.reshape(-1, 1), y.reshape(-1, 1)], axis=1)
points = torch.from_numpy(points).float()
pdf = p.prob().eval({"x": points}).data.numpy().reshape([N, N])
return x, y, pdf
# In[8]:
# def plot_density_3d(x, y, loglike):
# fig = plt.figure(figsize=(10, 10))
# ax = fig.gca(projection='3d')
# ax.plot_surface(x, y, loglike, rstride=3, cstride=3, linewidth=1, antialiased=True,
# cmap=cm.inferno)
# cset = ax.contourf(x, y, loglike, zdir='z', offset=-0.15, cmap=cm.inferno)
#
# # adjust the limits, ticks and view angle
# ax.set_zlim(-0.15, 0.2)
# ax.set_zticks(np.linspace(0, 0.2, 5))
# ax.view_init(27, -21)
# plt.show()
# In[9]:
def plot_density_2d(x, y, pdf):
# fig = plt.figure(figsize=(5, 5))
# plt.plot(samples_dict["x"][:, 0].data.numpy(), samples_dict["x"][:, 1].data.numpy(), 'gx')
#
# for d in distributions:
# plt.scatter(d.loc[0, 0], d.loc[0, 1], c='r', marker='o')
#
# cs = plt.contour(x, y, pdf, 10, colors='k', linewidths=2)
# plt.show()
pass
# In[10]:
eps = 1e-6
min_scale = 1e-6
# plot_density_3d(*get_density())
plot_density_2d(*get_density())
print("Epoch: {}, log-likelihood: {}".format(0, p.log_prob().mean().eval(samples_dict)))
for epoch in range(20):
# E-step
posterior = post.prob().eval(samples_dict)
# M-step
N_k = posterior.sum(dim=1) # (n_mix,)
# update probs
probs = N_k / N_k.sum() # (n_mix,)
prior.probs[0] = probs
# update loc & scale
loc = (posterior[:, None] @ samples[None]).squeeze(1) # (n_mix, n_dim)
loc /= (N_k[:, None] + eps)
cov = (samples[None, :, :] - loc[:, None, :]) ** 2 # Covariances are set to 0.
var = (posterior[:, None, :] @ cov).squeeze(1) # (n_mix, n_dim)
var /= (N_k[:, None] + eps)
scale = var.sqrt()
for i, d in enumerate(distributions):
d.loc[0] = loc[i]
d.scale[0] = scale[i]
# plot_density_3d(*get_density())
plot_density_2d(*get_density())
print("Epoch: {}, log-likelihood: {}".format(epoch + 1, p.log_prob().mean().eval({"x": samples}).mean()))
# In[11]:
pseudo_sample_dict = p.sample(batch_n=200)
plot_2d_sample(pseudo_sample_dict)
# In[ ]:
# # Variational inference on a hierarchical latent model
@pytest.mark.performance
def test_run_hvi():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
a_dim = 64
z_dim = 32
# inference models
class Q1(Normal):
def __init__(self):
super(Q1, self).__init__(cond_var=["x"], var=["a"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, a_dim)
self.fc32 = nn.Linear(512, a_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
class Q2(Normal):
def __init__(self):
super(Q2, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
q1 = Q1().to(device)
q2 = Q2().to(device)
q = q1 * q2
q.name = "q"
# generative models
class P2(Normal):
def __init__(self):
super(P2, self).__init__(cond_var=["z"], var=["a"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, a_dim)
self.fc32 = nn.Linear(512, a_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
class P3(Bernoulli):
def __init__(self):
super(P3, self).__init__(cond_var=["a"], var=["x"], name="p")
self.fc1 = nn.Linear(a_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, a):
h = F.relu(self.fc1(a))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p2 = P2().to(device)
p3 = P3().to(device)
p1 = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
_p = p2 * p3
p = _p * p1
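# Factorizations defined above:
#   inference model  q(z, a | x) = q(a | x) q(z | x)
#   generative model p(x, a, z)  = p(x | a) p(a | z) p(z)
# so the VI objective below is an ELBO over both latent variables a and z.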
# In[5]:
print(p)
print_latex(p)
# In[6]:
print(_p)
print_latex(_p)
# In[7]:
print(q)
print_latex(q)
# In[8]:
model = VI(p, q, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[11]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x})
z = get_dict_values(z, _p.cond_var, return_dict=True) # select latent variables
recon_batch = _p.sample(z)["x"].view(-1, 1, 28, 28) # TODO: it should be sample_mean
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = _p.sample({"z": z_sample})["x"].view(-1, 1, 28, 28).cpu() # TODO: it should be sample_mean
return sample
# In[12]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# # JMVAE with a PoE encoder (using the VAE class)
# * JMVAE: Joint Multimodal Learning with Deep Generative Models
# * The PoE encoder is originally proposed in "Multimodal Generative Models for Scalable Weakly-Supervised Learning"
@pytest.mark.performance
def test_run_jmvae_poe():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x)
class InferenceX(Normal):
def __init__(self):
super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|y)
class InferenceY(Normal):
def __init__(self):
super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
self.fc1 = nn.Linear(y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, y):
h = F.relu(self.fc1(y))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class GeneratorX(Bernoulli):
def __init__(self):
super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
# generative model p(y|z)
class GeneratorY(Categorical):
def __init__(self):
super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": F.softmax(self.fc3(h), dim=1)}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_x = GeneratorX().to(device)
p_y = GeneratorY().to(device)
p = p_x * p_y
q_x = InferenceX().to(device)
q_y = InferenceY().to(device)
q = ProductOfNormal([q_x, q_y], name="q").to(device)
# In[5]:
print(q)
print_latex(q)
# In[6]:
print(p)
print_latex(p)
# In[7]:
kl = KullbackLeibler(q, prior)
kl_x = KullbackLeibler(q, q_x)
kl_y = KullbackLeibler(q, q_y)
regularizer = kl + kl_x + kl_y
print(regularizer)
print_latex(regularizer)
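# JMVAE-kl regularizer: besides the usual KL(q(z|x,y) || p(z)) term, KL(q || q_x) and
# KL(q || q_y) pull the unimodal encoders q(z|x) and q(z|y) towards the joint encoder,
# so that inference remains possible when one of the modalities is missing at test time.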
# In[8]:
model = VAE(q, p, other_distributions=[q_x, q_y],
regularizer=regularizer, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[11]:
def plot_reconstrunction_missing(x):
with torch.no_grad():
z = q_x.sample({"x": x}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_label(x, y):
with torch.no_grad():
x_all = [x.view(-1, 1, 28, 28)]
for i in range(7):
z = q_y.sample({"y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
x_all.append(recon_batch)
comparison = torch.cat(x_all).cpu()
return comparison
def plot_reconstrunction(x, y):
with torch.no_grad():
z = q.sample({"x": x, "y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
# In[12]:
# writer = SummaryWriter()
plot_number = 1
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_label(_x[:8], _y[:8])
recon_missing = plot_reconstrunction_missing(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_missing', recon_missing, epoch)
#
# writer.close()
# In[ ]:
# # Joint multimodal variational autoencoder (JMVAE, using the VAE class)
# Original paper: Joint Multimodal Learning with Deep Generative Models (https://arxiv.org/abs/1611.01891 )
@pytest.mark.performance
def test_run_jmvae():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x,y)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x", "y"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim + y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x, y):
h = F.relu(self.fc1(torch.cat([x, y], 1)))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|x)
class InferenceX(Normal):
def __init__(self):
super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|y)
class InferenceY(Normal):
def __init__(self):
super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
self.fc1 = nn.Linear(y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, y):
h = F.relu(self.fc1(y))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class GeneratorX(Bernoulli):
def __init__(self):
super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
# generative model p(y|z)
class GeneratorY(Categorical):
def __init__(self):
super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": F.softmax(self.fc3(h), dim=1)}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_x = GeneratorX().to(device)
p_y = GeneratorY().to(device)
q = Inference().to(device)
q_x = InferenceX().to(device)
q_y = InferenceY().to(device)
p = p_x * p_y
# In[5]:
print(p)
print_latex(p)
# In[6]:
kl = KullbackLeibler(q, prior)
kl_x = KullbackLeibler(q, q_x)
kl_y = KullbackLeibler(q, q_y)
regularizer = kl + kl_x + kl_y
print(regularizer)
print_latex(regularizer)
# In[7]:
model = VAE(q, p, other_distributions=[q_x, q_y],
regularizer=regularizer, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[8]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[9]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[10]:
def plot_reconstrunction_missing(x):
with torch.no_grad():
z = q_x.sample({"x": x}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_label(x, y):
with torch.no_grad():
x_all = [x.view(-1, 1, 28, 28)]
for i in range(7):
z = q_y.sample({"y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
x_all.append(recon_batch)
comparison = torch.cat(x_all).cpu()
return comparison
def plot_reconstrunction(x, y):
with torch.no_grad():
z = q.sample({"x": x, "y": y}, return_all=False)
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
# In[11]:
# writer = SummaryWriter()
plot_number = 1
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_label(_x[:8], _y[:8])
recon_missing = plot_reconstrunction_missing(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_missing', recon_missing, epoch)
#
# writer.close()
# In[ ]:
# # Semi-supervised learning with M2 model
@pytest.mark.performance
def test_run_m2():
# In[1]:
# In[2]:
# https://github.com/wohlert/semi-supervised-pytorch/blob/master/examples/notebooks/datautils.py
from functools import reduce
from operator import __or__
from torch.utils.data.sampler import SubsetRandomSampler
# from torchvision.datasets import MNIST
import numpy as np
# labels_per_class = 10
# n_labels = 10
labels_per_class = 1
n_labels = 2
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
#
# mnist_train = MNIST(root=root, train=True, download=True, transform=transform)
# mnist_valid = MNIST(root=root, train=False, transform=transform)
mnist_train = mock_mnist
mnist_valid = mock_mnist
def get_sampler(labels, n=None):
# Only choose digits in n_labels
(indices,) = np.where(reduce(__or__, [labels == i for i in np.arange(n_labels)]))
# Ensure uniform distribution of labels
np.random.shuffle(indices)
indices = np.hstack([list(filter(lambda idx: labels[idx] == i, indices))[:n] for i in range(n_labels)])
indices = torch.from_numpy(indices)
sampler = SubsetRandomSampler(indices)
return sampler
# Dataloaders for MNIST
# kwargs = {'num_workers': 1, 'pin_memory': True}
# labelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
# sampler=get_sampler(mnist_train.targets.numpy(), labels_per_class),
# **kwargs)
# unlabelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
# sampler=get_sampler(mnist_train.targets.numpy()), **kwargs)
# validation = torch.utils.data.DataLoader(mnist_valid, batch_size=batch_size,
# sampler=get_sampler(mnist_valid.targets.numpy()), **kwargs)
kwargs = {'num_workers': 1, 'pin_memory': True}
labelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
sampler=get_sampler(mock_mnist_targets.numpy(), labels_per_class),
**kwargs)
unlabelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size,
sampler=get_sampler(mock_mnist_targets.numpy()), **kwargs)
validation = torch.utils.data.DataLoader(mnist_valid, batch_size=batch_size,
sampler=get_sampler(mock_mnist_targets.numpy()), **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli, RelaxedCategorical, Categorical
from pixyz.models import Model
from pixyz.losses import ELBO
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x,y)
class Inference(Normal):
def __init__(self):
super().__init__(cond_var=["x", "y"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim + y_dim, 512)
self.fc21 = nn.Linear(512, z_dim)
self.fc22 = nn.Linear(512, z_dim)
def forward(self, x, y):
h = F.relu(self.fc1(torch.cat([x, y], 1)))
return {"loc": self.fc21(h), "scale": F.softplus(self.fc22(h))}
# generative model p(x|z,y)
class Generator(Bernoulli):
def __init__(self):
super().__init__(cond_var=["z", "y"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim + y_dim, 512)
self.fc2 = nn.Linear(512, x_dim)
def forward(self, z, y):
h = F.relu(self.fc1(torch.cat([z, y], 1)))
return {"probs": torch.sigmoid(self.fc2(h))}
# classifier p(y|x)
class Classifier(RelaxedCategorical):
def __init__(self):
super(Classifier, self).__init__(cond_var=["x"], var=["y"], name="p")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, y_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.softmax(self.fc2(h), dim=1)
return {"probs": h}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# distributions for supervised learning
p = Generator().to(device)
q = Inference().to(device)
f = Classifier().to(device)
p_joint = p * prior
# In[5]:
print(p_joint)
print_latex(p_joint)
# In[6]:
print(q)
print_latex(q)
# In[7]:
print(f)
print_latex(f)
# In[8]:
# distributions for unsupervised learning
_q_u = q.replace_var(x="x_u", y="y_u")
p_u = p.replace_var(x="x_u", y="y_u")
f_u = f.replace_var(x="x_u", y="y_u")
q_u = _q_u * f_u
p_joint_u = p_u * prior
p_joint_u.to(device)
q_u.to(device)
f_u.to(device)
print(p_joint_u)
print_latex(p_joint_u)
# In[9]:
print(q_u)
print_latex(q_u)
# In[10]:
print(f_u)
print_latex(f_u)
# In[11]:
elbo_u = ELBO(p_joint_u, q_u)
elbo = ELBO(p_joint, q)
nll = -f.log_prob() # or -LogProb(f)
rate = 1 * (len(unlabelled) + len(labelled)) / len(labelled)
loss_cls = -elbo_u.mean() - elbo.mean() + (rate * nll).mean()
print(loss_cls)
print_latex(loss_cls)
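# Semi-supervised M2 objective (Kingma et al., 2014): a negative ELBO on the labelled data,
# a negative ELBO on the unlabelled data (where the unobserved label y_u is treated as an
# extra latent variable sampled from the classifier), plus a scaled classification term
# (the negative log-likelihood of the classifier f on labelled data); the `rate` factor
# above plays a role analogous to the alpha weight in the original paper.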
# In[12]:
model = Model(loss_cls, test_loss=nll.mean(),
distributions=[p, q, f], optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[13]:
def train(epoch):
train_loss = 0
for x_u, y_u in unlabelled:
x, y = next(iter(labelled))
x = x.to(device)
y = torch.eye(10)[y].to(device)
x_u = x_u.to(device)
loss = model.train({"x": x, "y": y, "x_u": x_u})
train_loss += loss
train_loss = train_loss * unlabelled.batch_size / len(unlabelled.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[14]:
def test(epoch):
test_loss = 0
correct = 0
total = 0
for x, y in validation:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
pred_y = f.sample_mean({"x": x})
total += y.size(0)
correct += (pred_y.argmax(dim=1) == y.argmax(dim=1)).sum().item()
test_loss = test_loss * validation.batch_size / len(validation.dataset)
test_accuracy = 100 * correct / total
print('Test loss: {:.4f}, Test accuracy: {:.4f}'.format(test_loss, test_accuracy))
return test_loss, test_accuracy
# In[15]:
# writer = SummaryWriter()
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss, test_accuracy = test(epoch)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
# writer.add_scalar('test_accuracy', test_accuracy, epoch)
#
# writer.close()
# In[ ]:
# # Maximum likelihood estimation (using the ML class)
@pytest.mark.performance
def test_run_maximum_likelihood():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Categorical
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
y_dim = 10
# classifier p(y|x)
class Classifier(Categorical):
def __init__(self):
super(Classifier, self).__init__(cond_var=["x"], var=["y"])
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
h = F.softmax(self.fc3(h), dim=1)
return {"probs": h}
p = Classifier().to(device)
# In[5]:
print(p)
print_latex(p)
# In[6]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[7]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[8]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[9]:
# writer = SummaryWriter()
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.close()
# In[ ]:
# # MMD-VAE (using the Model class)
@pytest.mark.performance
def test_run_mmd_vae():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli, EmpiricalDistribution
from pixyz.losses import CrossEntropy, MMD
from pixyz.models import Model
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_data = EmpiricalDistribution(["x"]).to(device)
q_mg = (q * p_data).marginalize_var("x")
q_mg.name = "q"
# In[5]:
print(p)
print_latex(p)
# In[6]:
print(q_mg)
print_latex(q_mg)
# In[7]:
loss_cls = CrossEntropy(q, p).mean() + MMD(q_mg, prior, kernel="gaussian", sigma_sqr=z_dim / 2.)
print(loss_cls)
print_latex(loss_cls)
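# The MMD term above is configured with a Gaussian kernel and sigma_sqr = z_dim / 2.
# A minimal sketch of a squared-MMD estimator with such a kernel is given below for
# illustration only (it assumes `torch` is imported in this module and is not Pixyz's
# actual MMD implementation):
def _mmd_gaussian_sketch(z_q, z_p, sigma_sqr):
    # z_q, z_p: (batch, z_dim) samples from the aggregate posterior and from the prior
    def kernel(a, b):
        # Gaussian kernel k(a, b) = exp(-||a - b||^2 / (2 * sigma_sqr)) over all pairs
        sq_dist = torch.cdist(a, b) ** 2
        return torch.exp(-sq_dist / (2.0 * sigma_sqr))
    # biased estimator of MMD^2 = E[k(q, q)] - 2 E[k(q, p)] + E[k(p, p)]
    return kernel(z_q, z_q).mean() - 2 * kernel(z_q, z_p).mean() + kernel(z_p, z_p).mean()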
# In[8]:
model = Model(loss=loss_cls, distributions=[p, q, q_mg], optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[9]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[10]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[11]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x}, return_all=False)
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[12]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# # MVAE
@pytest.mark.performance
def test_run_mvae():
# * Original paper: Multimodal Generative Models for Scalable Weakly-Supervised Learning (https://papers.nips.cc/paper/7801-multimodal-generative-models-for-scalable-weakly-supervised-learning.pdf)
# * Original code: https://github.com/mhw32/multimodal-vae-public
#
# ### MVAE summary
# The multimodal variational autoencoder (MVAE) uses a product-of-experts inference network and a sub-sampled training paradigm to solve the multi-modal inference problem.
# - Product-of-experts
# In the multimodal setting we assume that the N modalities, $x_{1}, x_{2}, \ldots, x_{N}$, are conditionally independent given the common latent variable $z$. That is, we assume a generative model of the form $p_{\theta}(x_{1}, x_{2}, \ldots, x_{N}, z) = p(z)p_{\theta}(x_{1}|z)p_{\theta}(x_{2}|z) \cdots p_{\theta}(x_{N}|z)$. The conditional independence assumptions in the generative model imply a relation between the joint and single-modality posteriors: the joint posterior is a product of the individual posteriors, with an additional quotient by the prior (see the sketch below).
#
# - Sub-sampled training
# MVAE sub-samples which ELBO terms to optimize at every gradient step, both to capture the relationships between modalities and to train the individual inference networks.
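# A minimal sketch of the Gaussian product-of-experts combination (illustration only; the
# actual combination is performed by pixyz's ProductOfNormal defined below). Given experts
# N(mu_i, sigma_i^2), including the prior N(0, 1), the product is Gaussian with
#   precision T = sum_i 1 / sigma_i^2,  mean = (sum_i mu_i / sigma_i^2) / T,  variance = 1 / T.
def _poe_gaussian_sketch(locs, scales):
    # locs, scales: lists of (batch, z_dim) tensors, one entry per expert (prior included)
    precisions = [1.0 / (s ** 2) for s in scales]
    total_precision = sum(precisions)
    loc = sum(l * prec for l, prec in zip(locs, precisions)) / total_precision
    scale = (1.0 / total_precision) ** 0.5
    return loc, scale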
# In[1]:
# In[2]:
# MNIST
# treat labels as a second modality
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.utils import print_latex
# ## Define probability distributions
# ### In the original paper
# Modalities: $x_{1}, x_{2}, ..., x_{N}$
# Generative model:
#
# $p_{\theta}\left(x_{1}, x_{2}, \ldots, x_{N}, z\right)=p(z) p_{\theta}\left(x_{1} | z\right) p_{\theta}\left(x_{2} | z\right) \cdots p_{\theta}\left(x_{N} | z\right)$
#
# Inference:
#
# $p\left(z | x_{1}, \ldots, x_{N}\right) \propto \frac{\prod_{i=1}^{N} p\left(z | x_{i}\right)}{\prod_{i=1}^{N-1} p(z)} \approx \frac{\prod_{i=1}^{N}\left[\tilde{q}\left(z | x_{i}\right) p(z)\right]}{\prod_{i=1}^{N-1} p(z)}=p(z) \prod_{i=1}^{N} \tilde{q}\left(z | x_{i}\right)$
#
# ### MNIST settings
# Modalities:
# - x for image modality
# - y for label modality
#
# Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$
# Generators:
# $p_{\theta}(x|z) = \cal B(x; \lambda = g_x(z))$ for image modality
# $p_{\theta}(y|z) = \cal Cat(y; \lambda = g_y(z))$ for label modality
# $p_{\theta}\left(x, y, z\right)=p(z) p_{\theta}(x| z) p_{\theta}(y | z)$
#
# Inferences:
# $q_{\phi}(z|x) = \cal N(z; \mu=fx_\mu(x), \sigma^2=fx_{\sigma^2}(x))$ for image modality
# $q_{\phi}(z|y) = \cal N(z; \mu=fy_\mu(y), \sigma^2=fy_{\sigma^2}(y))$ for label modality
# $p(z)q_{\phi}(z|x)q_{\phi}(z|y)$
#
# In[4]:
from pixyz.distributions import Normal, Bernoulli, Categorical, ProductOfNormal
x_dim = 784
y_dim = 10
z_dim = 64
# inference model q(z|x) for image modality
class InferenceX(Normal):
def __init__(self):
super(InferenceX, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# inference model q(z|y) for label modality
class InferenceY(Normal):
def __init__(self):
super(InferenceY, self).__init__(cond_var=["y"], var=["z"], name="q")
self.fc1 = nn.Linear(y_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, y):
h = F.relu(self.fc1(y))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class GeneratorX(Bernoulli):
def __init__(self):
super(GeneratorX, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
# generative model p(y|z)
class GeneratorY(Categorical):
def __init__(self):
super(GeneratorY, self).__init__(cond_var=["z"], var=["y"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, y_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": F.softmax(self.fc3(h), dim=1)}
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_x = GeneratorX().to(device)
p_y = GeneratorY().to(device)
p = p_x * p_y
q_x = InferenceX().to(device)
q_y = InferenceY().to(device)
# equation (4) in the paper
# "we can use a product of experts (PoE), including a “prior expert”, as the approximating distribution for the joint-posterior"
# Pixyz docs: https://docs.pixyz.io/en/latest/distributions.html#pixyz.distributions.ProductOfNormal
q = ProductOfNormal([q_x, q_y], name="q").to(device)
# In[5]:
print(q)
print_latex(q)
# In[6]:
print(p)
print_latex(p)
# ## Define Loss function
# $\cal L = \mathrm{ELBO}\left(x_{1}, \ldots, x_{N}\right)+\sum_{i=1}^{N} \mathrm{ELBO}\left(x_{i}\right)+\sum_{j=1}^{k} \mathrm{ELBO}\left(X_{j}\right)$
# In[7]:
from pixyz.losses import KullbackLeibler
from pixyz.losses import LogProb
from pixyz.losses import Expectation as E
# In[8]:
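# Note: despite their names, ELBO, ELBO_x and ELBO_y below are negative ELBOs
# (expected negative log-likelihood plus KL), i.e. losses to be minimized, so the total
# loss is simply their sum.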
ELBO = -E(q, LogProb(p)) + KullbackLeibler(q, prior)
ELBO_x = -E(q_x, LogProb(p_x)) + KullbackLeibler(q_x, prior)
ELBO_y = -E(q_y, LogProb(p_y)) + KullbackLeibler(q_y, prior)
loss = ELBO.mean() + ELBO_x.mean() + ELBO_y.mean()
print_latex(loss) # Note: Terms in the printed loss may be reordered
# ## Define MVAE model using Model Class
# In[9]:
from pixyz.models import Model
model = Model(loss=loss, distributions=[p_x, p_y, q_x, q_y],
optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# ## Define Train and Test loop using model
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# ## Reconstruction and generation
# In[12]:
def plot_reconstrunction_missing_label_modality(x):
with torch.no_grad():
# infer from x (image modality) only
z = q_x.sample({"x": x}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_label(x, y):
with torch.no_grad():
x_all = [x.view(-1, 1, 28, 28)]
for i in range(7):
# infer from y (label modality) only
z = q_y.sample({"y": y}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
x_all.append(recon_batch)
comparison = torch.cat(x_all).cpu()
return comparison
def plot_reconstrunction(x, y):
with torch.no_grad():
# infer from x and y
z = q.sample({"x": x, "y": y}, return_all=False)
# generate image from latent variable
recon_batch = p_x.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
# In[13]:
# for visualising in TensorBoard
# writer = SummaryWriter()
plot_number = 1
# set-aside observation for watching generative model improvement
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_label(_x[:8], _y[:8])
recon_missing = plot_reconstrunction_missing_label_modality(_x[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_label', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_missing_label', recon_missing, epoch)
#
# writer.close()
# In[ ]:
# # A toy example of variational inference with normalizing flow (using the VI class)
@pytest.mark.performance
def test_run_normalizing_flow_toy():
# In[1]:
# In[2]:
from pixyz.distributions import CustomProb, Normal, TransformedDistribution
from pixyz.models import VI
from pixyz.flows import PlanarFlow, FlowList
from pixyz.utils import print_latex
# In[3]:
# def plot_samples(points):
# X_LIMS = (-4, 4)
# Y_LIMS = (-4, 4)
#
# fig = plt.figure(figsize=(4, 4))
# ax = fig.add_subplot(111)
# ax.scatter(points[:, 0], points[:, 1], alpha=0.7, s=25)
# ax.set_xlim(*X_LIMS)
# ax.set_ylim(*Y_LIMS)
# ax.set_xlabel("p(z)")
#
# plt.show()
# In[4]:
import torch
x_dim = 2
def log_prob(z):
z1, z2 = torch.chunk(z, chunks=2, dim=1)
norm = torch.sqrt(z1 ** 2 + z2 ** 2)
exp1 = torch.exp(-0.5 * ((z1 - 2) / 0.6) ** 2)
exp2 = torch.exp(-0.5 * ((z1 + 2) / 0.6) ** 2)
u = 0.5 * ((norm - 2) / 0.4) ** 2 - torch.log(exp1 + exp2)
return -u
p = CustomProb(log_prob, var=["z"])
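# The energy above defines a density concentrated on a ring of radius 2 in the (z1, z2)
# plane, split into two modes near z1 = -2 and z1 = +2; it is a common toy target for
# normalizing-flow experiments.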
# In[5]:
# def plot_density(p):
# X_LIMS = (-4, 4)
# Y_LIMS = (-4, 4)
#
# x1 = np.linspace(*X_LIMS, 300)
# x2 = np.linspace(*Y_LIMS, 300)
# x1, x2 = np.meshgrid(x1, x2)
# shape = x1.shape
# x1 = x1.ravel()
# x2 = x2.ravel()
#
# z = np.c_[x1, x2]
# z = torch.FloatTensor(z)
#
# density_values = p.prob().eval({"z": z}).data.numpy().reshape(shape)
# plt.imshow(density_values, cmap='jet')
# plt.show()
# plot_density(p)
# In[6]:
# prior
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["x"], features_shape=[x_dim], name="prior").to(device)
# In[7]:
# flow
f = FlowList([PlanarFlow(x_dim) for _ in range(32)])
# In[8]:
# transformed distribution (x -> f -> z)
q = TransformedDistribution(prior, f, var=["z"], name="q").to(device)
print(q)
print_latex(q)
# In[9]:
model = VI(p, q, optimizer=optim.Adam, optimizer_params={"lr": 1e-2})
print(model)
print_latex(model)
# In[10]:
for epoch in range(epochs):
loss = model.train(batch_n=batch_size)
if epoch % 100 == 0:
print('Epoch: {} Train loss: {:.4f}'.format(epoch, loss))
loss = model.test(batch_n=batch_size)
samples = q.sample(batch_n=1000)
# plot_samples(samples["z"].cpu().data.numpy())
# In[ ]:
# # Real NVP (CIFAR10)
@pytest.mark.performance
def test_run_real_nvp_cifar():
# In[1]:
# In[2]:
# root = '../data'
# num_workers = 8
#
# transform_train = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
# transform_test = transforms.Compose([transforms.ToTensor()])
#
# train_loader = DataLoader(datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train),
# batch_size=batch_size, shuffle=True, num_workers=num_workers)
#
# test_loader = DataLoader(datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test),
# batch_size=batch_size, shuffle=False, num_workers=num_workers)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, Squeeze, Unsqueeze, Preprocess, Flow
from pixyz.layers import ResNet
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
in_channels = 3
mid_channels = 64
num_scales = 2
input_dim = 32
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[in_channels, input_dim, input_dim], name="p_prior")
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_channels, mid_channels):
super().__init__()
self.resnet = ResNet(in_channels=in_channels, mid_channels=mid_channels, out_channels=in_channels * 2,
num_blocks=8, kernel_size=3, padding=1,
double_after_norm=True)
def forward(self, x):
s_t = self.resnet(x)
log_s, t = torch.chunk(s_t, 2, dim=1)
log_s = torch.tanh(log_s)
return log_s, t
# In[7]:
flow_list = [Preprocess()]
# Coupling layer (checkerboard) x3
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels, mask_type="checkerboard",
scale_translate_net=ScaleTranslateNet(in_channels, mid_channels),
inverse_mask=(i % 2 != 0)))
# Squeeze -> 3x coupling (channel-wise)
flow_list.append(Squeeze())
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels * 4, mask_type="channel_wise",
scale_translate_net=ScaleTranslateNet(in_channels * 4, mid_channels * 2),
inverse_mask=(i % 2 != 0)))
flow_list.append(Unsqueeze())
f = FlowList(flow_list)
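# A minimal sketch of what one affine coupling layer computes (illustration only; the
# actual implementation is pixyz's AffineCoupling used above). With the input split by the
# mask into (x_id, x_pass), and (log_s, t) produced by ScaleTranslateNet from x_id:
#   y_id   = x_id
#   y_pass = x_pass * exp(log_s) + t
#   log|det J| = sum(log_s)
# so the transform is invertible with a cheap, triangular-Jacobian log-determinant.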
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).cpu()
return sample
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z)
comparison = torch.cat([x.view(-1, 3, 32, 32), recon_batch]).cpu()
return comparison
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, 3, 32, 32).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# # Real NVP (CIFAR10)
@pytest.mark.performance
def test_run_real_nvp_cond():
# In[1]:
# In[2]:
# root = '../data'
# num_workers = 8
#
# transform_train = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.ToTensor()])
# transform_test = transforms.Compose([transforms.ToTensor()])
#
# train_loader = DataLoader(datasets.CIFAR10(root=root, train=True, download=True, transform=transform_train),
# batch_size=batch_size, shuffle=True, num_workers=num_workers)
#
# test_loader = DataLoader(datasets.CIFAR10(root=root, train=False, download=True, transform=transform_test),
# batch_size=batch_size, shuffle=False, num_workers=num_workers)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_cifar10, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, Squeeze, Unsqueeze, Preprocess, Flow
from pixyz.layers import ResNet
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
in_channels = 3
mid_channels = 64
num_scales = 2
input_dim = 32
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[in_channels, input_dim, input_dim], name="p_prior")
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_channels, mid_channels):
super().__init__()
self.resnet = ResNet(in_channels=in_channels, mid_channels=mid_channels, out_channels=in_channels * 2,
num_blocks=8, kernel_size=3, padding=1,
double_after_norm=True)
def forward(self, x):
s_t = self.resnet(x)
log_s, t = torch.chunk(s_t, 2, dim=1)
log_s = torch.tanh(log_s)
return log_s, t
# In[7]:
flow_list = [Preprocess()]
# Coupling layer (checkerboard) x3
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels, mask_type="checkerboard",
scale_translate_net=ScaleTranslateNet(in_channels, mid_channels),
inverse_mask=(i % 2 != 0)))
# Squeeze -> 3x coupling (channel-wise)
flow_list.append(Squeeze())
for i in range(3):
flow_list.append(AffineCoupling(in_features=in_channels * 4, mask_type="channel_wise",
scale_translate_net=ScaleTranslateNet(in_channels * 4, mid_channels * 2),
inverse_mask=(i % 2 != 0)))
flow_list.append(Unsqueeze())
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).cpu()
return sample
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z)
comparison = torch.cat([x.view(-1, 3, 32, 32), recon_batch]).cpu()
return comparison
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, 3, 32, 32).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# # Conditional Real NVP
@pytest.mark.performance
def test_run_real_nvp_cond_():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d, Shuffle, Preprocess, Reverse
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 28 * 28
y_dim = 10
z_dim = x_dim
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.fc1 = nn.Linear(in_features + y_dim, hidden_features)
self.fc2 = nn.Linear(hidden_features, hidden_features)
self.fc3_s = nn.Linear(hidden_features, in_features)
self.fc3_t = nn.Linear(hidden_features, in_features)
def forward(self, x, y):
hidden = F.relu(self.fc2(F.relu(self.fc1(torch.cat([x, y], 1)))))
log_s = torch.tanh(self.fc3_s(hidden))
t = self.fc3_t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
num_block = 5
flow_list.append(Preprocess())
for i in range(num_block):
flow_list.append(AffineCoupling(in_features=x_dim,
scale_translate_net=ScaleTranslateNet(x_dim, 1028),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(x_dim))
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"], cond_var=["y"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x, y):
with torch.no_grad():
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, y).view(-1, 1, 28, 28)
recon = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return recon
def plot_image_from_latent(z, y):
with torch.no_grad():
sample = p.inverse(z, y).view(-1, 1, 28, 28).cpu()
return sample
def plot_reconstrunction_changing_y(x, y):
y_change = torch.eye(10)[range(7)].to(device)
batch_dummy = torch.ones(x.size(0))[:, None].to(device)
recon_all = []
with torch.no_grad():
for _y in y_change:
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, batch_dummy * _y[None, :]).view(-1, 1, 28, 28)
recon_all.append(recon_batch)
recon_changing_y = torch.cat(recon_all)
recon_changing_y = torch.cat([x.view(-1, 1, 28, 28), recon_changing_y]).cpu()
return recon_changing_y
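# plot_reconstrunction_changing_y encodes x under its true label y, then decodes the same z while
# substituting each of the first 7 one-hot labels (broadcast over the batch via batch_dummy),
# visualising how the label conditioning alone changes the reconstruction.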
# In[13]:
# writer = SummaryWriter()
plot_number = 5
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
y_sample = torch.eye(10)[[plot_number] * 64].to(device)
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_latent(z_sample, y_sample)
recon_changing_y = plot_reconstrunction_changing_y(_x[:8], _y[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_change_y', recon_changing_y, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Conditional Real NVP
@pytest.mark.performance
def test_run_real_nvp_cond__():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d, Shuffle, Preprocess, Reverse
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 28 * 28
y_dim = 10
z_dim = x_dim
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.fc1 = nn.Linear(in_features + y_dim, hidden_features)
self.fc2 = nn.Linear(hidden_features, hidden_features)
self.fc3_s = nn.Linear(hidden_features, in_features)
self.fc3_t = nn.Linear(hidden_features, in_features)
def forward(self, x, y):
hidden = F.relu(self.fc2(F.relu(self.fc1(torch.cat([x, y], 1)))))
log_s = torch.tanh(self.fc3_s(hidden))
t = self.fc3_t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
num_block = 5
flow_list.append(Preprocess())
for i in range(num_block):
flow_list.append(AffineCoupling(in_features=x_dim,
scale_translate_net=ScaleTranslateNet(x_dim, 1028),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(x_dim))
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"], cond_var=["y"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, y in tqdm(train_loader):
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.train({"x": x, "y": y})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, y in test_loader:
x = x.to(device)
y = torch.eye(10)[y].to(device)
loss = model.test({"x": x, "y": y})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x, y):
with torch.no_grad():
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, y).view(-1, 1, 28, 28)
recon = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return recon
def plot_image_from_latent(z, y):
with torch.no_grad():
sample = p.inverse(z, y).view(-1, 1, 28, 28).cpu()
return sample
def plot_reconstrunction_changing_y(x, y):
y_change = torch.eye(10)[range(7)].to(device)
batch_dummy = torch.ones(x.size(0))[:, None].to(device)
recon_all = []
with torch.no_grad():
for _y in y_change:
z = p.forward(x, y, compute_jacobian=False)
recon_batch = p.inverse(z, batch_dummy * _y[None, :]).view(-1, 1, 28, 28)
recon_all.append(recon_batch)
recon_changing_y = torch.cat(recon_all)
recon_changing_y = torch.cat([x.view(-1, 1, 28, 28), recon_changing_y]).cpu()
return recon_changing_y
# In[13]:
# writer = SummaryWriter()
plot_number = 5
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
y_sample = torch.eye(10)[[plot_number] * 64].to(device)
_x, _y = next(iter(test_loader))
_x = _x.to(device)
_y = torch.eye(10)[_y].to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8], _y[:8])
sample = plot_image_from_latent(z_sample, y_sample)
recon_changing_y = plot_reconstrunction_changing_y(_x[:8], _y[:8])
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
# writer.add_images('Image_reconstrunction_change_y', recon_changing_y, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# A toy example of Real NVP (using the ML class)
@pytest.mark.performance
def test_run_real_nvp_toy():
# In[1]:
test_size = 5
# In[2]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d
from pixyz.models import ML
from pixyz.utils import print_latex
# In[3]:
# def plot_samples(points, noise):
# X_LIMS = (-1.5, 2.5)
# Y_LIMS = (-2.5, 2.5)
#
# fig = plt.figure(figsize=(8, 4))
# ax = fig.add_subplot(121)
# ax.scatter(points[:, 0], points[:, 1], alpha=0.7, s=25, c="b")
# ax.set_xlim(*X_LIMS)
# ax.set_ylim(*Y_LIMS)
# ax.set_xlabel("p(x)")
#
# X_LIMS = (-3, 3)
# Y_LIMS = (-3, 3)
#
# ax = fig.add_subplot(122)
# ax.scatter(noise[:, 0], noise[:, 1], alpha=0.7, s=25, c="r")
# ax.set_xlim(*X_LIMS)
# ax.set_ylim(*Y_LIMS)
# ax.set_xlabel("p(z)")
#
# plt.show()
# In[4]:
x_dim = 2
z_dim = x_dim
# In[5]:
# prior
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.layers = nn.Sequential(nn.Linear(in_features, hidden_features),
nn.ReLU(),
nn.Linear(hidden_features, hidden_features),
nn.ReLU())
self.log_s = nn.Linear(hidden_features, in_features)
self.t = nn.Linear(hidden_features, in_features)
def forward(self, x):
hidden = self.layers(x)
log_s = torch.tanh(self.log_s(hidden))
t = self.t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
for i in range(5):
scale_translate_net = nn.Sequential(nn.Linear(x_dim, 256),
nn.ReLU(),
nn.Linear(256, 256),
nn.ReLU(),
nn.Linear(256, x_dim * 2))
flow_list.append(AffineCoupling(in_features=2,
scale_translate_net=ScaleTranslateNet(x_dim, 256),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(2))
f = FlowList(flow_list)
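# Note that the nn.Sequential built inside the loop above is never used: each AffineCoupling is
# parameterised by its own ScaleTranslateNet(x_dim, 256), with the coupling mask alternating via
# inverse_mask=(i % 2 != 0); in_features=2 simply equals x_dim for this 2-D toy problem.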
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-2})
print(model)
print_latex(model)
# In[10]:
# plot training set
from sklearn import datasets
x = datasets.make_moons(n_samples=test_size, noise=0.1)[0].astype("float32")
noise = prior.sample(batch_n=test_size)["z"].data.cpu()
# plot_samples(x, noise)
# In[11]:
for epoch in range(epochs):
x = datasets.make_moons(n_samples=batch_size, noise=0.1)[0].astype("float32")
x = torch.tensor(x).to(device)
loss = model.train({"x": x})
if epoch % 500 == 0:
print('Epoch: {} Train loss: {:.4f}'.format(epoch, loss))
# samples
samples = p.sample(batch_n=test_size)["x"].data.cpu()
# inference
_x = datasets.make_moons(n_samples=test_size, noise=0.1)[0].astype("float32")
_x = torch.tensor(_x).to(device)
noise = p.inference({"x": _x})["z"].data.cpu()
# plot_samples(samples, noise)
# In[12]:
samples = p.sample(batch_n=test_size)["x"].data.cpu()
# inference
_x = datasets.make_moons(n_samples=test_size, noise=0.1)[0].astype("float32")
_x = torch.tensor(_x).to(device)
noise = p.inference({"x": _x})["z"].data.cpu()
# plot_samples(samples, noise)
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Real NVP
@pytest.mark.performance
def test_run_real_nvp():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 4, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, InverseTransformedDistribution
from pixyz.flows import AffineCoupling, FlowList, BatchNorm1d, Shuffle, Preprocess, Reverse
from pixyz.models import ML
from pixyz.utils import print_latex
# In[4]:
x_dim = 28 * 28
z_dim = x_dim
# In[5]:
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_prior").to(device)
# In[6]:
class ScaleTranslateNet(nn.Module):
def __init__(self, in_features, hidden_features):
super().__init__()
self.fc1 = nn.Linear(in_features, hidden_features)
self.fc2 = nn.Linear(hidden_features, hidden_features)
self.fc3_s = nn.Linear(hidden_features, in_features)
self.fc3_t = nn.Linear(hidden_features, in_features)
def forward(self, x):
hidden = F.relu(self.fc2(F.relu(self.fc1(x))))
log_s = torch.tanh(self.fc3_s(hidden))
t = self.fc3_t(hidden)
return log_s, t
# In[7]:
# flow
flow_list = []
num_block = 5
flow_list.append(Preprocess())
for i in range(num_block):
flow_list.append(AffineCoupling(in_features=x_dim,
scale_translate_net=ScaleTranslateNet(x_dim, 1028),
inverse_mask=(i % 2 != 0)))
flow_list.append(BatchNorm1d(x_dim))
f = FlowList(flow_list)
# In[8]:
# inverse transformed distribution (z -> f^-1 -> x)
p = InverseTransformedDistribution(prior=prior, flow=f, var=["x"]).to(device)
print_latex(p)
# In[9]:
model = ML(p, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x):
with torch.no_grad():
z = p.forward(x, compute_jacobian=False)
recon_batch = p.inverse(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.inverse(z_sample).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# writer = SummaryWriter()
z_sample = torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder (using the Model class)
@pytest.mark.performance
def test_run_vae_model():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli
from pixyz.losses import KullbackLeibler, Expectation as E
from pixyz.models import Model
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# In[8]:
loss = (KullbackLeibler(q, prior) - E(q, p.log_prob())).mean()
print(loss)
print_latex(loss)
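# This is the per-example negative ELBO averaged over the batch:
# KL(q(z|x) || p_prior(z)) - E_{q(z|x)}[log p(x|z)], i.e. the standard VAE objective to minimise.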
# In[9]:
model = Model(loss=loss, distributions=[p, q],
optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[12]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x}, return_all=False)
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# writer = SummaryWriter('/runs/vae_model')
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
# In[ ]:
# In[ ]:
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder (using the VAE class)
@pytest.mark.performance
def test_run_vae_with_vae_class():
# * Original paper: Auto-Encoding Variational Bayes (https://arxiv.org/pdf/1312.6114.pdf)
# In[1]:
# In[2]:
# MNIST
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.utils import print_latex
# ## Define probability distributions
# Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$
# Generator: $p_{\theta}(x|z) = \cal B(x; \lambda = g(z))$
# Inference: $q_{\phi}(z|x) = \cal N(z; \mu=f_\mu(x), \sigma^2=f_{\sigma^2}(x))$
# In[4]:
from pixyz.distributions import Normal, Bernoulli
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
"""
parameterizes q(z | x)
inferred z follows a Gaussian distribution with mean 'loc' and standard deviation 'scale'
z ~ N(loc, scale)
"""
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
"""
given the observation x,
return the mean and standard deviation of the Gaussian distribution
"""
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
"""
parameterizes the Bernoulli (for MNIST) observation likelihood p(x | z)
"""
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
"""
given the latent variable z,
return the success probability of the Bernoulli distribution
"""
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior p(z)
# z ~ N(0, 1)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# ## Define VAE model using VAE Model Class
# - https://docs.pixyz.io/en/latest/models.html#vae
# In[8]:
from pixyz.losses import KullbackLeibler
# define additional loss terms for regularizing representation of latent variables
kl = KullbackLeibler(q, prior)
print_latex(kl)
# In[9]:
from pixyz.models import VAE
model = VAE(encoder=q, decoder=p, regularizer=kl, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
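# The VAE class assembles the negative ELBO from the reconstruction term and this regularizer
# internally; a minimal equivalent explicit construction (sketch, mirroring the Model-class example
# elsewhere in this file) would be:
# from pixyz.losses import Expectation as E
# explicit_loss = (kl - E(q, p.log_prob())).mean()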
# ## Define Train and Test loop using model
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# ## Reconstruct image and generate image
# In[12]:
def plot_reconstrunction(x):
"""
reconstruct image given input observation x
"""
with torch.no_grad():
# infer and sampling z using inference model q `.sample()` method
z = q.sample({"x": x}, return_all=False)
# reconstruct image from inferred latent variable z using Generator model p `.sample_mean()` method
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
# concatenate original image and reconstructed image for comparison
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
"""
generate new image given latent variable z
"""
with torch.no_grad():
# generate image from latent variable z using Generator model p `.sample_mean()` method
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# for visualising in TensorBoard
# writer = SummaryWriter()
# fix latent variable z for watching generative model improvement
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
# set-aside observation for watching generative model improvement
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder
@pytest.mark.performance
def test_run_vae():
# * Original paper: Auto-Encoding Variational Bayes (https://arxiv.org/pdf/1312.6114.pdf)
# In[1]:
# In[2]:
# MNIST
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.utils import print_latex
# ## Define probability distributions
# Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$
# Generator: $p_{\theta}(x|z) = \cal B(x; \lambda = g(z))$
# Inference: $q_{\phi}(z|x) = \cal N(z; \mu=f_\mu(x), \sigma^2=f_{\sigma^2}(x))$
# In[4]:
from pixyz.distributions import Normal, Bernoulli
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
"""
parameterizes q(z | x)
inferred z follows a Gaussian distribution with mean 'loc' and standard deviation 'scale'
z ~ N(loc, scale)
"""
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
"""
given the observation x,
return the mean and standard deviation of the Gaussian distribution
"""
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
"""
parameterizes the Bernoulli (for MNIST) observation likelihood p(x | z)
"""
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
"""
given the latent variable z,
return the success probability of the Bernoulli distribution
"""
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior p(z)
# z ~ N(0, 1)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
# In[5]:
print(prior)
print_latex(prior)
# In[6]:
print(p)
print_latex(p)
# In[7]:
print(q)
print_latex(q)
# ## Define Loss function
# Loss function:
#
# $\frac{1}{N} \sum_{i=1}^{N}\left[K L\left(q\left(z | x^{(i)}\right) \| p_{prior}(z)\right)-\mathbb{E}_{q\left(z | x^{(i)}\right)}\left[\log p\left(x^{(i)} | z\right)\right]\right]$
# In[8]:
from pixyz.losses import LogProb, KullbackLeibler, Expectation as E
loss = (KullbackLeibler(q, prior) - E(q, LogProb(p))).mean()
print_latex(loss)
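# The Loss object is evaluated lazily; given a batch x, loss.eval({"x": x}) returns the scalar
# objective (Model.train below performs this evaluation plus the backward pass and optimiser step).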
# ## Define VAE model using Model Class
# - https://docs.pixyz.io/en/latest/models.html#model
# In[9]:
from pixyz.models import Model
model = Model(loss=loss, distributions=[p, q],
optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# ## Define Train and Test loop using model
# In[10]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[11]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# ## Reconstruct image and generate image
# In[12]:
def plot_reconstrunction(x):
"""
reconstruct image given input observation x
"""
with torch.no_grad():
# infer and sampling z using inference model q `.sample()` method
z = q.sample({"x": x}, return_all=False)
# reconstruct image from inferred latent variable z using Generator model p `.sample_mean()` method
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
# concatenate original image and reconstructed image for comparison
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
"""
generate new image given latent variable z
"""
with torch.no_grad():
# generate image from latent variable z using Generator model p `.sample_mean()` method
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[13]:
# for visualising in TensorBoard
# writer = SummaryWriter()
# fix latent variable z for watching generative model improvement
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
# set-aside observation for watching generative model improvement
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# !/usr/bin/env python
# coding: utf-8
# # Variational autoencoder (using the VI class)
@pytest.mark.performance
def test_run_vi():
# In[1]:
# In[2]:
# root = '../data'
# transform = transforms.Compose([transforms.ToTensor(),
# transforms.Lambda(lambd=lambda x: x.view(-1))])
# kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
#
# train_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=True, transform=transform, download=True),
# shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
# datasets.MNIST(root=root, train=False, transform=transform),
# shuffle=False, **kwargs)
kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True}
train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs)
# In[3]:
from pixyz.distributions import Normal, Bernoulli
from pixyz.models import VI
from pixyz.utils import print_latex
# In[4]:
x_dim = 784
z_dim = 64
# inference model q(z|x)
class Inference(Normal):
def __init__(self):
super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q")
self.fc1 = nn.Linear(x_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc31 = nn.Linear(512, z_dim)
self.fc32 = nn.Linear(512, z_dim)
def forward(self, x):
h = F.relu(self.fc1(x))
h = F.relu(self.fc2(h))
return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))}
# generative model p(x|z)
class Generator(Bernoulli):
def __init__(self):
super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p")
self.fc1 = nn.Linear(z_dim, 512)
self.fc2 = nn.Linear(512, 512)
self.fc3 = nn.Linear(512, x_dim)
def forward(self, z):
h = F.relu(self.fc1(z))
h = F.relu(self.fc2(h))
return {"probs": torch.sigmoid(self.fc3(h))}
p = Generator().to(device)
q = Inference().to(device)
# prior model p(z)
prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.),
var=["z"], features_shape=[z_dim], name="p_{prior}").to(device)
p_joint = p * prior
# In[5]:
print(p_joint)
print_latex(p_joint)
# In[6]:
print(q)
print_latex(q)
# In[7]:
model = VI(p_joint, q, optimizer=optim.Adam, optimizer_params={"lr": 1e-3})
print(model)
print_latex(model)
# In[8]:
def train(epoch):
train_loss = 0
for x, _ in tqdm(train_loader):
x = x.to(device)
loss = model.train({"x": x})
train_loss += loss
train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset)
print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss))
return train_loss
# In[9]:
def test(epoch):
test_loss = 0
for x, _ in test_loader:
x = x.to(device)
loss = model.test({"x": x})
test_loss += loss
test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset)
print('Test loss: {:.4f}'.format(test_loss))
return test_loss
# In[10]:
def plot_reconstrunction(x):
with torch.no_grad():
z = q.sample({"x": x}, return_all=False)
recon_batch = p.sample_mean(z).view(-1, 1, 28, 28)
comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu()
return comparison
def plot_image_from_latent(z_sample):
with torch.no_grad():
sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu()
return sample
# In[11]:
# writer = SummaryWriter()
z_sample = 0.5 * torch.randn(64, z_dim).to(device)
_x, _ = next(iter(test_loader))
_x = _x.to(device)
for epoch in range(1, epochs + 1):
train_loss = train(epoch)
test_loss = test(epoch)
recon = plot_reconstrunction(_x[:8])
sample = plot_image_from_latent(z_sample)
# writer.add_scalar('train_loss', train_loss.item(), epoch)
# writer.add_scalar('test_loss', test_loss.item(), epoch)
#
# writer.add_images('Image_from_latent', sample, epoch)
# writer.add_images('Image_reconstrunction', recon, epoch)
#
# writer.close()
# In[ ]:
| 126,799 | 29.517449 | 554 | py |
pixyz | pixyz-main/tests/distributions/test_distribution.py | import pytest
from os.path import join as pjoin
import torch
from pixyz.distributions import Normal, MixtureModel, Categorical, FactorizedBernoulli
from pixyz.utils import lru_cache_for_sample_dict
from pixyz.losses import KullbackLeibler
from pixyz.models import VAE
class TestGraph:
def test_rename_atomdist(self):
normal = Normal(var=['x'], name='p')
graph = normal.graph
assert graph.name == 'p'
normal.name = 'q'
assert graph.name == 'q'
def test_print(self):
normal = Normal(var=['x'], name='p')
print(normal.graph)
def test_set_option(self):
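# set_option attaches sampling options (e.g. batch_n, sample_shape) to the factors generating the
# listed variables, so those shapes apply when the joint distribution is sampled; passing an empty
# dict afterwards clears the options again.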
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
dist.graph.set_option(dict(batch_n=4, sample_shape=(2, 3)), ['y'])
sample = dist.sample()
assert sample['y'].shape == torch.Size([2, 3, 4])
assert sample['x'].shape == torch.Size([2, 3, 4])
dist.graph.set_option({}, ['y'])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=None).shape == torch.Size([2])
assert dist.get_log_prob(sample,
sum_features=False).shape == torch.Size([2, 3, 4])
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * FactorizedBernoulli(
var=['y'], probs=torch.tensor([0.3, 0.8]))
dist.graph.set_option(dict(batch_n=3, sample_shape=(4,)), ['y'])
sample = dist.sample()
assert sample['y'].shape == torch.Size([4, 3, 2])
assert sample['x'].shape == torch.Size([4, 3, 2])
dist.graph.set_option(dict(), ['y'])
assert dist.get_log_prob(sample, sum_features=True, feature_dims=[-1]).shape == torch.Size([4, 3])
def test_sample_mean(self):
dist = Normal(var=['x'], loc=0, scale=1) * Normal(var=['y'], cond_var=['x'], loc='x', scale=1)
assert dist.sample(sample_mean=True)['y'] == torch.zeros(1)
def test_input_extra_var(self):
normal = Normal(var=['x'], loc=0, scale=1) * Normal(var=['y'], loc=0, scale=1)
assert set(normal.sample({'z': torch.zeros(1)})) == set(('x', 'y', 'z'))
assert normal.get_log_prob({'y': torch.zeros(1), 'x': torch.zeros(1),
'z': torch.zeros(1)}).shape == torch.Size([1])
assert set(normal.sample({'x': torch.zeros(1)})) == set(('x', 'y'))
class TestDistributionBase:
def test_init_with_scalar_params(self):
normal = Normal(loc=0, scale=1, features_shape=[2])
assert normal.sample()['x'].shape == torch.Size([1, 2])
assert normal.features_shape == torch.Size([2])
normal = Normal(loc=0, scale=1)
assert normal.sample()['x'].shape == torch.Size([1])
assert normal.features_shape == torch.Size([])
def test_batch_n(self):
normal = Normal(loc=0, scale=1)
assert normal.sample(batch_n=3)['x'].shape == torch.Size([3])
def test_input_extra_var(self):
normal = Normal(loc=0, scale=1)
assert set(normal.sample({'y': torch.zeros(1)})) == set(('x', 'y'))
assert normal.get_log_prob({'y': torch.zeros(1), 'x': torch.zeros(1)}).shape == torch.Size([1])
assert set(normal.sample({'x': torch.zeros(1)})) == set(('x'))
def test_sample_mean(self):
dist = Normal(loc=0, scale=1)
assert dist.sample(sample_mean=True)['x'] == torch.zeros(1)
@pytest.mark.parametrize(
"dist", [
Normal(loc=0, scale=1),
Normal(var=['x'], loc=0, scale=1) * Normal(var=['y'], loc=0, scale=1),
# Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1),
],
)
def test_get_log_prob_feature_dims(self, dist):
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=None).shape == torch.Size([2])
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=[-2]).shape == torch.Size([2, 4])
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=[0, 1]).shape == torch.Size([4])
assert dist.get_log_prob(dist.sample(batch_n=4, sample_shape=(2, 3)),
sum_features=True, feature_dims=[]).shape == torch.Size([2, 3, 4])
def test_get_log_prob_feature_dims2(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
dist.graph.set_option(dict(batch_n=4, sample_shape=(2, 3)), ['y'])
sample = dist.sample()
assert sample['y'].shape == torch.Size([2, 3, 4])
list(dist.graph._factors_from_variable('y'))[0].option = {}
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=None).shape == torch.Size([2])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=[-2]).shape == torch.Size([2, 4])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=[0, 1]).shape == torch.Size([4])
assert dist.get_log_prob(sample,
sum_features=True, feature_dims=[]).shape == torch.Size([2, 3, 4])
@pytest.mark.parametrize(
"dist", [
Normal(loc=0, scale=1),
Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1),
])
def test_unknown_option(self, dist):
x_dict = dist.sample(unknown_opt=None)
dist.get_log_prob(x_dict, unknown_opt=None)
class TestReplaceVarDistribution:
def test_get_params(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1)
result = dist.get_params({'y': torch.ones(1)})
assert list(result.keys()) == ['loc', 'scale']
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
result = dist.get_params({'z': torch.ones(1)})
assert list(result.keys()) == ['loc', 'scale']
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
with pytest.raises(ValueError):
dist.get_params({'y': torch.ones(1)})
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(x='z')
result = dist.get_params({'y': torch.ones(1)})
assert list(result.keys()) == ['loc', 'scale']
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1) * Normal(var=['y'], loc=0, scale=1)
with pytest.raises(NotImplementedError):
dist.get_params()
def test_sample_mean(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1)
result = dist.sample_mean({'y': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
result = dist.sample_mean({'z': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
with pytest.raises(ValueError):
dist.sample_mean({'y': torch.ones(1)})
def test_sample_variance(self):
dist = Normal(var=['x'], cond_var=['y'], loc=2, scale='y')
result = dist.sample_variance({'y': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc=2, scale='y').replace_var(y='z')
result = dist.sample_variance({'z': torch.ones(1)})
assert result == torch.ones(1)
dist = Normal(var=['x'], cond_var=['y'], loc=2, scale='y').replace_var(y='z')
with pytest.raises(ValueError):
dist.sample_variance({'y': torch.ones(1)})
def test_get_entropy(self):
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1)
truth = dist.get_entropy({'y': torch.ones(1)})
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z', x='y')
result = dist.get_entropy({'z': torch.ones(1)})
assert result == truth
dist = Normal(var=['x'], cond_var=['y'], loc='y', scale=1).replace_var(y='z')
with pytest.raises(ValueError):
dist.get_entropy({'y': torch.ones(1)})
class TestMixtureDistribution:
def test_sample_mean(self):
dist = MixtureModel([Normal(loc=0, scale=1), Normal(loc=1, scale=1)], Categorical(probs=torch.tensor([1., 2.])))
assert dist.sample(sample_mean=True)['x'] == torch.ones(1)
def test_memoization():
exec_order = []
class Encoder(Normal):
def __init__(self, exec_order):
super().__init__(var=["z"], cond_var=["x"], name="q")
self.linear = torch.nn.Linear(10, 10)
self.exec_order = exec_order
@lru_cache_for_sample_dict()
def get_params(self, params_dict={}, **kwargs):
return super().get_params(params_dict, **kwargs)
def forward(self, x):
exec_order.append("E")
return {"loc": self.linear(x), "scale": 1.0}
class Decoder(Normal):
def __init__(self, exec_order):
super().__init__(var=["x"], cond_var=["z"], name="p")
self.exec_order = exec_order
@lru_cache_for_sample_dict()
def get_params(self, params_dict={}, **kwargs):
return super().get_params(params_dict, **kwargs)
def forward(self, z):
self.exec_order.append("D")
return {"loc": z, "scale": 1.0}
def prior():
return Normal(var=["z"], name="p_{prior}", features_shape=[10], loc=torch.tensor(0.), scale=torch.tensor(1.))
q = Encoder(exec_order)
p = Decoder(exec_order)
prior = prior()
kl = KullbackLeibler(q, prior)
mdl = VAE(q, p, regularizer=kl, optimizer=torch.optim.Adam, optimizer_params={"lr": 1e-3})
x = torch.zeros((10, 10))
mdl.train({"x": x})
assert exec_order == ["E", "D"]
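# Without lru_cache_for_sample_dict, q.get_params would run once for the KL regularizer and once
# more for the reconstruction term inside VAE.train, so "E" would appear twice; the cache ensures
# each network forward executes exactly once per training step.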
@pytest.mark.parametrize(
"no_contiguous_tensor", [
torch.zeros(2, 3),
torch.zeros(2, 3).T,
torch.zeros(1).expand(3),
]
)
def test_save_dist(tmpdir, no_contiguous_tensor):
# pull request:#110
ones = torch.ones_like(no_contiguous_tensor)
p = Normal(loc=no_contiguous_tensor, scale=ones)
save_path = pjoin(tmpdir, "tmp.pt")
torch.save(p.state_dict(), save_path)
q = Normal(loc=ones, scale=3 * ones)
assert not torch.all(no_contiguous_tensor == q.loc).item()
# load_state_dict copies the saved tensors into q's existing parameters/buffers
q = Normal(loc=ones, scale=ones)
q.load_state_dict(torch.load(save_path))
assert torch.all(no_contiguous_tensor == q.loc).item()
if __name__ == "__main__":
TestReplaceVarDistribution().test_get_entropy()
| 10,844 | 41.034884 | 120 | py |
pixyz | pixyz-main/tests/distributions/test_expornential_distributions.py | import pytest
import torch
from pixyz.distributions.exponential_distributions import RelaxedBernoulli, Normal
class TestNormal:
def test_init_with_same_param(self):
n = Normal(var=['x'], cond_var=['y'], loc='y', scale='y')
result = n.sample({'y': torch.ones(2, 3)})
assert result['x'].shape == (2, 3)
class TestRelaxedBernoulli:
def test_log_prob_of_hard_value(self):
rb = RelaxedBernoulli(var=['x'], temperature=torch.tensor(0.5), probs=torch.ones(2))
assert self.nearly_eq(rb.get_log_prob({'x': torch.tensor([0., 1.])}), torch.tensor([-15.9424]))
def nearly_eq(self, tensor1, tensor2):
return abs(tensor1.item() - tensor2.item()) < 0.001
def test_sample_mean(self):
rb = RelaxedBernoulli(var=['x'], temperature=torch.tensor(0.5), probs=torch.tensor([0.5, 0.8]))
with pytest.raises(NotImplementedError):
rb.sample(sample_mean=True)
| 932 | 34.884615 | 103 | py |
pixyz | pixyz-main/tests/models/test_model.py | import os
import torch
import torch.nn as nn
from pixyz.distributions import Normal
from pixyz.losses import CrossEntropy
from pixyz.models import Model
class TestModel:
def _make_model(self, loc):
class Dist(Normal):
def __init__(self):
super().__init__(loc=loc, scale=1)
self.module = nn.Linear(2, 2)
p = Dist()
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
loss = CrossEntropy(p, p).to(device)
model = Model(loss=loss, distributions=[p])
return model
def test_save_load(self, tmp_path):
model = self._make_model(0)
save_path = os.path.join(tmp_path, 'model.pth')
model.save(save_path)
model = self._make_model(1)
p: Normal = model.distributions[0]
assert p.get_params()['loc'] == 1
model.load(save_path)
p: Normal = model.distributions[0]
assert p.get_params()['loc'] == 0
| 1,009 | 24.897436 | 55 | py |
pixyz | pixyz-main/tests/losses/test_iteration.py | import torch
from pixyz.losses import IterativeLoss, Parameter, Expectation
from pixyz.distributions import Normal
class TestIterativeLoss:
def test_print_latex(self):
t_max = 3
itr = IterativeLoss(Parameter('t'), max_iter=t_max, timestep_var='t')
assert itr.loss_text == r"\sum_{t=0}^{" + str(t_max - 1) + "} t"
def test_time_specific_step_loss(self):
t_max = 3
itr = IterativeLoss(Parameter('t'), max_iter=t_max, timestep_var='t')
assert itr.eval() == sum(range(t_max))
def test_input_var(self):
q = Normal(var=['z'], cond_var=['x'], loc='x', scale=1)
p = Normal(var=['y'], cond_var=['z'], loc='z', scale=1)
e = Expectation(q, p.log_prob())
assert set(e.input_var) == set(('x', 'y'))
assert e.eval({'y': torch.zeros(1), 'x': torch.zeros(1)}).shape == torch.Size([1])
def test_input_extra_var(self):
q = Normal(var=['z'], cond_var=['x'], loc='x', scale=1)
p = Normal(var=['y'], cond_var=['z'], loc='z', scale=1)
e = Expectation(q, p.log_prob())
assert set(e.eval({'y': torch.zeros(1), 'x': torch.zeros(1),
'w': torch.zeros(1)}, return_dict=True)[1]) == set(('w', 'x', 'y', 'z'))
assert set(e.eval({'y': torch.zeros(1), 'x': torch.zeros(1),
'z': torch.zeros(1)}, return_dict=True)[1]) == set(('x', 'y', 'z'))
| 1,412 | 43.15625 | 99 | py |
pixyz | pixyz-main/tutorial/English/utils.py | from torch.utils.data import Dataset
import pickle
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
def imshow(img_tensors):
img = torchvision.utils.make_grid(img_tensors)
npimg = img.numpy()
plt.figure(figsize=(16, 12))
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class DMMDataset(Dataset):
def __init__(self, pickle_path="cartpole_28.pickle"):
with open(pickle_path, mode='rb') as f:
data = pickle.load(f)
episode_frames, actions = data
# episode_frames: np.array([episode_num, one_episode_length, height, width, Channels]) (10000, 30, 28, 28, 3)
# actions: np.array([episode_num, one_episode_length]) (10000, 30)
# HWC → CHW
episode_frames = episode_frames.transpose(0, 1, 4, 2, 3) / 1.0
# print(episode_frames.dtype)
actions = actions[:, :, np.newaxis]
self.episode_frames = torch.from_numpy(episode_frames.astype(np.float32))
self.actions = torch.from_numpy(actions.astype(np.float32))
self.mean = torch.zeros_like(self.episode_frames[0])
self.std = torch.zeros_like(self.episode_frames[0])
self.mean[:, 0, :, :] = 182.6091
self.mean[:, 1, :, :] = 182.6091
self.mean[:, 2, :, :] = 182.6091
self.std[:, 0, :, :] = 45.5565
self.std[:, 1, :, :] = 47.6260
self.std[:, 2, :, :] = 50.7284
def __len__(self):
return len(self.episode_frames)
def __getitem__(self, idx):
return {
"episode_frames": (self.episode_frames[idx] - self.mean) / self.std,
"actions": self.actions[idx]
}
def _calculate_mean_std(self):
print(self.episode_frames.shape)
std = torch.std(self.episode_frames, dim=(0, 1, 3, 4))
mean = torch.mean(self.episode_frames, dim=(0, 1, 3, 4))
print("mean: ", mean)
print(mean.shape)
print("std: ", std)
print(std.shape)
# mean: tensor([182.6091, 182.6091, 182.6091])
# torch.Size([3])
# std: tensor([45.5565, 47.6260, 50.7284])
# torch.Size([3])
def postprocess(image):
image_ = image.detach().clone()
# print(image_.shape)
mean = torch.ones_like(image_)
std = torch.ones_like(image_)
mean[:, 0, :, :] = 182.6091
mean[:, 1, :, :] = 182.6091
mean[:, 2, :, :] = 182.6091
std[:, 0, :, :] = 45.5565
std[:, 1, :, :] = 47.6260
std[:, 2, :, :] = 50.7284
image_ = image_ * std + mean
image_ = torch.clamp(image_, min=0.0, max=255.0) / 255.
return image_
if __name__ == "__main__":
data_set = DMMDataset()
data_set._calculate_mean_std()
| 2,721 | 29.931818 | 117 | py |
pixyz | pixyz-main/tutorial/Japanese/utils.py | from torch.utils.data import Dataset
import pickle
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
def imshow(img_tensors):
img = torchvision.utils.make_grid(img_tensors)
npimg = img.numpy()
plt.figure(figsize=(16, 12))
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
class DMMDataset(Dataset):
def __init__(self, pickle_path="cartpole_28.pickle"):
with open(pickle_path, mode='rb') as f:
data = pickle.load(f)
episode_frames, actions = data
# episode_frames: np.array([episode_num, one_episode_length, height, width, Channels]) (10000, 30, 28, 28, 3)
# actions: np.array([episode_num, one_episode_length]) (10000, 30)
# HWC → CHW
episode_frames = episode_frames.transpose(0, 1, 4, 2, 3) / 1.0
# print(episode_frames.dtype)
actions = actions[:, :, np.newaxis]
self.episode_frames = torch.from_numpy(episode_frames.astype(np.float32))
self.actions = torch.from_numpy(actions.astype(np.float32))
self.mean = torch.zeros_like(self.episode_frames[0])
self.std = torch.zeros_like(self.episode_frames[0])
self.mean[:, 0, :, :] = 182.6091
self.mean[:, 1, :, :] = 182.6091
self.mean[:, 2, :, :] = 182.6091
self.std[:, 0, :, :] = 45.5565
self.std[:, 1, :, :] = 47.6260
self.std[:, 2, :, :] = 50.7284
def __len__(self):
return len(self.episode_frames)
def __getitem__(self, idx):
return {
"episode_frames": (self.episode_frames[idx] - self.mean) / self.std,
"actions": self.actions[idx]
}
def _calculate_mean_std(self):
print(self.episode_frames.shape)
std = torch.std(self.episode_frames, dim=(0, 1, 3, 4))
mean = torch.mean(self.episode_frames, dim=(0, 1, 3, 4))
print("mean: ", mean)
print(mean.shape)
print("std: ", std)
print(std.shape)
# mean: tensor([182.6091, 182.6091, 182.6091])
# torch.Size([3])
# std: tensor([45.5565, 47.6260, 50.7284])
# torch.Size([3])
def postprocess(image):
image_ = image.detach().clone()
# print(image_.shape)
mean = torch.ones_like(image_)
std = torch.ones_like(image_)
mean[:, 0, :, :] = 182.6091
mean[:, 1, :, :] = 182.6091
mean[:, 2, :, :] = 182.6091
std[:, 0, :, :] = 45.5565
std[:, 1, :, :] = 47.6260
std[:, 2, :, :] = 50.7284
image_ = image_ * std + mean
image_ = torch.clamp(image_, min=0.0, max=255.0) / 255.
return image_
if __name__ == "__main__":
data_set = DMMDataset()
data_set._calculate_mean_std()
| 2,721 | 29.931818 | 117 | py |
archive-query-log | archive-query-log-main/archive_query_log/results/test/test_facebook_serp_parsing.py | # flake8: noqa
# This file is auto-generated by generate_tests.py.
from archive_query_log.results.test.test_utils import verify_serp_parsing
def test_parse_query_facebook_vanilla_1481832838():
verify_serp_parsing(
"https://web.archive.org/web/20161215201358id_/https://www.facebook.com/search/photos/?q=%23vanilla&ref=top_filter",
"facebook",
)
def test_parse_query_facebook_virpi_soikkeli_1623257178():
verify_serp_parsing(
"https://web.archive.org/web/20210609164618id_/http://www.facebook.com/search/?q=virpi+soikkeli&o=2048&init=ffs",
"facebook",
)
def test_parse_query_facebook_deanna_sanchez_1629215596():
verify_serp_parsing(
"https://web.archive.org/web/20210817155316id_/https://www.facebook.com/search/web/direct_search.php?q=deanna+sanchez&dpr=1&ajaxpipe=1&ajaxpipe_token=AXivh35bR9s2xXXV&quickling%5Bversion%5D=3128950%3B0%3B&__user=100004323191030&__a=1&__dyn=5V4cjEzUGByC5A9UrEwlg94qbxqbAKGiyEyfirYw8ovyui9zob4q2i5UK3u2CEaUZ1ebkwy6UnGieKcVrDG4XzEa8iGt0gKum4UpKqqbAWCDxi5UWfz8gAxu1iyECQum2m4oqyU9omUmC-Wx2vgqx-Eth8gUKElCUmyE8XDh45EgAwzCwYyrK4rGUohES-9yaBy8CEO784afxK9yUvy8lUGaHCG2C&__af=j0&__req=jsonp_4&__be=0&__pc=PHASED%3ADEFAULT&__rev=3128950&__spin_r=3128950&__spin_b=trunk&__spin_t=1498867474&__adt=4",
"facebook",
)
def test_parse_query_facebook_mr_robot_1469187052():
verify_serp_parsing(
"https://web.archive.org/web/20160722113052id_/https://www.facebook.com/search/top/?q=mr%20robot",
"facebook",
)
def test_parse_query_facebook_https_peelarchivesblog_com_about_peel_1599241783():
verify_serp_parsing(
"https://web.archive.org/web/20200904174943id_/https://www.facebook.com/search/top?q=https%3A%2F%2Fpeelarchivesblog.com%2Fabout-peel%2F",
"facebook",
)
def test_parse_query_facebook_trumptrain_1461904486():
verify_serp_parsing(
"https://web.archive.org/web/20160429043446id_/https://www.facebook.com/search/top/?q=%23TrumpTrain&ref=top_filter&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_bernieorbust_1467812085():
verify_serp_parsing(
"https://web.archive.org/web/20160706133445id_/https://www.facebook.com/search/latest/?q=%23BernieOrBust&ref=top_filter",
"facebook",
)
def test_parse_query_facebook_wisconsin_1463064570():
verify_serp_parsing(
"https://web.archive.org/web/20160512144930id_/https://www.facebook.com/search/top/?q=wisconsin&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_alda_lesbiennes_refugiees_1615284371():
verify_serp_parsing(
"https://web.archive.org/web/20210309100611id_/https://www.facebook.com/search/top/?q=alda%20-%20lesbiennes%20r%C3%A9fugi%C3%A9es&epa=SEARCH_BOX",
"facebook",
)
def test_parse_query_facebook_mens_health_survival_of_the_fittest_1619473718():
verify_serp_parsing(
"https://web.archive.org/web/20210426214838id_/http://www.facebook.com/search/?q=mens+health+survival+of+the+fittest&init=quick",
"facebook",
)
def test_parse_query_facebook_www_9xcb_biz_webex_setup_was_unsuccessful_error_23_1404412853():
verify_serp_parsing(
"https://web.archive.org/web/20140703184053id_/http://www.facebook.com/search.php?q=www.9xcb.biz/?WebEx+Setup+Was+Unsuccessful+Error+23",
"facebook",
)
def test_parse_query_facebook_tag_someone_who_needs_this_1587554575():
verify_serp_parsing(
"https://web.archive.org/web/20200422112255id_/https://www.facebook.com/search/videos/?q=tag%20someone%20who%20needs%20this&epa=FILTERS&filters=eyJycF9jcmVhdGlvbl90aW1lIjoie1wibmFtZVwiOlwiY3JlYXRpb25fdGltZVwiLFwiYXJnc1wiOlwie1xcXCJzdGFydF95ZWFyXFxcIjpcXFwiMjAyMFxcXCIsXFxcInN0YXJ0X21vbnRoXFxcIjpcXFwiMjAyMC0wNFxcXCIsXFxcImVuZF95ZWFyXFxcIjpcXFwiMjAyMFxcXCIsXFxcImVuZF9tb250aFxcXCI6XFxcIjIwMjAtMDRcXFwiLFxcXCJzdGFydF9kYXlcXFwiOlxcXCIyMDIwLTA0LTIwXFxcIixcXFwiZW5kX2RheVxcXCI6XFxcIjIwMjAtMDQtMjZcXFwifVwifSJ9",
"facebook",
)
def test_parse_query_facebook_blog_post_334_bootload_1567494170():
verify_serp_parsing(
"https://web.archive.org/web/20190903070250id_/https://developers.facebook.com/search/?q=blog+post+334+bootload¬found=0&search_filter_option=docs",
"facebook",
)
def test_parse_query_facebook_rosy_20gupta_1494524363():
verify_serp_parsing(
"https://web.archive.org/web/20170511173923id_/https://www.facebook.com/search/top/?q=rosy%2520gupta",
"facebook",
)
def test_parse_query_facebook_social_plugins_boutons_jaime_envoyer_partager_et_citations_js_exec_je31_1567485463():
verify_serp_parsing(
"https://web.archive.org/web/20190903043743id_/https://developers.facebook.com/search/?q=social+plugins+boutons+jaime+envoyer+partager+et+citations+js+exec+Je31¬found=1&search_filter_option=docs",
"facebook",
)
def test_parse_query_facebook_greet_1623235952():
verify_serp_parsing(
"https://web.archive.org/web/20210609105232id_/http://www.facebook.com/search/?q=Greet&o=2048&init=ffs",
"facebook",
)
def test_parse_query_facebook_cruzcrew_1459272010():
verify_serp_parsing(
"https://web.archive.org/web/20160329172010id_/https://www.facebook.com/search/photos/?q=%23CruzCrew&ref=top_filter&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_ineligible_1466870871():
verify_serp_parsing(
"https://web.archive.org/web/20160625160751id_/https://www.facebook.com/search/people/?q=%23ineligible&ref=top_filter",
"facebook",
)
def test_parse_query_facebook_solcellespecialisten_1389488036():
verify_serp_parsing(
"https://web.archive.org/web/20140112005356id_/http://da-dk.facebook.com/search.php?q=Solcellespecialisten&_fb_noscript=1",
"facebook",
)
def test_parse_query_facebook_blog_post_319_je31_1567459151():
verify_serp_parsing(
"https://web.archive.org/web/20190902211911id_/https://developers.facebook.com/search/?q=blog+post+319+Je31¬found=1&search_filter_option=news",
"facebook",
)
| 6,107 | 41.416667 | 596 | py |
anticipatr | anticipatr-main/src/main.py | import os
import argparse
import random
import numpy as np
import time
from pathlib import Path
import json
import datetime
import pickle
import torch
from torch.utils.data import DataLoader
import datasets
import utils.misc as utils
from datasets import build_dataset
from models import build_model
from engine import train_one_epoch, evaluate
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str,default="bf")
parser.add_argument('--root',type=str,help='Path to data root directory')
parser.add_argument('--num_nouns',type=int,default=1)
parser.add_argument('--num_verbs',type=int,default=48)
parser.add_argument('--num_actions',type=int,default=1)
parser.add_argument('--task',type=str,default='anticipation',choices=['anticipation','recognition'])
parser.add_argument('--anticipation',type=str,default='longfuture',choices=['nearfuture','longfuture'])
parser.add_argument('--pretraining_task',type=str,default='snippet_longfuture_anticipation')
parser.add_argument('--fps',type=int,default=60)
parser.add_argument('--label_type',type=str,default='verb',choices=['verb','noun','action'])
parser.add_argument('--action_repr',type=str,default='actionset',choices=['actionset'])
parser.add_argument('--train_many_shot',action='store_true',default=False,help='training with many shot verbs')
parser.add_argument('--split',type=int,default=1)
parser.add_argument('--train_timestamps',type=str,default='0.2,0.3,0.4,0.5,0.6,0.7,0.8')
parser.add_argument('--val_timestamps',type=str,default='0.25,0.5,0.75')
# model parameters
parser.add_argument('--model',type=str,default='antr')
parser.add_argument('--matcher_type',type=str,default='greedy', choices=['hungarian','greedy'])
parser.add_argument('--num_queries',type=int,default=10)
parser.add_argument('--num_pos_embed_dict',type=int,default=50000)
parser.add_argument('--dim_latent',type=int,default=128)
parser.add_argument('--hidden_dim',type=int,default=256)
parser.add_argument('--position_embedding',type=str,default='sine')
parser.add_argument('--num_decoder_embedding',type=int,default=10000)
parser.add_argument('--position_type',type=str,default='index',choices=['time','index'])
parser.add_argument('--dropout',type=float,default=0.1,help='transformer dropout')
parser.add_argument('--nheads',type=int,default=8)
parser.add_argument('--dim_feedforward',type=int,default=2048)
parser.add_argument('--encoder',type=str,default='parallel')
parser.add_argument('--decoder',type=str,default='parallel')
parser.add_argument('--enc_layers',type=int,default=2)
parser.add_argument('--dec_layers',type=int,default=2)
parser.add_argument('--pretrained_enc_layers',type=int,default=2)
parser.add_argument('--pretrained_dec_layers',type=int,default=2)
parser.add_argument('--snippet_window',type=int,default=16)
parser.add_argument('--pretrained_path',type=str,default='')
parser.add_argument('--pre_norm',action='store_true')
parser.add_argument('--aux_loss',action='store_true')
parser.add_argument('--cuda',action='store_true',help='gpu mode')
parser.add_argument('--eval',action='store_true',help='evaluation mode')
parser.add_argument('--norm_type',type=str,choices=['gn','bn'],default='bn',help="normalization type")
parser.add_argument('--activation',type=str,default='leaky_relu',help="transformer activation type")
parser.add_argument('--set_cost_class',type=float,default=1,help='class coefficient in the matching cost')
parser.add_argument('--set_cost_segment',type=float,default=5,help='L1 segment coefficient in the matching cost')
parser.add_argument('--set_cost_siou',type=float,default=3,help='segment IoU coefficient in the matching cost')
parser.add_argument('--loss_coef_segment',type=float,default=5,help='L1 segment loss coefficient')
parser.add_argument('--loss_coef_siou',type=float,default=3,help='segment IoU loss coefficient')
parser.add_argument('--eos_coef',type=float,default=0.1,help='relative classification weight applied to the no-action class')
# * Training
parser.add_argument('--resume',type=str,default='',help='resume from a checkpoint')
parser.add_argument('--save_checkpoint_every',type=int,default=1000,help='checkpoint saving frequency')
parser.add_argument('--evaluate_every',type=int,default=5,help='compute the full anticipation metrics every N evaluation epochs')
parser.add_argument('--evaluate_every_epoch',type=int,default=5,help='run evaluation every N training epochs')
parser.add_argument('--num_workers',type=int,default=0,help='number of workers')
parser.add_argument('--batch_size',type=int,default=2,help='batch_size')
parser.add_argument('--epochs',type=int,default=10,help='number of epochs')
parser.add_argument('--step_size',type=int,default=64,help='number of steps before backpropagation')
parser.add_argument('--start_epoch',type=int,default=0,help='starting epoch')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_joiner', default=0, type=float)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--clip_max_norm', default=1, type=float,help='gradient clipping max norm')
parser.add_argument('--output_dir', type=str,default='./experiments/checkpoints/',help='path to save intermediate checkpoints')
# * Distributed Training
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--device', default='cuda',help='device to use for training / testing')
args = parser.parse_args()
print(args)
def main(args):
bz = args.batch_size
lr = args.lr
    if args.cuda:
        if torch.cuda.device_count() >= 1:
            utils.init_distributed_mode(args)
            device = torch.device(args.device)
        else:
            # no GPU visible: fall back to CPU and disable distributed mode
            args.distributed = False
            device = torch.device('cpu')
    else:
        device = torch.device('cpu')
# fix the seed for reproducibility
if args.cuda:
seed = args.seed + utils.get_rank()
else:
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# datasets build
dataset_train = build_dataset(args=args, mode="train")
dataset_test = build_dataset(args=args, mode="val")
if args.cuda and args.distributed:
sampler_train = torch.utils.data.distributed.DistributedSampler(dataset_train,shuffle=True)
sampler_test = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_test = DataLoader(dataset_test, 1, sampler=sampler_test, drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
# set up model
model, criterion = build_model(args)
model.to(device)
criterion.to(device)
model_without_ddp = model
if args.cuda and args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],find_unused_parameters=True)
        model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
# set up model training
param_dicts = [{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" not in n and p.requires_grad]},
{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" in n and p.requires_grad], "lr": args.lr_joiner,},]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
# output and checkpoints directory
checkpoint_dir = args.output_dir
if not os.path.exists(checkpoint_dir):
try:
os.makedirs(checkpoint_dir)
except OSError:
pass
if args.resume:
checkpoint = Path(args.resume)
assert checkpoint.exists()
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
print("Start Training")
start_time = time.time()
optimizer.zero_grad()
for epoch in range(args.start_epoch, args.epochs):
if args.cuda and args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(epoch, args.clip_max_norm, model, criterion, data_loader_train, optimizer, lr_scheduler, device)
if args.output_dir:
checkpoint_dir = Path(checkpoint_dir)
checkpoint_paths = [checkpoint_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % args.save_checkpoint_every == 0:
checkpoint_paths.append(checkpoint_dir / f'checkpoint{epoch:05}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args,}, checkpoint_path)
# evaluation
if epoch % args.evaluate_every_epoch == 0:
test_stats = evaluate(epoch, model, criterion, data_loader_test, args.dataset, args.evaluate_every, device)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, **{f'test_{k}': v for k, v in test_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (checkpoint_dir / 'log.json').open("a") as f:
f.write(json.dumps(log_stats) + "\n")
lr_scheduler.step()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == "__main__":
main(args)
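# A minimal launch sketch (the path and most values below are illustrative assumptions,
# not settings prescribed by this script):
#   python src/main.py --dataset bf --root /path/to/features --cuda \
#       --label_type verb --num_verbs 48 --batch_size 2 --epochs 10 \
#       --output_dir ./experiments/checkpoints/
# Distributed runs additionally rely on utils.init_distributed_mode reading the usual
# torch.distributed environment variables (RANK, WORLD_SIZE, MASTER_ADDR, ...).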
| 10,477 | 46.627273 | 208 | py |
anticipatr | anticipatr-main/src/engine.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import os,sys
import copy
import numpy as np
import math
from typing import Iterable
import time
import utils.misc as utils
import datasets
from metrics.longfuture_metrics import AnticipationEvaluator
def train_one_epoch(epoch, max_norm, model, criterion, data_loader, optimizer, scheduler, device):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 50
step = 0
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask, targets, tgt_mask)
losses = criterion(outputs, targets)
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
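        # e.g. with the default weights from the builder ({'loss_ce': 1, 'loss_segment': 5,
        # 'loss_siou': 3}) the optimized scalar is loss_ce + 5 * loss_segment + 3 * loss_siou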
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
        loss = losses_reduced_scaled.item()
        if not math.isfinite(loss):
            print("Loss is {}, stopping training".format(loss))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
loss_value.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
train_stats = {k: meter.global_avg for k, meter in metric_logger.meters.items() if 'AP' not in k}
print("Train epoch:", epoch, "Averaged stats:", train_stats)
return train_stats
def evaluate(epoch, model, criterion, data_loader, dataset, evaluate_every, device):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test: [{}]'.format(epoch)
print_freq = 50
step = 0
predictions = {}
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask,targets, tgt_mask)
losses = criterion(outputs, targets)
losses_metric = {k:v for k,v in losses.items() if 'AP' in k or 'acc' in k}
# convert dict[key, tensor (b,x,x)] to list of length b with dict(str, tensor (x,x))
losses_metric = [{k:v[i] for k,v in losses_metric.items()} for i in range(samples.tensors.size(0))]
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
metric_logger.update(loss=losses_reduced_scaled.item(), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
res = {datasets.ds_utils.getVideoName(dataset, target['video_id'].tolist()): output for target, output in zip(targets,losses_metric)}
predictions.update(res)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
######For mAP calculation need to gather all data###########
all_predictions = utils.all_gather(predictions)
    test_stats = {}
    if epoch % evaluate_every == 0:
        evaluator = AnticipationEvaluator(dataset)
        test_stats = evaluator.evaluate(all_predictions)
    print("Test epoch:", epoch, "Averaged test stats:", test_stats)
    return test_stats
| 4,794 | 37.36 | 141 | py |
anticipatr | anticipatr-main/src/snippet_models/model.py | import torch
import torch.nn.functional as F
from torch import nn
from .transformer import build_transformer
from .joiner import build_joiner
import numpy as np
from utils.misc import accuracy, get_world_size, get_rank,is_dist_avail_and_initialized
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
class EncoderSnippetLongfutureAnticipation(nn.Module):
def __init__(self, joiner, transformer, dim_feedforward, num_classes, num_queries, aux_loss = True):
""" Initializes the model.
Parameters:
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of action classes
num_queries: number of action queries, ie decoder outputs.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.joiner = joiner
hidden_dim = transformer.d_model
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.input_proj = nn.Conv1d(2048, hidden_dim, kernel_size=1)
self.aux_loss = aux_loss
def forward(self, samples, mask, targets=None, tgt_mask=None):
""" The forward expects two inputs:
- samples.tensor: batched videos features, of shape [batch_size x 2048 x T]
- samples.mask: a binary mask of shape [batch_size x T], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-action) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_segments": The normalized segments coordinates for all queries, represented as
(start_time, end_time). These values are normalized in [0, 1],
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
assert mask is not None
sample_positions=torch.empty_like(mask)
src, pos = self.joiner(samples,mask,sample_positions)
input = self.input_proj(src)
hs = self.transformer(input,mask, tgt_mask, self.query_embed.weight,pos)[0]
outputs_class = self.class_embed(hs)
return outputs_class
class CriterionSnippetLongfutureAnticipation(nn.Module):
def __init__(self, num_classes, weight_dict, eos_coef, losses,fps):
super().__init__()
self.num_classes = num_classes
self.weight_dict = weight_dict
self.losses = losses
self.eos_coef = eos_coef
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def get_mAP(self,pred,labels,label_mask):
mAPs = dict()
for i in range(label_mask.shape[0]):
pred_i = pred[:, label_mask[i]]
labels_i = labels[:, label_mask[i]]
mAPs['mAP_{}'.format(i)] = torch.cat((pred_i.detach().cpu(), labels_i.detach().cpu()),1)
if mAPs['mAP_{}'.format(i)].ndim == 1:
mAPs['mAP_{}'.format(i)] = mAPs['mAP_{}'.format(i)].unsqueeze(0)
return mAPs
def loss_labels(self,outputs, targets,log=True):
src_logits = torch.sigmoid(outputs['pred_logits'].mean(1))
target_classes = torch.cat([t['labels'].unsqueeze(0) for t in targets])
loss_ce = F.binary_cross_entropy(src_logits, target_classes,reduction='mean')
losses = {'loss_ce': loss_ce}
losses.update(self.get_mAP(src_logits, target_classes, targets[0]['label_mask']))
return losses
def get_loss(self, loss, outputs, targets, **kwargs):
loss_map = {
'labels': self.loss_labels
}
assert loss in loss_map, f'{loss} loss not defined'
return loss_map[loss](outputs,targets,**kwargs)
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets))
if 'aux_outputs' in outputs:
for i,aux_outputs in enumerate(outputs['aux_outputs']):
for loss in self.losses:
kwargs = {}
kwargs = {'log' : False}
l_dict = self.get_loss(loss,aux_outputs,targets,**kwargs)
l_dict = {k + f'_{i}': v for k,v in l_dict.items()}
losses.update(l_dict)
return losses
class Identity(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
def replace_last_layer(model):
model.class_embed = Identity()
return model
def build(args):
joiner = build_joiner(args)
transformer = build_transformer(args)
if args.label_type == 'verb':
num_classes = args.num_verbs
if args.label_type == 'noun':
num_classes = args.num_nouns
if args.label_type == 'action':
num_classes = args.num_actions
losses = ['labels']
weight_dict = {'loss_ce':1}
model = EncoderSnippetLongfutureAnticipation(
joiner,
transformer,
dim_feedforward=args.dim_feedforward,
num_classes=num_classes,
num_queries=1,
aux_loss=args.aux_loss,
)
criterion = CriterionSnippetLongfutureAnticipation(num_classes=num_classes, weight_dict=weight_dict, eos_coef=args.eos_coef, losses=losses,fps=args.fps)
model = replace_last_layer(model)
print(model)
return model
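if __name__ == "__main__":
    # Minimal, self-contained shape check for the MLP head defined above; the sizes are
    # illustrative assumptions, not values taken from the training pipeline.
    mlp = MLP(input_dim=256, hidden_dim=256, output_dim=2, num_layers=3)
    dummy = torch.randn(4, 10, 256)             # (batch, queries, hidden_dim)
    out = mlp(dummy)
    assert out.shape == (4, 10, 2)              # one (start, end) pair per query
    print("MLP output shape:", tuple(out.shape))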
| 6,287 | 35.77193 | 156 | py |
anticipatr | anticipatr-main/src/snippet_models/position_encoding.py | """
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSineIndex(nn.Module):
"""
Sinusoidal positional encodings based on sequence timestamps
"""
def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x,mask):
assert mask is not None
not_mask = ~mask
not_mask = not_mask.to(mask.device)
x_embed = not_mask.cumsum(1, dtype=torch.float32)
if self.normalize:
eps = 1e-6
x_embed = x_embed / (x_embed[:, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=2).flatten(2)
pos = pos_x.permute(0, 2, 1)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim
if args.position_embedding == 'sine' and args.position_type=='index':
position_embedding = PositionEmbeddingSineIndex(N_steps, normalize=True)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
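if __name__ == "__main__":
    # Shape sketch with illustrative sizes: the encoding comes back as
    # (batch, num_pos_feats, T) so it can be added directly to (batch, C, T) features.
    pe = PositionEmbeddingSineIndex(num_pos_feats=8)
    feats = torch.zeros(2, 8, 5)
    mask = torch.zeros(2, 5, dtype=torch.bool)  # no padded positions
    pos = pe(feats, mask)
    assert pos.shape == (2, 8, 5)
    print("positional encoding shape:", tuple(pos.shape))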
| 1,693 | 33.571429 | 97 | py |
anticipatr | anticipatr-main/src/snippet_models/transformer.py | """
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, encoding='parallel', decoding='no_decoder', dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False,
return_intermediate_dec=False):
super().__init__()
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
if decoding != 'no_decoder':
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec)
self._reset_parameters()
self.encoding = encoding
self.decoding = decoding
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, src_mask, tgt_mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, t = src.shape
src = src.permute(2, 0, 1)
pos_embed = pos_embed.permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
tgt = torch.zeros_like(query_embed)
encoder_mask = None
memory = self.encoder(src, mask=encoder_mask, src_key_padding_mask=src_mask, pos=pos_embed)
tgt_mask = None
if self.decoding != 'no_decoder':
hs = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_key_padding_mask=src_mask, pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory.permute(1, 2, 0)
elif self.decoding == 'no_decoder':
return memory.permute(1,0,2), torch.empty(memory.size()).to(memory.device)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
encoding=args.encoder,
decoding="no_decoder",
activation=args.activation,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "elu":
return F.elu
if activation == "leaky_relu":
return F.leaky_relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(F"activation should be relu/elu/leaky_relu/gelu/glu, not {activation}.")
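if __name__ == "__main__":
    # Encoder-only sketch with illustrative sizes (not the values used in training):
    # the input features are (batch, d_model, T); with decoding='no_decoder' the forward
    # returns the encoder memory reshaped to (batch, T, d_model) plus a placeholder tensor.
    bs, d_model, t = 2, 32, 6
    tr = Transformer(d_model=d_model, nhead=4, num_encoder_layers=1, num_decoder_layers=1,
                     decoding='no_decoder', dim_feedforward=64)
    src = torch.randn(bs, d_model, t)
    src_mask = torch.zeros(bs, t, dtype=torch.bool)  # nothing is padded
    pos = torch.zeros(bs, d_model, t)                # stand-in positional tensor
    query = torch.zeros(1, d_model)                  # unused when there is no decoder
    memory, _ = tr(src, src_mask, None, query, pos)
    print("encoder memory shape:", tuple(memory.shape))  # (2, 6, 32)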
| 12,622 | 39.458333 | 139 | py |
anticipatr | anticipatr-main/src/snippet_models/joiner.py | """
Joiner modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from .position_encoding import build_position_encoding
class Joiner(nn.Sequential):
def __init__(self,position_embedding,position_type,position_encoding_type):
super().__init__(position_embedding)
self.position_type = position_type
self.position_encoding_type = position_encoding_type
def forward(self, x, mask,positions):
if self.position_type == 'index' and self.position_encoding_type=='sine':
pos = self[0](x,mask)
return x, pos
def build_joiner(args):
position_embedding = build_position_encoding(args)
model = Joiner(position_embedding,position_type=args.position_type,position_encoding_type=args.position_embedding)
return model
| 959 | 29.967742 | 118 | py |
anticipatr | anticipatr-main/src/models/matcher.py | import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
import numpy as np
import utils.segment_utils as segment_utils
class GreedyMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_action. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-actions).
"""
def __init__(self):
"""Creates the matcher
"""
super().__init__()
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_segments": Tensor of dim [batch_size, num_queries, 2] with the predicted segment timestamps
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_segments] (where num_target_segments is the number of ground-truth
actions in the target) containing the class labels
"segmentes": Tensor of dim [num_target_segments, 2] containing the target segment timestamps
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_segments)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_segment = outputs["pred_segments"].flatten(0, 1) # [batch_size * num_queries, 2]
scale_factor = torch.stack([t["prediction_duration"] for t in targets], dim=0)
out_segment_scaled = out_segment * scale_factor.unsqueeze(1).repeat(1,num_queries,1).flatten(0,1).repeat(1,2)
tgt_segment = torch.cat([v["segments"] for v in targets])
tgt_segment_scaled = torch.cat([v["segments"] * v['prediction_duration'] for v in targets])
indices = []
for i in range(bs):
targets_i = targets[i]['segments'] * targets[i]['prediction_duration']
targets_i = targets_i[torch.sort(targets_i[:,1]-targets_i[:,0], descending=True)[1]]
preds_i = outputs['pred_segments'][i] * scale_factor[i][0]
tgt_i = []
p_i = []
seen_pidx = []
for tidx, tgt in enumerate(targets_i):
sorted_iou = torch.sort(segment_utils.generalized_segment_iou(tgt.unsqueeze(0), preds_i), descending=True,dim=0,stable=True)[1]
for s in sorted_iou:
if s not in seen_pidx:
pidx = s
seen_pidx.append(s)
break
tgt_i.append(tidx)
p_i.append(pidx)
unseen_pidx = [p for p in range(num_queries) if p not in seen_pidx]
for up_idx in unseen_pidx:
p_i.append(up_idx)
tgt_i.append(-1)
indices.append(torch.cat((torch.as_tensor(p_i,dtype=torch.int64).unsqueeze(0),torch.as_tensor(tgt_i,dtype=torch.int64).unsqueeze(0)),dim=0))
sizes = [len(v["segments"]) for v in targets]
return [torch.as_tensor(i, dtype=torch.int64) for i in indices]
def build_matcher(args):
return GreedyMatcher()
| 3,980 | 46.392857 | 152 | py |
anticipatr | anticipatr-main/src/models/antr.py | import torch
import torch.nn.functional as F
from torch import nn
from .transformer import build_transformer
from .joiner import build_joiner
from .matcher import build_matcher
import snippet_models
import numpy as np
from utils.misc import accuracy, get_world_size, get_rank,is_dist_avail_and_initialized
from utils import segment_utils as segment_utils
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
class ANTR(nn.Module):
def __init__(self, joiner, transformer, output_type, dim_feedforward, num_classes, num_queries, num_decoder_embedding, aux_loss = True):
""" Initializes the model.
Parameters:
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of action classes
            num_queries: number of anticipation queries, ie, decoder outputs. This is the maximal number of actions the model can predict given a video.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.joiner = joiner
hidden_dim = transformer.d_model
self.hidden_dim = hidden_dim
self.output_type = output_type
self.num_queries = num_queries
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.query_time_embed = nn.Linear(hidden_dim + 1, hidden_dim)
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.segments_embed = MLP(hidden_dim, hidden_dim, 2, 3)
self.input_proj = nn.Conv1d(2048, hidden_dim, kernel_size=1)
self.aux_loss = aux_loss
def forward(self, samples, mask, targets,tgt_mask=None):
""" The forward expects two inputs:
- samples: batched videos features, of shape [batch_size x 2048 x T]
- mask: a binary mask of shape [batch_size x T], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-action) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_segments": The normalized segments coordinates for all queries, represented as
(start_time, end_time). These values are normalized in [0, 1],
                               relative to the prediction duration of each individual video (disregarding possible padding).
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
assert mask is not None
sample_positions = torch.empty_like(mask).to(samples.device) ## for positional encodings
src, pos = self.joiner(samples,mask,sample_positions)
input = self.input_proj(src)
b, l, c = input.size()
query_pos = self.query_embed.weight.unsqueeze(1).repeat(1, b, 1)
nq = query_pos.size(0)
prediction_times = torch.stack([t['prediction_duration'] for t in targets], axis=0).squeeze(1).repeat(1, nq, 1).permute(1,2,0)
query_and_prediction_times = torch.cat([query_pos, prediction_times], axis=2)
decoder_pos = self.query_time_embed(query_and_prediction_times.reshape(b * nq, self.hidden_dim + 1)).reshape(b,nq,-1).permute(1,0,2)
hs = self.transformer(input,src, mask, tgt_mask, decoder_pos,pos)[0]
outputs_class = self.class_embed(hs)
outputs_segments = self.segments_embed(hs)
outputs_segments = F.relu(outputs_segments) + 0.1
out = {'pred_logits': outputs_class[-1], 'pred_segments': outputs_segments[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_segments)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_segments):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_segments': b} for a, b in zip(outputs_class[:-1], outputs_segments[:-1])]
class CriterionGreedyMatcher(nn.Module):
""" This class computes the loss for ANTICIPATR.
The process happens in two steps:
1) we compute greedy assignment between ground truth segments and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and segment)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of action categories, omitting the special no-action category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-action category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.weight_dict = weight_dict
self.matcher = matcher
self.losses = losses
self.eos_coef=eos_coef
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src,_) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def loss_labels(self, outputs, targets, indices, num_segments, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_segments]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
return losses
def loss_segments(self, outputs, targets, indices, num_segments):
"""Compute the losses related to the segments, the L1 regression loss and the IoU loss
targets dicts must contain the key "segments" containing a tensor of dim [num_segments, 2]
"""
assert 'pred_segments' in outputs
idx = self._get_src_permutation_idx(indices)
src_segments = outputs['pred_segments'][idx].squeeze(1)
target_segments = torch.cat([t['segments'][i] for t, (_, i) in zip(targets, indices)], dim=0).squeeze(1)
loss_segment = F.l1_loss(src_segments, target_segments, reduction='none')
losses = {}
losses['loss_segment'] = loss_segment.sum()/num_segments
loss_siou = 1 - torch.diag(segment_utils.generalized_segment_iou(src_segments,target_segments))
losses['loss_siou'] = loss_siou.sum()/num_segments
return losses
def get_unrolled_timeline(self, outputs, targets):
src_logits = F.softmax(outputs['pred_logits'],dim=2)
b,q,c = src_logits.size()
src_segments = outputs['pred_segments']
scale_factor = torch.cat([t['prediction_duration'].unsqueeze(0) for t in targets]).repeat(1,2)
src_segments_scaled = src_segments * scale_factor[:,None,:]
fps = targets[0]['fps']
out_logits = torch.zeros(b,int(torch.round(torch.max(torch.cat([t['prediction_duration'] for t in targets])))),self.num_classes+1).to(src_logits.device)
for bidx in range(b):
for sidx in range(len(src_segments_scaled[bidx])):
s = max(int(src_segments_scaled[bidx][sidx][0]), 0)
e = min(int(src_segments_scaled[bidx][sidx][1]), out_logits.size(1))
for tidx in range(s,e):
out_logits[bidx,tidx,:] = torch.max(out_logits[bidx,tidx,:], src_logits[bidx][sidx])
        output_classes_onehot = F.one_hot(torch.argmax(out_logits[:,:,:-1],dim=2),num_classes=self.num_classes).to(dtype=torch.float32)
target_classes = torch.zeros(b,out_logits.size(1),c-1).to(src_logits.device)
for idx, t in enumerate(targets):
target_classes[idx,:, :t['labels_onehot'].size(0)] = t['labels_onehot']
return output_classes_onehot, target_classes
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_segments):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty segments
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-action" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def get_mAP(self, pred, labels, label_mask):
mAPs = dict()
pred = torch.clip(pred.sum(1), min=0.0,max=1.0)
labels = torch.clip(labels.sum(1), min=0.0,max=1.0)
for i in range(label_mask.shape[1]):
if torch.sum(label_mask[0][i]) > 0:
pred_i = pred[:, label_mask[0][i]].squeeze(1)
labels_i = labels[:, label_mask[0][i]].squeeze(1)
mAPs['AP_{}'.format(i)] = torch.cat((pred_i.detach().cpu(), labels_i.detach().cpu()),1)
return mAPs
    def get_accuracy(self,pred,labels, outputs, targets):
        acc = dict()
        for i in range(pred.shape[0]):
            k_r_p = 'acc_{}_{}'.format(int(targets[i]['ratio_idx']*100), int(targets[i]['prediction_idx']*100))
            sample = torch.cat((pred[i].detach().cpu(), labels[i].detach().cpu()), 1)
            # check if the key already exists in output dict
            if k_r_p in acc:
                acc[k_r_p] = torch.cat((acc[k_r_p], sample), 0)
            # if it doesn't exist create a key and value pair
            else:
                acc[k_r_p] = sample
        return acc
def get_loss(self, loss, outputs, targets, indices, num_segments, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'segments': self.loss_segments,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_segments, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
all_indices = self.matcher(outputs_without_aux, targets)
indices = [idx[:,(idx[1,:] + 1).nonzero(as_tuple=False)] for idx in all_indices]
# Compute the average number of target segments accross all nodes, for normalization purposes
num_segments = sum(len(t["labels"]) for t in targets)
num_segments = torch.as_tensor([num_segments], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_segments)
num_segments = torch.clamp(num_segments / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_segments))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_segments, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
pred,labels = self.get_unrolled_timeline(outputs, targets)
losses.update(self.get_mAP(pred,labels, targets[0]['label_mask']))
losses.update(self.get_accuracy(pred, labels, outputs, targets))
return losses
def build(args):
joiner = build_joiner(args)
transformer = build_transformer(args)
if args.label_type == 'verb':
num_classes = args.num_verbs
if args.label_type == 'noun':
num_classes = args.num_nouns
if args.label_type == 'action':
num_classes = args.num_actions
num_queries = args.num_queries
model = ANTR(
joiner,
transformer,
dim_feedforward=args.dim_feedforward,
output_type=args.action_repr,
num_classes=num_classes,
num_queries=num_queries,
num_decoder_embedding=args.num_decoder_embedding,
aux_loss=args.aux_loss,
)
    losses = ['labels', 'segments', 'cardinality']
    matcher = build_matcher(args)
    weight_dict = {'loss_ce': 1, 'loss_segment': args.loss_coef_segment, 'loss_siou': args.loss_coef_siou}
    if args.aux_loss:
        # the base weight_dict must exist before the per-layer auxiliary copies are derived from it
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    criterion = CriterionGreedyMatcher(num_classes, matcher, weight_dict=weight_dict, eos_coef=args.eos_coef, losses=losses)
print(model)
return model, criterion
| 15,660 | 46.457576 | 163 | py |
anticipatr | anticipatr-main/src/models/position_encoding.py | """
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSineIndex(nn.Module):
def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x,mask):
assert mask is not None
not_mask = ~mask
not_mask = not_mask.to(mask.device)
x_embed = not_mask.cumsum(1, dtype=torch.float32)
if self.normalize:
eps = 1e-6
x_embed = x_embed / (x_embed[:, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=2).flatten(2)
pos = pos_x.permute(0, 2, 1)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim
if args.position_embedding == 'sine' and args.position_type=='index':
position_embedding = PositionEmbeddingSineIndex(N_steps, normalize=True)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
| 1,604 | 33.891304 | 97 | py |
anticipatr | anticipatr-main/src/models/transformer.py | """
Transformer class.
Code inspired by torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* decoder handles multiple encoders
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import os, sys
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from snippet_models import build_snippet_model
class TransformerMultipleEncoder(nn.Module):
def __init__(self, snippet_model, d_model=512, nhead=8, num_encoder_layers=6,num_pretrained_layers=4,snippet_window=32,
num_decoder_layers=6, encoding='parallel', decoding='parallel', dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False,
return_intermediate_dec=False,pretrained_path=''):
super().__init__()
self.encoding = encoding
self.decoding = decoding
self.d_model = d_model
self.nhead = nhead
self.snippet_window = snippet_window
## construct video encoder
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.video_encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self.snippet_encoder = snippet_model
for param in self.snippet_encoder.parameters():
param.requires_grad = False
# load pretrained snippet encoder
if pretrained_path != '' and os.path.exists(pretrained_path):
self.snippet_encoder.eval()
model_dict = self.snippet_encoder.state_dict()
pretrained_dict = torch.load(pretrained_path,map_location=torch.device('cpu'))['model']
pretrained_dict = {k:v for k,v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.snippet_encoder.load_state_dict(model_dict)
## construct decoder
decoder_layer = TransformerMultipleEncoderDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerMultipleEncoderDecoder(decoder_layer, num_decoder_layers, decoder_norm)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, orig_src, src_mask, tgt_mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, t = src.shape
src = src.permute(2, 0, 1)
orig_src = orig_src.permute(2,0,1)
pos_embed = pos_embed.permute(2, 0, 1)
tgt = torch.zeros_like(query_embed)
encoder_mask = None
memory_snippet = None
## extracting snippet representations and handling overflow properly
## overflow needs to be handled as video length might not be a multiple
## of the size of snippet length used in snippet encoder
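        ## e.g. (illustrative numbers): with t = 100 frames and snippet_window = 16,
        ## windows_length = 96 is covered by six full snippets and the remaining
        ## overflow = 4 frames are encoded as one short snippet, so every frame still
        ## receives a snippet-level representation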
if t % self.snippet_window == 0:
memory_snippet = self.snippet_encoder(orig_src.reshape(bs * (t//self.snippet_window), -1, self.snippet_window),
mask=torch.zeros((bs * (t//self.snippet_window),self.snippet_window),dtype=torch.bool).to(orig_src.device)).reshape(bs,-1,t)
else:
overflow = t % self.snippet_window
windows_length = t - (t % self.snippet_window)
windows_memory = self.snippet_encoder(orig_src[:windows_length,:,:].reshape(bs * (windows_length//self.snippet_window), -1, self.snippet_window),
mask=torch.zeros((bs * (windows_length//self.snippet_window), self.snippet_window),dtype=torch.bool).to(orig_src.device))
overflow_memory = self.snippet_encoder(orig_src[-overflow:,:,:].reshape(bs, -1, overflow), mask=torch.zeros((bs,overflow),dtype=torch.bool).to(orig_src.device))
memory_snippet = torch.cat((windows_memory.reshape(bs,-1,windows_length), overflow_memory.reshape(bs, -1, overflow)), dim=2)
memory_video = self.video_encoder(src, mask=encoder_mask, src_key_padding_mask=src_mask, pos=pos_embed)
memory_snippet = memory_snippet.reshape(t,bs,c)
tgt_mask = None
hs = self.decoder(tgt, memory_snippet, memory_video, tgt_mask, memory_key_padding_mask_1=src_mask, memory_key_padding_mask_2=src_mask, pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory_video.permute(1, 2, 0)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerMultipleEncoderDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory_1, memory_2,
tgt_mask: Optional[Tensor] = None,
memory_mask_1: Optional[Tensor] = None,
memory_mask_2: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask_1: Optional[Tensor] = None,
memory_key_padding_mask_2: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory_1, memory_2, tgt_mask=tgt_mask,
memory_mask_1=memory_mask_1, memory_mask_2=memory_mask_2,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask_1=memory_key_padding_mask_1,
memory_key_padding_mask_2=memory_key_padding_mask_2,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerMultipleEncoderDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn1 = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn2 = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.norm4 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.dropout4 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory_1, memory_2,
tgt_mask: Optional[Tensor] = None,
memory_mask_1: Optional[Tensor] = None,
memory_mask_2: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask_1: Optional[Tensor] = None,
memory_key_padding_mask_2: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
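# Decoder layer with two cross-attention stages: self-attention over the target queries,
# attention into memory_1 (keys used without positional encodings) and attention into
# memory_2 (keys augmented with the positional encodings `pos`), each followed by a residual
# connection and LayerNorm; in the caller above memory_1 is the snippet-level memory and
# memory_2 the video-level memory.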
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn1(query=self.with_pos_embed(tgt, query_pos),
key=memory_1,
value=memory_1, attn_mask=memory_mask_1,
key_padding_mask=memory_key_padding_mask_1)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.multihead_attn2(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory_2, pos),
value=memory_2, attn_mask=memory_mask_2,
key_padding_mask=memory_key_padding_mask_2)[0]
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm4(tgt)
return tgt
def forward(self, tgt, memory_1, memory_2,
tgt_mask: Optional[Tensor] = None,
memory_mask_1: Optional[Tensor] = None,
memory_mask_2: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask_1: Optional[Tensor] = None,
memory_key_padding_mask_2: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
return self.forward_post(tgt, memory_1, memory_2, tgt_mask, memory_mask_1, memory_mask_2,
tgt_key_padding_mask, memory_key_padding_mask_1,
memory_key_padding_mask_2, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
snippet_model = build_snippet_model(args)
return TransformerMultipleEncoder(
snippet_model,
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_pretrained_layers=args.pretrained_enc_layers,
num_decoder_layers=args.dec_layers,
snippet_window=args.snippet_window,
encoding=args.encoder,
decoding=args.decoder,
activation=args.activation,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
pretrained_path=args.pretrained_path,
#combination_mode=args.combination
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "elu":
return F.elu
if activation == "leaky_relu":
return F.leaky_relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 14,850 | 43.199405 | 180 | py |
anticipatr | anticipatr-main/src/models/joiner.py | """
Joiner modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from .position_encoding import build_position_encoding
class Joiner(nn.Sequential):
def __init__(self,position_embedding,position_type,position_encoding_type):
super().__init__(position_embedding)
self.position_type = position_type
self.position_encoding_type = position_encoding_type
def forward(self, x, mask,positions):
pos = self[0](x,mask)
return x, pos
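# The positional embedding is computed from the features and the padding mask alone;
# `positions` is accepted for interface compatibility but is currently unused.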
def build_joiner(args):
position_embedding = build_position_encoding(args)
model = Joiner(position_embedding,position_type=args.position_type,position_encoding_type=args.position_embedding)
return model
| 873 | 28.133333 | 118 | py |
anticipatr | anticipatr-main/src/metrics/longfuture_metrics.py | """
Evaluator class for action anticipation benchmarks
"""
import math
import numpy as np
import torch
import warnings
from collections import OrderedDict
warnings.filterwarnings("ignore", category=UserWarning)
import sklearn.metrics as skmetrics
class AnticipationEvaluator(object):
def __init__(self,dataset):
self.apmeter = OrderedDict()
self.output = OrderedDict()
if dataset in ['ek','egtea']:
prediction_type = 'time_independent'
elif dataset in ['bf','salads']:
prediction_type = 'time_conditioned'
self.prediction_type = prediction_type
self.accmeter = OrderedDict()
self.output['mAP_micro'] = []
self.output['mAP_macro'] = []
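# Epic-Kitchens / EGTEA are scored with mAP over the full future (time-independent predictions),
# while Breakfast / 50Salads are scored with per-class accuracy at fixed prediction horizons.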
def get_AP_perclass(self, predictions):
if isinstance(predictions,dict):
predictions = [predictions]
preds = {}
targets = {}
for p in predictions:
for k,v in p.items():
for k_ap,v_ap in v.items():
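# Each evaluation tensor packs predictions and binary targets back to back along dim 0,
# so the first half are the predicted scores and the second half the ground-truth labels.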
if 'AP' in k_ap:
preds.setdefault(k_ap, []).append(v_ap[:v_ap.size(0)//2].numpy())
targets.setdefault(k_ap, []).append(v_ap[v_ap.size(0)//2:].numpy())
for k_ap,v_ap in preds.items():
y_true = np.asarray([t for t in targets[k_ap]])
y_pred = np.asarray([p for p in preds[k_ap]])
if 'AP' in k_ap:
self.output['mAP_macro'].append(skmetrics.average_precision_score(y_true, y_pred, average='macro'))
self.output['mAP_micro'].append(skmetrics.average_precision_score(y_true, y_pred, average='micro'))
def get_accuracy_perclass(self, predictions):
if isinstance(predictions,dict):
predictions = [predictions]
preds = {}
targets = {}
for p in predictions:
for k,v in p.items():
for k_ap,v_ap in v.items():
if 'acc' in k_ap:
if k_ap not in preds:
preds[k_ap] = []
targets[k_ap] = []
if v_ap.ndim == 1:
v_ap = v_ap.unsqueeze(0)
preds[k_ap].append(v_ap[:,:v_ap.size(1)//2].numpy())
targets[k_ap].append(v_ap[:,v_ap.size(1)//2:].numpy())
for k,v in predictions[0].items():
for k_ap,v_ap in v.items():
if 'acc' in k_ap and v_ap.size(0) > 0:
self.output[k_ap] = []
preds[k_ap] = np.asarray(preds[k_ap])
preds[k_ap] = preds[k_ap].reshape(-1, preds[k_ap].shape[-1])
targets[k_ap] = np.asarray(targets[k_ap])
targets[k_ap] = targets[k_ap].reshape(-1, targets[k_ap].shape[-1])
for cls in range(targets[k_ap].shape[1]):
preds_logits = preds[k_ap][:,cls]
max_prob_cls = np.argmax(preds_logits, axis=1) # obtaining max probability class
preds_i = np.zeros_like(preds_logits)
preds_i[max_prob_cls] = 1 # get most likely predicted class
labels_i = targets[k_ap][:,cls]
self.output[k_ap].append((1-skmetrics.hamming_loss(labels_i,preds_i)) * 100)
def evaluate(self,predictions):
## Epic-Kitchens-55 and EGTEA Gaze+ evaluation
if self.prediction_type == 'time_independent':
self.get_AP_perclass(predictions)
metrics = {}
for k,v in self.output.items():
if k in ['mAP_macro', 'mAP_micro']:
metrics[k] = np.mean(np.asarray(v))
return metrics
## Breakfast and 50Salads evaluation
if self.prediction_type == 'time_conditioned':
self.get_accuracy_perclass(predictions)
metrics = {}
for k,v in self.output.items():
if 'acc' in k:
metrics[k] = np.mean(np.asarray(v))
return metrics
| 4,064 | 40.907216 | 151 | py |
anticipatr | anticipatr-main/src/datasets/bf.py | """
Constructs a dataloader for breakfast dataset for the task of long term action anticipation.
"""
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_longfuture import SequenceDatasetLongFuture
def build_bf_anticipation(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "i3d_feats.pkl"
label_type = 'verb'
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/ek/longfuture/annotations/bf_verbs.csv'}
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
timestamps = '0.2,0.3'
val_timestamps = [float(t) for t in timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'action_repr': args.action_repr,
'prediction_type': 'time_conditioned',
'train_timestamps': train_timestamps,
'val_timestamps': val_timestamps,
'num_verbs': args.num_verbs,
'num_nouns': args.num_nouns,
'num_actions': args.num_actions,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,600 | 33.804348 | 126 | py |
anticipatr | anticipatr-main/src/datasets/ek.py | """
Constructs a dataloader for Epic-Kitchens-55 for the task of long term action anticipation.
"""
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_longfuture import SequenceDatasetLongFuture
# verbs, nouns, actions: 125, 3522, 3806
# train_many_shot -- verbs, nouns, actions: 26, 32, 250
def build_ek_anticipation(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "{}_lfb_s30_{}.pkl".format(mode,'verb')
label_type = '' if args.label_type == 'action' else args.label_type
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/ek/longfuture/annotations/EPIC_many_shot_verbs.csv', 'noun':'data/ek/longfuture/annotations/EPIC_many_shot_nouns.csv'}
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
timestamps = '0.25,0.5,0.75'
val_timestamps = [float(t) for t in timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'action_repr': args.action_repr,
'prediction_type': 'time_independent',
'train_timestamps': train_timestamps,
'val_timestamps': val_timestamps,
'num_verbs': args.num_verbs ,
'num_nouns': args.num_nouns,
'num_actions': args.num_actions,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,834 | 38.042553 | 153 | py |
anticipatr | anticipatr-main/src/datasets/__init__.py | import torch.utils.data
import torchvision
def build_dataset(args, mode):
if args.dataset == 'ek':
from datasets.ek import build_ek_anticipation
return build_ek_anticipation(args=args, mode=mode)
elif args.dataset == 'bf':
from datasets.bf import build_bf_anticipation
return build_bf_anticipation(args=args, mode=mode)
| 363 | 27 | 58 | py |
anticipatr | anticipatr-main/src/datasets/baseds_longfuture.py | import bisect
import copy
import os
import os.path as osp
import random
from functools import partial
import itertools
import numpy as np
import pickle as pkl
import collections
from collections.abc import Sequence
import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from datasets import ds_utils
class DatasetSegmentRecord(object):
def __init__(self, row, clip_range=None):
self._data = row
self.clip_range = clip_range
@property
def path(self):
return self._data[0]
@property
def start_frame(self):
return int(self._data[1])
@property
def end_frame(self):
return int(self._data[2])
@property
def label(self):
return [int(x) for x in self._data[3:]]
@property
def num_frames(self):
return self.end_frame - self.start_frame + 1
@property
def clip_start_frame(self):
return int(self._data[1]) if self.clip_range is None else int(self.clip_range[0])
@property
def clip_end_frame(self):
return int(self._data[2]) if self.clip_range is None else int(self.clip_range[1])
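# Each annotation row is tab-separated: path, start_frame, end_frame, followed by one or more
# integer label fields (verb[, noun, ...]).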
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
def get_many_shot(fin):
with open(fin, "r") as f:
lines = f.readlines()[1:]
classes = [int(line.split(',')[0]) for line in lines]
return classes
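# Expects a CSV with a header row whose first column is the integer class id,
# e.g. (hypothetical) "verb_id,verb_name\n1,take\n8,put" -> [1, 8].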
class SequenceDatasetLongFuture(Dataset):
def __init__(self, feature_file, ann_file, label_type, test_mode, task, fps, dset, action_repr, prediction_type, train_timestamps, val_timestamps, num_verbs, num_nouns, num_actions, train_many_shot=False, manyshot_annotations={}, **kwargs):
self.feature_file = feature_file
self.ann_file = ann_file
self.test_mode = test_mode
self.label = label_type
self.task = task
self.dset = dset
self.action_repr = action_repr
self.prediction_type = prediction_type
self.train_many_shot = train_many_shot
self.train_timestamps = train_timestamps
self.val_timestamps = val_timestamps
self.num_verbs = num_verbs
self.num_nouns = num_nouns
self.num_actions = num_actions
self.train_prediction_interval = 10 ## time in seconds ; used only in training
self.fps = fps
if train_many_shot:
manyshot_verbs = sorted(get_many_shot(manyshot_annotations['verb']))
manyshot_nouns = sorted(get_many_shot(manyshot_annotations['noun']))
self.num_verbs, self.num_nouns = len(manyshot_verbs), len(manyshot_nouns)
self.manyshot_verbs, self.manyshot_nouns = manyshot_verbs, manyshot_nouns
else:
manyshot_nouns, manyshot_verbs = [],[]
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(self.ann_file)]
if self.dset in ['ek','egtea']:
int_counts = [(record.label[0], record.label[1]) for record in records]
int_counts = collections.Counter(int_counts).items()
int_counts = sorted(int_counts, key=lambda x: -x[1])[0:self.num_actions]
self.int_to_idx = {interact:idx for idx, (interact, count) in enumerate(int_counts)}
else:
self.int_to_idx = {}
if prediction_type=='time_independent':
self.data = self.load_annotations_anticipation_time_independent(ann_file)
elif prediction_type=='time_conditioned':
self.data = self.load_annotations_anticipation_time_conditioned(ann_file)
if train_many_shot:
for record in self.data:
record.verbs = [manyshot_verbs.index(x) for x in record.verbs if x in manyshot_verbs]
record.nouns = [manyshot_nouns.index(x) for x in record.nouns if x in manyshot_nouns]
# Only a few nouns/ints will actually have gt positives
# Pass these as part of the batch to evaluate mAP
# Don't know how to pass these in the config
eval_ints = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_ints |= set(record.ints)
eval_set = torch.zeros(1, self.num_actions)
eval_set[0, list(eval_ints)] = 1
self.eval_ints = eval_set.byte()
else:
self.eval_ints = torch.zeros(1, self.num_actions).byte()
eval_nouns = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_nouns |= set(record.nouns)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
self.eval_nouns = eval_set.byte()
else:
eval_set = torch.zeros(3, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
manyshot = eval_nouns & set(manyshot_nouns)
rareshot = eval_nouns - set(manyshot_nouns)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_nouns = eval_set.byte()
else:
self.eval_nouns = torch.zeros(1, self.num_actions).byte()
eval_verbs = set()
for record in self.data:
eval_verbs |= set(record.verbs)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
else:
eval_set = torch.zeros(3, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
manyshot = eval_verbs & set(manyshot_verbs)
rareshot = eval_verbs - set(manyshot_verbs)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_verbs = eval_set.byte()
self.prepare = RecordAnticipationData(self.action_repr, self.prediction_type, self.feature_file, self.dset, self.num_nouns, self.num_verbs, self.num_actions, self.int_to_idx, self.fps, self.label, self.eval_verbs, self.eval_nouns, self.eval_ints)
def load_annotations_anticipation_time_independent(self, ann_file):
vid_lengths = open(self.ann_file.replace('.csv', '_nframes.csv')).read().strip().split('\n')
vid_lengths = [line.split('\t') for line in vid_lengths]
vid_lengths = {k:int(v) for k,v in vid_lengths}
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(ann_file)]
records_by_vid = collections.defaultdict(list)
for record in records:
record.uid = '%s_%s_%s'%(record.path, record.start_frame, record.end_frame)
records_by_vid[record.path].append(record)
records = []
for vid in records_by_vid:
vrecords = sorted(records_by_vid[vid], key=lambda record: record.end_frame)
length = vid_lengths[vid]
if self.test_mode:
timestamps = self.val_timestamps
else:
timestamps = self.train_timestamps # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
timestamps = [int(frac*length) for frac in timestamps]
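# Each observation timestamp t splits the video into an observed prefix [0, t] (the model input)
# and the remaining frames, whose segment annotations become the anticipation targets below.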
for i, t in enumerate(timestamps):
past_records = [record for record in vrecords if record.end_frame<=t]
future_records = [record for record in vrecords if record.start_frame>t]
if len(past_records)<3 or len(future_records)<3:
continue
record = DatasetSegmentRecord([vid, 0, t, -1, -1])
if self.dset in ['ek','egtea']:
record.instances = [dict(segment=[record.start_frame,record.end_frame], verb=record.label[0], noun=record.label[1], action=self.int_to_idx[(record.label[0],record.label[1])]) for record in future_records if (record.label[0],record.label[1]) in self.int_to_idx]
record.nouns = sorted(set([record.label[1] for record in future_records]))
record.ints = sorted(set([self.int_to_idx[(record.label[0], record.label[1])] for record in future_records if (record.label[0], record.label[1]) in self.int_to_idx]))
record.verbs =sorted(set([record.label[0] for record in future_records]))
record.fps = self.fps
record.ratio_idx = i
record.prediction_idx = 1
record.duration = length
record.prediction_duration = length - t
record.observation_duration = t
records.append(record)
print(self.dset, ": time-independent anticipation", len(records))
return records
def load_annotations_anticipation_time_conditioned(self, ann_file):
vid_lengths = open(self.ann_file.replace('.csv', '_nframes.csv')).read().strip().split('\n')
vid_lengths = [line.split('\t') for line in vid_lengths]
vid_lengths = {k:int(v) for k,v in vid_lengths}
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(ann_file)]
records_by_vid = collections.defaultdict(list)
for record in records:
record.uid = '%s_%s_%s'%(record.path, record.start_frame, record.end_frame)
records_by_vid[record.path].append(record)
records = []
for vid in records_by_vid:
vrecords = sorted(records_by_vid[vid], key=lambda record: record.end_frame)
length = vid_lengths[vid]
if self.test_mode:
timestamps = self.val_timestamps
unseen_timestamps = [0.1, 0.2, 0.3, 0.4, 0.5]
else:
timestamps = self.train_timestamps # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
unseen_timestamps = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
seen_timestamps = [int(frac*length) for frac in timestamps]
for i, t in enumerate(seen_timestamps):
past_records = [record for record in vrecords if record.end_frame<=t]
prediction_timestamps = [int(frac*(length - t)) + t for frac in unseen_timestamps]
# prediction_timestamps = [min(t,length-t) for pt in prediction_timestamps]
for j, pred_t in enumerate(prediction_timestamps):
future_records = [record for record in vrecords if record.start_frame>t and record.end_frame<=pred_t]
if len(past_records)<3 or len(future_records)<3:
continue
record = DatasetSegmentRecord([vid, 0, t, -1, -1])
record.instances = [dict(segment=[record.start_frame,record.end_frame], verb=record.label[0]) for record in future_records]
record.verbs =sorted(set([record.label[0] for record in future_records]))
record.fps = self.fps
record.ratio_idx = timestamps[i]
record.prediction_idx = unseen_timestamps[j]
record.duration = length
record.prediction_duration = pred_t - t
record.observation_duration = t
records.append(record)
print(self.dset,": time-conditioned anticipation", len(records))
return records
def get_ann_info(self, idx):
return {
'path': self.data[idx].path,
'num_frames': self.data[idx].num_frames,
'label': self.data[idx].label
}
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
vrecord = self.data[idx]
inputs, targets = self.prepare(vrecord)
return inputs, targets
class RecordAnticipationData(object):
def __init__(self, action_repr, prediction_type, feature_file, dset, num_nouns, num_verbs, num_actions, int_to_idx, fps, label_type, eval_verbs, eval_nouns, eval_actions):
self.action_repr = action_repr
self.prediction_type = prediction_type
self.feature_file = feature_file
self.dset = dset
self.num_nouns = num_nouns
self.num_verbs = num_verbs
self.num_actions = num_actions
self.int_to_idx = int_to_idx
self.fps = fps
self.label_type = label_type
self.eval_verbs = eval_verbs
self.eval_nouns = eval_nouns
self.eval_actions = eval_actions
with open(feature_file,'rb') as f:
self.feature_data = pkl.load(f)
def __call__(self, vrecord):
## features of past records
vidname = vrecord.path
duration = vrecord.duration
features = []
observation_positions = []
for idx in range(vrecord.start_frame-31,vrecord.end_frame+31):
if idx in self.feature_data[vidname].keys():
# set fps to choose the sampling rate (TODO: set as argument)
fps = 1
if idx % fps == 0:
features.append(self.feature_data[vidname][idx])
observation_positions.append(idx)
features = torch.tensor(features,dtype=torch.float32).permute(1,0)
observation_positions = torch.tensor(observation_positions,dtype=torch.float32)
video_id = ds_utils.getVideoId(self.dset, vidname)
## output representation
set_targets = {}
set_targets['video_id'] = torch.tensor(video_id)
if self.label_type == 'action':
label = torch.zeros(self.num_actions)
label[vrecord.ints] = 1
set_targets['labels_onehot'] = to_tensor(label)
set_targets['labels'] = torch.tensor([instance['action'] for instance in vrecord.instances])
num_classes = self.num_actions
set_targets['label_mask'] = to_tensor(self.eval_actions)
elif self.label_type == 'verb':
label = torch.zeros(self.num_verbs)
label[vrecord.verbs] = 1
set_targets['labels_onehot'] = to_tensor(label)
set_targets['labels'] = torch.tensor([instance['verb'] for instance in vrecord.instances])
num_classes = self.num_verbs
set_targets['label_mask'] = to_tensor(self.eval_verbs)
elif self.label_type == 'noun':
label = torch.zeros(self.num_nouns)
label[vrecord.nouns] = 1
set_targets['labels_onehot'] = to_tensor(label)
set_targets['labels'] = torch.tensor([instance['noun'] for instance in vrecord.instances])
num_classes = self.num_nouns
set_targets['label_mask'] = to_tensor(self.eval_nouns)
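# Future segments are shifted by the observation boundary and normalized by the prediction
# horizon, so 0 corresponds to the end of the observed prefix and 1 to the end of the
# anticipated interval.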
set_targets['segments'] = [(np.asarray(instance['segment']) - vrecord.observation_duration)/vrecord.prediction_duration for instance in vrecord.instances]
set_targets['segments'] = torch.tensor(set_targets['segments'],dtype=torch.float32)
set_targets['labels_onehot'] = set_targets['labels_onehot'].float()
set_targets['duration'] = torch.tensor([vrecord.duration/self.fps],dtype=torch.float32)
set_targets['prediction_duration'] = torch.tensor([vrecord.prediction_duration],dtype=torch.float32)
set_targets['observation_duration'] = torch.tensor([(vrecord.end_frame - vrecord.start_frame)],dtype=torch.float32)
set_targets['ratio_idx'] = torch.tensor([vrecord.ratio_idx],dtype=torch.float32)
set_targets['prediction_idx'] = torch.tensor([vrecord.prediction_idx],dtype=torch.float32)
set_targets['observation_positions'] = observation_positions
set_targets['fps'] = torch.tensor([vrecord.fps],dtype=torch.float32)
return features, set_targets
| 16,321 | 42.641711 | 280 | py |
anticipatr | anticipatr-main/src/utils/misc.py | """
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references and
https://github.com/facebookresearch/detr
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__[:3]) < 0.7:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
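# nested_tensor_from_tensor_list pads a batch of (C, T_i) feature tensors to the longest T
# and returns a NestedTensor whose boolean mask is True on the padded (invalid) time steps.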
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
max_size = _max_by_axis([list(feat.shape) for feat in tensor_list])
batch_shape = [len(tensor_list)] + max_size
b, c, t = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, t), dtype=torch.bool, device=device)
for feat, pad_feat, m in zip(tensor_list, tensor, mask):
pad_feat[: feat.shape[0], : feat.shape[1]].copy_(feat)
m[: feat.shape[1]] = False
return NestedTensor(tensor, mask)
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,world_size=args.world_size, rank=args.rank)
if not torch.cuda.is_available():
torch.distributed.barrier()
#torch.distributed.barrier(group=torch.distributed.group.WORLD)
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(5,10,20)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
if float(torchvision.__version__[:3]) < 0.7:
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| 13,447 | 30.716981 | 137 | py |
anticipatr | anticipatr-main/src/utils/segment_utils.py | import torch
import numpy as np
def segment_iou(target_segment,candidate_segments):
tt1 = torch.max(target_segment[0], candidate_segments[:, 0])
tt2 = torch.min(target_segment[1], candidate_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clamp(min=0)
# Segment union.
segments_union = (candidate_segments[:,1] - candidate_segments[:,0]) + (target_segment[1] - target_segment[0]) - segments_intersection
tIoU = segments_intersection / segments_union
tIoU[torch.isnan(tIoU)] = 0
tIoU[torch.isinf(tIoU)] = 0
return tIoU
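# Worked example (sketch): segment_iou(torch.tensor([2., 6.]), torch.tensor([[4., 8.], [7., 9.]]))
# gives intersections [2, 0] and unions [6, 6], i.e. tIoU = [1/3, 0].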
def generalized_segment_iou(target_segments,candidate_segments):
if candidate_segments.ndim !=2 or target_segments.ndim != 2:
raise ValueError('Dimension of arguments is incorrect')
n, m = candidate_segments.shape[0], target_segments.shape[0]
tiou = torch.zeros(n, m)
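# column i of tiou holds the IoU of every candidate segment against target segment i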
for i in range(m):
tiou[:, i] = segment_iou(target_segments[i,:], candidate_segments)
tiou[torch.isnan(tiou)] = 0
tiou[torch.isinf(tiou)] = 0
return tiou.to(candidate_segments.device)
| 1,137 | 33.484848 | 139 | py |
anticipatr | anticipatr-main/pretraining/main_pretraining.py | import os
import argparse
import random
import numpy as np
import time
from pathlib import Path
import json
import datetime
import pickle
import torch
from torch.utils.data import DataLoader
import utils.misc as utils
from tasks import build_task
from engine_pretraining import train_one_epoch, evaluate
parser = argparse.ArgumentParser()
# dataset parameter
parser.add_argument('--dataset', type=str,default="bf")
parser.add_argument('--root',type=str,help='Path to data root directory')
parser.add_argument('--num_nouns',type=int,default=300)
parser.add_argument('--num_verbs',type=int,default=48)
parser.add_argument('--num_actions',type=int,default=100)
parser.add_argument('--num_future_labels',type=int,default=-1)
parser.add_argument('--task',type=str,default='anticipation',choices=['anticipation'])
parser.add_argument('--anticipation',type=str,default='longfuture',choices=['longfuture'])
parser.add_argument('--fps',type=int,default=60)
parser.add_argument('--label_type',type=str,default='verb',choices=['verb','noun','action'])
parser.add_argument('--train_many_shot',action='store_true',default=False,help='training with many shot verbs')
parser.add_argument('--split',type=int,default=1)
parser.add_argument('--train_timestamps',type=str,default='0.2,0.3,0.4,0.5,0.6,0.7,0.8')
parser.add_argument('--val_timestamps',type=str,default='0.25,0.5,0.75')
# Model parameters
parser.add_argument('--model',type=str,default='antr')
parser.add_argument('--num_queries',type=int,default=10)
parser.add_argument('--num_pos_embed_dict',type=int,default=256)
parser.add_argument('--dim_latent',type=int,default=128)
parser.add_argument('--hidden_dim',type=int,default=256)
parser.add_argument('--position_embedding',type=str,default='sine')
parser.add_argument('--num_decoder_embedding',type=int,default=10000)
parser.add_argument('--position_type',type=str,default='index',choices=['index'])
parser.add_argument('--dropout',type=float,default=0.1,help='transformer dropout')
parser.add_argument('--nheads',type=int,default=8)
parser.add_argument('--dim_feedforward',type=int,default=2048)
parser.add_argument('--encoder',type=str,default='parallel')
parser.add_argument('--decoder',type=str,default='no_decoder')
parser.add_argument('--enc_layers',type=int,default=3)
parser.add_argument('--dec_layers',type=int,default=3)
parser.add_argument('--pre_norm',action='store_true')
parser.add_argument('--aux_loss',action='store_true')
parser.add_argument('--cuda',default=False,action='store_true',help='gpu mode')
parser.add_argument('--mp',action='store_true',help='gpu mode')
parser.add_argument('--eval',action='store_true',help='evaluation mode')
parser.add_argument('--norm_type',type=str,choices=['gn','bn'],default='bn',help="normalization type")
parser.add_argument('--activation',type=str,default='leaky_relu',help="transformer activation type")
# * Training
parser.add_argument('--pretraining_task',type=str)
parser.add_argument('--resume',type=str,default='',help='resume from a checkpoint')
parser.add_argument('--save_checkpoint_every',type=int,default=1000,help='checkpoint saving frequency')
parser.add_argument('--evaluate_every',type=int,default=5,help='frequency (in epochs) at which mAP/accuracy metrics are computed during evaluation')
parser.add_argument('--evaluate_every_epoch',type=int,default=5,help='run evaluation every this many training epochs')
parser.add_argument('--num_workers',type=int,default=0,help='number of workers')
parser.add_argument('--batch_size',type=int,default=8,help='batch_size')
parser.add_argument('--epochs',type=int,default=10,help='number of epochs')
parser.add_argument('--step_size',type=int,default=64,help='number of steps before backpropagation')
parser.add_argument('--start_epoch',type=int,default=0,help='starting epoch')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_joiner', default=0, type=float)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--clip_max_norm', default=1, type=float,help='gradient clipping max norm')
parser.add_argument('--output_dir', type=str,default='./pretraining_expts/checkpoints/',help='path to save intermediate checkpoints')
# * Distributed Training
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--device', default='cuda',help='device to use for training / testing')
args = parser.parse_args()
print(args)
def main(args):
bz = args.batch_size
lr = args.lr
if args.cuda:
if torch.cuda.device_count() >= 1:
utils.init_distributed_mode(args)
device = torch.device(args.device)
else:
device = torch.device('cpu')
# fix the seed for reproducibility
if args.cuda:
seed = args.seed + utils.get_rank()
else:
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# get task setup -- datasets, model, loss
dataset_train, dataset_test, model, criterion = build_task(args)
if args.cuda and args.distributed:
sampler_train = torch.utils.data.distributed.DistributedSampler(dataset_train,shuffle=True)
sampler_test = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_test = DataLoader(dataset_test, 1, sampler=sampler_test, drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
model.to(device)
criterion.to(device)
model_without_ddp = model
if args.cuda and args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],find_unused_parameters=True)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
# set up model training
param_dicts = [{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" not in n and p.requires_grad]},
{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" in n and p.requires_grad], "lr": args.lr_joiner,},]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
# output and checkpoints directory
checkpoint_dir = args.output_dir
if not os.path.exists(checkpoint_dir):
try:
os.makedirs(checkpoint_dir)
except OSError:
pass
if args.resume:
checkpoint = Path(args.resume)
assert checkpoint.exists()
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
print("Start Training")
start_time = time.time()
optimizer.zero_grad()
for epoch in range(args.start_epoch, args.epochs):
if args.cuda and args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(epoch, args.clip_max_norm, model, criterion, data_loader_train, optimizer, lr_scheduler, args.dataset, device)
if args.output_dir:
checkpoint_dir = Path(checkpoint_dir)
checkpoint_paths = [checkpoint_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and a given frequency
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % args.save_checkpoint_every == 0:
checkpoint_paths.append(checkpoint_dir / f'checkpoint{epoch:05}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args,}, checkpoint_path)
# evaluation
if epoch % args.evaluate_every_epoch == 0:
test_stats = evaluate(epoch, model, criterion, data_loader_test, args.dataset, args.evaluate_every, device)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, **{f'test_{k}': v for k, v in test_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (checkpoint_dir / 'log.json').open("a") as f:
f.write(json.dumps(log_stats) + "\n")
lr_scheduler.step()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == "__main__":
main(args)
| 9,503 | 45.817734 | 208 | py |
anticipatr | anticipatr-main/pretraining/engine_pretraining.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import os,sys
import copy
import numpy as np
import math
from typing import Iterable
import time
import utils.misc as utils
import datasets
from metrics.longfuture_metrics import AnticipationEvaluator
def train_one_epoch(epoch, max_norm, model, criterion, data_loader, optimizer, scheduler, dataset, device):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 50
step = 0
predictions = {}
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask, targets, tgt_mask)
losses = criterion(outputs, targets)
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
losses_mAP = {k:v for k,v in losses.items() if 'AP' in k or 'acc' in k}
losses_mAP = [{k:v[i] for k,v in losses_mAP.items()} for i in range(samples.tensors.size(0))]
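# losses_mAP now holds one dict of evaluation tensors ('AP'/'acc' entries) per batch element,
# so the predictions can be keyed by video id below for the epoch-level metric computation.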
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss = losses_reduced_scaled.item()
if not math.isfinite(loss):
print("Loss is {}, stopping training".format(loss))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
loss_value.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
res = {datasets.ds_utils.getVideoName(dataset, target['video_id'].tolist())+'_'+str(int(target['start_frame']))+'_'+str(int(target['end_frame'])): output for target, output in zip(targets,losses_mAP)}
predictions.update(res)
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
######For mAP calculation on training data###########
all_predictions = utils.all_gather(predictions)
stats = {}
if epoch % 5 == 0:
evaluator = AnticipationEvaluator(dataset)
eval_stats = evaluator.evaluate(all_predictions)
stats = {k:v for k,v in eval_stats.items()}
train_stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
train_stats.update(**stats)
print("Train epoch:", epoch, "Averaged stats:", train_stats)
return train_stats
def evaluate(epoch, model, criterion, data_loader, dataset, evaluate_every, device):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test: [{}]'.format(epoch)
print_freq = 50
step = 0
predictions = {}
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask, targets, tgt_mask)
losses = criterion(outputs, targets)
losses_mAP = {k:v for k,v in losses.items() if 'AP' in k or 'acc' in k}
losses_mAP = [{k:v[i] for k,v in losses_mAP.items()} for i in range(samples.tensors.size(0))]
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
metric_logger.update(loss=losses_reduced_scaled.item(), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
res = {datasets.ds_utils.getVideoName(dataset, target['video_id'].tolist())+'_'+str(int(target['start_frame']))+'_'+str(int(target['end_frame'])): output for target, output in zip(targets,losses_mAP)}
predictions.update(res)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
######For mAP calculation need to gather all data###########
all_predictions = utils.all_gather(predictions)
stats = {}
if epoch % evaluate_every == 0:
evaluator = AnticipationEvaluator(dataset)
test_stats = evaluator.evaluate(all_predictions)
test_loss_stats = {k: meter.global_avg for k, meter in metric_logger.meters.items() if 'mAP' not in k}
test_stats.update(**test_loss_stats)
print("Test epoch:", epoch, "Averaged test stats:", test_stats)
return test_stats
| 5,666 | 39.769784 | 208 | py |
anticipatr | anticipatr-main/pretraining/models/model.py | import torch
import torch.nn.functional as F
from torch import nn
from .transformer import build_transformer
from .joiner import build_joiner
import numpy as np
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
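# Sketch: MLP(256, 256, 2, num_layers=3) stacks Linear(256->256), Linear(256->256), Linear(256->2)
# with ReLU between layers and no activation on the final output.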
class EncoderSnippetLongfutureAnticipation(nn.Module):
def __init__(self, joiner, transformer, dim_feedforward, num_classes, num_queries, aux_loss = True):
""" Initializes the model.
Parameters:
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of action classes
num_queries: number of action queries, i.e. prediction slots. This is the maximal number
of future actions the model can predict for a single video.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.joiner = joiner
hidden_dim = transformer.d_model
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.input_proj = nn.Conv1d(2048, hidden_dim, kernel_size=1)
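# query_embed holds the learned action queries, class_embed maps the transformer output to
# per-class logits, and input_proj is a 1x1 Conv1d reducing the 2048-d snippet features to hidden_dim.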
self.aux_loss = aux_loss
def forward(self, samples, mask, targets, tgt_mask=None):
""" The forward expects two inputs:
- samples: batched videos features, of shape [batch_size x 2048 x T]
- mask: a binary mask of shape [batch_size x T], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_segments": The normalized boxes coordinates for all queries, represented as
(start_time, end_time). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
assert mask is not None
sample_positions = torch.empty_like(mask) ## set for positional encodings
src, pos = self.joiner(samples,mask,sample_positions)
input = self.input_proj(src)
hs = self.transformer(input,mask, tgt_mask, self.query_embed.weight,pos)[0]
outputs_class = self.class_embed(hs)
out = {'pred_logits': outputs_class}
return out
class CriterionSnippetLongfutureAnticipation(nn.Module):
"""
This class is the implementation of multilabel classification loss.
"""
def __init__(self, num_classes, weight_dict, losses,fps):
super().__init__()
self.num_classes = num_classes
self.weight_dict = weight_dict
self.losses = losses
def get_mAP(self,pred,labels,label_mask):
mAPs = dict()
mAPs['mAP'] = torch.cat((pred[:,label_mask[0]].detach().cpu(), labels[:,label_mask[0]].detach().cpu()),1)
for i in range(label_mask.shape[1]):
pred_i = pred[:, label_mask[0][i]].squeeze(1)
labels_i = labels[:, label_mask[0][i]].squeeze(1)
mAPs['AP_{}'.format(i)] = torch.cat((pred_i.detach().cpu(), labels_i.detach().cpu()),1)
return mAPs
def loss_labels(self,outputs, targets,log=True):
src_logits = torch.sigmoid(outputs['pred_logits'].mean(1))
target_classes = torch.cat([t['labels'].unsqueeze(0) for t in targets])
loss_ce = F.binary_cross_entropy(src_logits, target_classes,reduction='mean')
losses = {'loss_ce': loss_ce}
losses.update(self.get_mAP(src_logits, target_classes, targets[0]['label_mask']))
return losses
def get_loss(self, loss, outputs, targets, **kwargs):
loss_map = {
'labels': self.loss_labels
}
assert loss in loss_map, f'{loss} loss not defined'
return loss_map[loss](outputs,targets,**kwargs)
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets))
if 'aux_outputs' in outputs:
for i,aux_outputs in enumerate(outputs['aux_outputs']):
for loss in self.losses:
kwargs = {'log': False}
l_dict = self.get_loss(loss,aux_outputs,targets,**kwargs)
l_dict = {k + f'_{i}': v for k,v in l_dict.items()}
losses.update(l_dict)
return losses
def build(args):
joiner = build_joiner(args)
transformer = build_transformer(args)
if args.label_type == 'verb':
num_classes = args.num_verbs
if args.label_type == 'noun':
num_classes = args.num_nouns
if args.label_type == 'action':
num_classes = args.num_actions
if args.pretraining_task == 'snippet_longfuture_anticipation':
model = EncoderSnippetLongfutureAnticipation(
joiner,
transformer,
dim_feedforward=args.dim_feedforward,
num_classes=num_classes,
num_queries=1,
aux_loss=args.aux_loss,
)
losses = ['labels']
weight_dict = {'loss_ce': 1}
criterion = CriterionSnippetLongfutureAnticipation(num_classes=num_classes, weight_dict=weight_dict, losses=losses,fps=args.fps)
else:
print("unindentified pretraining task")
print(model)
return model, criterion
| 6,367 | 38.308642 | 136 | py |
anticipatr | anticipatr-main/pretraining/models/position_encoding.py | """
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSineIndex(nn.Module):
"""
Sinusoidal positional encodings based on sequence timestamps
"""
def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x,mask):
assert mask is not None
not_mask = ~mask
not_mask = not_mask.to(mask.device)
x_embed = not_mask.cumsum(1, dtype=torch.float32)
if self.normalize:
eps = 1e-6
x_embed = x_embed / (x_embed[:, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=2).flatten(2)
pos = pos_x.permute(0, 2, 1)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim
if args.position_embedding == 'sine' and args.position_type=='index':
position_embedding = PositionEmbeddingSineIndex(N_steps, normalize=True)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
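if __name__ == "__main__":
    # Minimal sanity check added for illustration (not part of the original
    # module); the sizes below are made up.
    enc = PositionEmbeddingSineIndex(num_pos_feats=8)
    x = torch.randn(2, 8, 6)                      # (batch, hidden_dim, time)
    mask = torch.zeros(2, 6, dtype=torch.bool)    # no padded positions
    pos = enc(x, mask)
    print(pos.shape)                              # expected torch.Size([2, 8, 6])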
| 1,693 | 33.571429 | 97 | py |
anticipatr | anticipatr-main/pretraining/models/transformer.py | """
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, encoding='parallel', decoding='no_decoder', dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False,
return_intermediate_dec=False):
super().__init__()
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
if decoding != 'no_decoder':
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec)
self._reset_parameters()
self.encoding = encoding
self.decoding = decoding
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, src_mask, tgt_mask, query_embed, pos_embed):
        # flatten (bs, c, t) to (t, bs, c)
bs, c, t = src.shape
src = src.permute(2, 0, 1)
pos_embed = pos_embed.permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
tgt = torch.zeros_like(query_embed)
encoder_mask = None
memory = self.encoder(src, mask=encoder_mask, src_key_padding_mask=src_mask, pos=pos_embed)
tgt_mask = None
if self.decoding != 'no_decoder':
hs = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_key_padding_mask=src_mask, pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory.permute(1, 2, 0)
elif self.decoding == 'no_decoder':
return memory.permute(1,0,2), torch.empty(memory.size()).to(memory.device)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
encoding=args.encoder,
decoding=args.decoder,
activation=args.activation,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "elu":
return F.elu
if activation == "leaky_relu":
return F.leaky_relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 12,622 | 39.458333 | 139 | py |
anticipatr | anticipatr-main/pretraining/models/joiner.py | """
Joiner modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from .position_encoding import build_position_encoding
class Joiner(nn.Sequential):
def __init__(self,position_embedding,position_type,position_encoding_type):
super().__init__(position_embedding)
self.position_type = position_type
self.position_encoding_type = position_encoding_type
def forward(self, x, mask,positions):
if self.position_type == 'index' and self.position_encoding_type=='sine':
pos = self[0](x,mask)
return x, pos
def build_joiner(args):
position_embedding = build_position_encoding(args)
model = Joiner(position_embedding,position_type=args.position_type,position_encoding_type=args.position_embedding)
return model
| 959 | 29.967742 | 118 | py |
anticipatr | anticipatr-main/pretraining/metrics/longfuture_metrics.py | import math
import numpy as np
import torch
import warnings
from collections import OrderedDict
warnings.filterwarnings("ignore", category=UserWarning)
import sklearn.metrics as skmetrics
class AnticipationEvaluator(object):
""" The pretraining task is multilabel classification problem."""
def __init__(self):
self.apmeter = OrderedDict()
self.output = OrderedDict()
self.accmeter = OrderedDict()
self.output['mAP_micro'] = []
self.output['mAP_macro'] = []
def get_AP_perclass(self, predictions):
if isinstance(predictions,dict):
predictions = [predictions]
preds = {}
preds['mAP'] = []
targets = {}
targets['mAP'] = []
for p in predictions:
for k,v in p.items():
for k_ap,v_ap in v.items():
if 'mAP' in k_ap:
preds[k_ap].append(v_ap[:v_ap.size(0)//2].numpy())
targets[k_ap].append(v_ap[v_ap.size(0)//2:].numpy())
for k_ap,v_ap in preds.items():
y_true = np.asarray([t for t in targets[k_ap]])
y_pred = np.asarray([p for p in preds[k_ap]])
if 'mAP' in k_ap:
                self.output['mAP_macro'].append(skmetrics.average_precision_score(y_true, y_pred, average='macro'))
                self.output['mAP_micro'].append(skmetrics.average_precision_score(y_true, y_pred, average='micro'))
def evaluate(self,predictions):
self.get_AP_perclass(predictions)
metrics = {}
for k,v in self.output.items():
if 'mAP' in k:
metrics[k] = v
return metrics
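    # Added note (illustration, not original code): `predictions` is expected to
    # be a dict -- or a list of such dicts -- whose per-sample values contain
    # entries keyed like 'mAP'; each entry stores the sigmoid scores and the
    # multi-hot labels concatenated along dim 0, so the first half is split off
    # as predictions and the second half as targets before sklearn's
    # average_precision_score is computed with macro and micro averaging.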
| 1,809 | 31.909091 | 151 | py |
anticipatr | anticipatr-main/pretraining/datasets/bf.py | """
Builds a dataloader class for snippet-level anticipation task
"""
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_snippetprediction import SequenceDatasetLongFuture
def build_bf_pretraining(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "i3d_feats.pkl"
label_type = 'verb'
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/bf/longfuture/annotations/bf_verbs.csv'}
pretraining_train_vids = "pretraining_data/bf/train_videos.txt"
pretraining_val_vids = "pretraining_data/bf/val_videos.txt"
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
val_timestamps = [float(t) for t in args.val_timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'train_vid_list': pretraining_train_vids,
'val_vid_list': pretraining_val_vids,
'num_verbs': 48,
'num_nouns': 1,
'num_actions': 1,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns,
'pretraining_task': args.pretraining_task,
'num_future_labels': args.num_future_labels
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,770 | 36.680851 | 130 | py |
anticipatr | anticipatr-main/pretraining/datasets/ek.py | import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_snippetprediction import SequenceDatasetLongFuture
def build_ek_pretraining(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "i3d_feats.pkl")
label_type = '' if args.label_type == 'action' else args.label_type
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/ek/longfuture/annotations/EPIC_many_shot_verbs.csv', 'noun':'data/ek/longfuture/annotations/EPIC_many_shot_nouns.csv'}
pretraining_train_vids = "pretraining_data/ek/train_videos.txt"
pretraining_val_vids = "pretraining_data/ek/val_videos.txt"
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
val_timestamps = [float(t) for t in args.val_timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'train_vid_list': pretraining_train_vids,
'val_vid_list': pretraining_val_vids,
'num_verbs': args.num_verbs ,
'num_nouns': args.num_nouns,
'num_actions': args.num_actions,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns,
'pretraining_task': args.pretraining_task,
'num_future_labels': args.num_future_labels
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,866 | 41.431818 | 157 | py |
anticipatr | anticipatr-main/pretraining/datasets/baseds_snippetprediction.py | """
Implementation of dataloader for snippet anticipation.
Code inspired by: https://github.com/facebookresearch/ego-topo
"""
import bisect
import copy
import os
import os.path as osp
import random
from functools import partial
import itertools
import numpy as np
import pickle as pkl
import collections
from collections.abc import Sequence
import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from datasets import ds_utils
class DatasetSegmentRecord(object):
def __init__(self, row, clip_range=None):
self._data = row
self.clip_range = clip_range
@property
def path(self):
return self._data[0]
@property
def start_frame(self):
return int(self._data[1])
@property
def end_frame(self):
return int(self._data[2])
@property
def label(self):
return [int(x) for x in self._data[3:]]
@property
def num_frames(self):
return self.end_frame - self.start_frame + 1
@property
def clip_start_frame(self):
return int(self._data[1]) if self.clip_range is None else int(self.clip_range[0])
@property
def clip_end_frame(self):
return int(self._data[2]) if self.clip_range is None else int(self.clip_range[1])
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
class SequenceDatasetLongFuture(Dataset):
def __init__(self, feature_file, ann_file, label_type, test_mode, task, fps, dset, train_vid_list, val_vid_list, num_verbs, num_nouns, num_actions, train_many_shot=False, manyshot_annotations={}, num_future_labels=-1,**kwargs):
self.feature_file = feature_file
self.ann_file = ann_file
self.test_mode = test_mode
self.label = label_type
self.task = task
self.dset = dset
self.train_many_shot = train_many_shot
self.train_vid_list = train_vid_list
self.val_vid_list = val_vid_list
self.num_verbs = num_verbs
self.num_nouns = num_nouns
self.num_actions = num_actions
self.fps = fps
self.num_future_labels = num_future_labels
with open(feature_file,'rb') as f:
self.feature_data = pkl.load(f)
if train_many_shot:
manyshot_verbs = sorted(get_many_shot(manyshot_annotations['verb']))
manyshot_nouns = sorted(get_many_shot(manyshot_annotations['noun']))
if train_many_shot:
self.num_verbs, self.num_nouns = len(manyshot_verbs), len(manyshot_nouns)
self.manyshot_verbs, self.manyshot_nouns = manyshot_verbs, manyshot_nouns
else:
manyshot_nouns, manyshot_verbs = [],[]
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(self.ann_file)]
if self.dset in ['ek','egtea']:
int_counts = [(record.label[0], record.label[1]) for record in records]
int_counts = collections.Counter(int_counts).items()
int_counts = sorted(int_counts, key=lambda x: -x[1])[0:self.num_actions]
self.int_to_idx = {interact:idx for idx, (interact, count) in enumerate(int_counts)}
else:
self.int_to_idx = {}
self.data = self.load_longfuture_anticipation_annotations(ann_file)
if train_many_shot:
for record in self.data:
record.verbs = [manyshot_verbs.index(x) for x in record.verbs if x in manyshot_verbs]
record.nouns = [manyshot_nouns.index(x) for x in record.nouns if x in manyshot_nouns]
# Only a few nouns/ints will actually have gt positives
# Pass these as part of the batch to evaluate mAP
# Don't know how to pass these in the config
eval_ints = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_ints |= set(record.ints)
eval_set = torch.zeros(1, self.num_actions)
eval_set[0, list(eval_ints)] = 1
self.eval_ints = eval_set.byte()
eval_nouns = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_nouns |= set(record.nouns)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
self.eval_nouns = eval_set.byte()
else:
eval_set = torch.zeros(3, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
manyshot = eval_nouns & set(manyshot_nouns)
rareshot = eval_nouns - set(manyshot_nouns)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_nouns = eval_set.byte()
else:
self.eval_ints = torch.zeros(1, self.num_actions).byte()
self.eval_nouns = torch.zeros(1, self.num_nouns).byte()
eval_verbs = set()
for record in self.data:
eval_verbs |= set(record.verbs)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
else:
eval_set = torch.zeros(3, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
manyshot = eval_verbs & set(manyshot_verbs)
rareshot = eval_verbs - set(manyshot_verbs)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_verbs = eval_set.byte()
self.prepare = RecordSnippetLongfutureAnticipationData(self.feature_data, self.dset, self.num_nouns, self.num_verbs, self.num_actions, self.int_to_idx, self.fps, self.label, self.eval_verbs, self.eval_nouns, self.eval_ints,self.test_mode)
def load_longfuture_anticipation_annotations(self, ann_file):
print("Loading longfuture anticipation annotations")
vid_lengths = open(self.ann_file.replace('.csv', '_nframes.csv')).read().strip().split('\n')
vid_lengths = [line.split('\t') for line in vid_lengths]
vid_lengths = {k:int(v) for k,v in vid_lengths}
if self.test_mode:
vidfile = self.val_vid_list
else:
vidfile = self.train_vid_list
with open(vidfile,'rb') as f:
vid_list = [line.rstrip().decode() for line in f]
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(ann_file)]
records_by_vid = collections.defaultdict(list)
for record in records:
if self.dset=='ek':
path = record.path.split('/')[-1]
else:
path = record.path
if path in vid_list:
record.uid = '%s_%s_%s'%(record.path, record.start_frame, record.end_frame)
records_by_vid[record.path].append(record)
records = []
for vid in records_by_vid:
vrecords = sorted(records_by_vid[vid], key=lambda record: record.end_frame)
length = vid_lengths[vid]
if vid not in self.feature_data:
continue
for segment_idx, segment_record in enumerate(vrecords[:-2]):
record_length = segment_record.end_frame - segment_record.start_frame + 1
if not any(x in self.feature_data[vid].keys() for x in list(range(segment_record.start_frame, segment_record.end_frame+1))):
continue
if self.dset in ['bf']:
invalid_verbs = [0]
if record_length <= 15:
continue
if segment_record.label[0] == 0:
continue
if self.dset in ['salads']:
if record_length <= 15:
continue
invalid_verbs = [17, 18]
if segment_record.label[0] in [17,18]:
continue
else:
if record_length <= 1:
continue
invalid_verbs = []
# create snippet record: label has to be future labels
future_records = [record for record in vrecords[segment_idx+1:-1]]
record = segment_record
record.verbs = sorted(set([frec.label[0] for frec in future_records]))
if self.num_future_labels > 0:
                    record.verbs = record.verbs[:self.num_future_labels]
if self.dset in ['ek', 'egtea']:
record.nouns = sorted(set([frec.label[1] for frec in future_records]))
record.ints = sorted(set([self.int_to_idx[(frec.label[0], frec.label[1])] for frec in future_records if (frec.label[0], frec.label[1]) in self.int_to_idx]))
if self.num_future_labels > 0:
                        record.nouns = record.nouns[:self.num_future_labels]
                        record.ints = record.ints[:self.num_future_labels]
record.duration = record.end_frame - record.start_frame
record.fps = self.fps
records.append(record)
#if len(records) == 8: return records
print("Snippet based longfuture anticipation", len(records))
return records
def get_ann_info(self, idx):
return {
'path': self.data[idx].path,
'num_frames': self.data[idx].num_frames,
'label': self.data[idx].label
}
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
vrecord = self.data[idx]
inputs, targets = self.prepare(vrecord)
return inputs, targets
class RecordSnippetLongfutureAnticipationData(object):
def __init__(self, feature_data, dset, num_nouns, num_verbs, num_actions, int_to_idx, fps, label_type, eval_verbs, eval_nouns, eval_actions,test_mode):
self.feature_data = feature_data
self.dset = dset
self.num_nouns = num_nouns
self.num_verbs = num_verbs
self.num_actions = num_actions
self.int_to_idx = int_to_idx
self.fps = fps
self.label_type = label_type
self.eval_verbs = eval_verbs
self.eval_nouns = eval_nouns
self.eval_actions = eval_actions
self.test_mode = test_mode
def __call__(self, vrecord):
## features of past records
vidname = vrecord.path
duration = vrecord.duration
features = []
for idx in range(vrecord.start_frame,vrecord.end_frame+1):
if idx in self.feature_data[vidname].keys():
if self.dset in ['ek', 'egtea']:
features.append(torch.tensor(self.feature_data[vidname][idx]))
if self.dset in ['bf','salads']:
# set snippet_fps to choose the sampling rate
snippet_fps = 1
if idx% snippet_fps ==0:
features.append(torch.tensor(self.feature_data[vidname][idx]))
        features = torch.stack(features).to(torch.float32).permute(1, 0)
video_id = ds_utils.getVideoId(self.dset, vidname)
## output representation
set_targets = {}
set_targets['video_id'] = torch.tensor(video_id)
if self.label_type == 'action':
label = torch.zeros(self.num_actions)
label[vrecord.ints] = 1
set_targets['labels'] = to_tensor(label)
set_targets['label_mask'] = to_tensor(self.eval_actions)
elif self.label_type == 'noun':
label = torch.zeros(self.num_nouns)
label[vrecord.nouns] = 1
set_targets['labels'] = to_tensor(label)
set_targets['label_mask'] = to_tensor(self.eval_nouns)
elif self.label_type == 'verb':
label = torch.zeros(self.num_verbs)
label[vrecord.verbs] = 1
set_targets['labels'] = to_tensor(label)
set_targets['label_mask'] = to_tensor(self.eval_verbs)
set_targets['fps'] = torch.tensor([vrecord.fps],dtype=torch.float32)
set_targets['duration'] = torch.tensor([vrecord.duration/vrecord.fps],dtype=torch.float32)
set_targets['start_frame'] = torch.tensor([vrecord.start_frame],dtype=torch.float32)
set_targets['end_frame'] = torch.tensor([vrecord.end_frame],dtype=torch.float32)
return features, set_targets
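# Added note (illustration, not original code): assuming each per-frame feature
# is a 1-D vector, __call__ returns
#   features    -- float tensor of shape (feat_dim, num_observed_frames)
#   set_targets -- dict with 'video_id', multi-hot 'labels', 'label_mask',
#                  'fps', 'duration' (seconds), 'start_frame' and 'end_frame',
# where 'labels' marks every verb/noun/action that occurs later in the video,
# i.e. the long-future anticipation target for the observed snippet.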
| 12,985 | 38.956923 | 246 | py |
anticipatr | anticipatr-main/pretraining/datasets/__init__.py | import torch.utils.data
import torchvision
def build_dataset(args):
if args.dataset == 'ek':
from datasets.ek import build_ek_pretraining
dataset_train = build_ek_pretraining(args,mode='train')
dataset_val = build_ek_pretraining(args,mode='val')
return dataset_train, dataset_val
elif args.dataset == 'bf':
from datasets.bf import build_bf_pretraining
dataset_train = build_bf_pretraining(args,mode='train')
dataset_val = build_bf_pretraining(args,mode='val')
return dataset_train, dataset_val
| 568 | 34.5625 | 63 | py |
anticipatr | anticipatr-main/pretraining/utils/misc.py | """
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references and
https://github.com/facebookresearch/detr
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__[2:4]) < 7:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
max_size = _max_by_axis([list(feat.shape) for feat in tensor_list])
batch_shape = [len(tensor_list)] + max_size
b, c, t = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, t), dtype=torch.bool, device=device)
for feat, pad_feat, m in zip(tensor_list, tensor, mask):
pad_feat[: feat.shape[0], : feat.shape[1]].copy_(feat)
m[: feat.shape[1]] = False
return NestedTensor(tensor, mask)
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
#torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,world_size=args.world_size, rank=args.rank)
torch.distributed.init_process_group(backend=args.dist_backend)
if not torch.cuda.is_available():
torch.distributed.barrier()
#torch.distributed.barrier(group=torch.distributed.group.WORLD)
#setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(5,10,20)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
if float(torchvision.__version__[:3]) < 0.7:
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
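if __name__ == "__main__":
    # Small self-check added for illustration (not part of the original utility
    # file); it pads two variable-length feature sequences into one batch and
    # exercises SmoothedValue. Sizes are arbitrary.
    feats = [torch.randn(16, 7), torch.randn(16, 12)]
    batch = nested_tensor_from_tensor_list(feats)
    print(batch.tensors.shape, batch.mask.shape)   # (2, 16, 12) and (2, 12)
    meter = SmoothedValue(window_size=5)
    for v in [1.0, 2.0, 3.0]:
        meter.update(v)
    print(str(meter))                              # "2.0000 (2.0000)"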
| 13,549 | 30.807512 | 138 | py |
anticipatr | anticipatr-main/pretraining/tasks/__init__.py | import torch
from datasets import build_dataset
from models import build_model
def build_task(args):
dataset_train,dataset_test = build_dataset(args)
model, criterion = build_model(args)
return dataset_train, dataset_test, model, criterion
| 257 | 18.846154 | 56 | py |
benchmarks | benchmarks-master/my_tests/reportLmdbError.py | from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from six.moves import xrange # pylint: disable=redefined-builtin
import lmdb
import PIL.Image
from StringIO import StringIO
# specify dataset path
path_prefix = '/mnt/terabyte/datasets/imagenet/caffe/ilsvrc12_'
path_postfix = '_lmdb'
supported_modes = ['train', 'val']
mode = supported_modes[0]
full_path = path_prefix + mode + path_postfix
# specify how many datums to read at once
batch_length = 11
# set numpy array print options
np.set_printoptions(threshold=21)
reader = tf.LMDBReader(name='reader')
keys_queue = tf.FIFOQueue(
capacity=32,
dtypes=[dtypes.string],
shapes=())
# scenario 1 (buggy)
keys1, values1 = reader.read_up_to(keys_queue, batch_length)
jpg_buffer1 = tf.decode_raw(values1, out_type=tf.uint8)
# scenario 2 (good)
keys2, values2 = reader.read_up_to(keys_queue, 11)
jpg_buffer2 = tf.decode_raw(values2, out_type=tf.uint8)
with tf.Session() as sess:
keys_queue.enqueue([full_path]).run()
keys_queue.close().run()
buffer2 = sess.run(jpg_buffer2)
print(buffer2.shape)
print(buffer2[0:20])
buffer1 = sess.run(jpg_buffer1)
print(buffer1.shape)
print(buffer1[:,0:20]) | 1,388 | 27.9375 | 65 | py |
benchmarks | benchmarks-master/my_tests/LmdbInputImagePreprocessor.py | # Copyright 2017 Ioannis Athanasiadis(supernlogn). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from six.moves import xrange # pylint: disable=redefined-builtin
import lmdb
import PIL.Image
from StringIO import StringIO
path_prefix = '/mnt/terabyte/datasets/imagenet/caffe/ilsvrc12_'
path_postfix = '_lmdb'
supported_modes = ['train', 'val']
mode = supported_modes[0]
full_path = path_prefix + mode + path_postfix
# def read_lmdb(lmdb_file):
# cursor = lmdb.open(lmdb_file, readonly=True).begin().cursor()
# datum = caffe.proto.caffe_pb2.Datum()
# for _, value in cursor:
# datum.ParseFromString(value)
# s = StringIO()
# s.write(datum.data)
# s.seek(0)
# yield np.array(PIL.Image.open(s)), datum.label
# for im, label in read_lmdb(full_path):
# print label, im
np.set_printoptions(threshold='nan')
# env = lmdb.open(full_path, readonly=True)
keys_list = []
i = 1
# with env.begin() as txn:
# cursor = txn.cursor()
# for key, value in cursor:
# val = np.fromstring(value, dtype=np.uint8)
# print("env print: ", val.shape)
# label = val[12:17]
# print([bin(x)[2:].zfill(8) for x in label])
# keys_list.append(key)
# i = i +1
# if i >= 10:
# break
# print(key)
# env.close()
np.set_printoptions(threshold=20)
# print(int(value[0]))
# I1 = value.index( '\xFF\xD8' )
# I2 = value.index( '',I1)
# print(I1)
# value = np.fromstring(value,dtype=np.uint8)
# # print(value[I1:])
# header_data = value[0:17]
# print(np.size(value) - 256*256*3)
# img = value[17:]
# img = np.reshape(img, [256, 256, 3])
# imgToShow = PIL.Image.fromarray(img, 'RGB')
# imgToShow.save('tensImg.png')
# path_tensor = tf.p.aconvert_to_tensor(len(full_path), dtype=tf.int32)
# tf.random_shuffle(keys_tensor)
# tf.train.add_queue_runner(tf.train.QueueRunner(keys_queue,[kq_enqueue_op] * 1))
print(len(full_path))
reader = tf.LMDBReader(name='reader')
keys_queue = tf.FIFOQueue(
capacity=2,
dtypes=[dtypes.string],
shapes=())
# i = tf.Variable(initial_value=0, trainable=False, name="lmdb_iterator_var")
datum_size = 196625
vals = tf.zeros(shape=(1, datum_size), dtype=tf.uint8)
def in_body(in_iterator, vals):
vals = tf.concat(axis=0,
values=[vals,
tf.expand_dims(axis=0,
input=tf.decode_raw(reader.read(keys_queue)[1],
out_type=tf.uint8)[:])])
return in_iterator + 1, vals
in_i = []
in_while_reader = []
for i in range(0, 3):
in_i.append(tf.constant(0))
in_while_reader.append(tf.while_loop(cond=lambda i, vals: tf.less(i, 10),
body=in_body,
loop_vars=[in_i[-1], vals],
shape_invariants=[
in_i[-1].get_shape(), tf.TensorShape((None, datum_size))],
parallel_iterations=1))
def out_body(out_iterator, vals):
out_case = []
for i in range(0, 3):
        out_case.append((tf.equal(out_iterator, i),
                         lambda i=i: in_while_reader[i]))  # bind i at definition time to avoid late-binding bug
r = tf.case(out_case, default=lambda: in_while_reader[0])
vals = tf.concat(axis=0,
values=[vals, r[1]])
return out_iterator + 1, vals
out_i = tf.constant(0)
out_while_reader = tf.while_loop(cond=lambda out_i, vals: tf.less(out_i, 2),
body=out_body,
loop_vars=[out_i, vals],
shape_invariants=[
out_i.get_shape(), tf.TensorShape((None, datum_size))],
parallel_iterations=1)
keys, values = reader.read(keys_queue)
jpg_buffer = tf.decode_raw(values, out_type=tf.uint8)
enqueue_op = keys_queue.enqueue([values])
# jpg_label = jpg_buffer[:,-5:-1]
# jpg_img = jpg_buffer[:,12:-5]
# jpg_img = tf.reshape(jpg_img, [32, 3, 256, 256])
# jpg_img = tf.transpose(jpg_img, [0,2,3,1])
# rev = tf.constant([2], dtype=tf.int32)
# jpg_img = tf.reverse(jpg_img, rev)
with tf.Session() as sess:
# keys_queue.enqueue([full_path]).run()
# keys_queue.close().run()
w = sess.run(enqueue_op)
print(w.shape)
# print(w)
# search if two rows are the same
for it1 in range(w.shape[0]):
ans = False
for it2 in range(it1 + 1, w.shape[0]):
if (np.array_equal(w[it1, :], w[it2, :])):
print("Found them: %d, %d" % (it1, it2))
# # coord = tf.train.Coordinator()
# # threads = tf.train.start_queue_runners(coord=coord)
# imgToShow = PIL.Image.fromarray(img, 'RGB')
# imgToShow.save('tensImg2.jpg')
# k,v = sess.run([keys, values])
# # print(k, v)
# print((len(v) - 2556*256*3))
# b = np.array(v)
# np.reshape(b,[256, 256, 3])
# # coord.request_stop()
# # coord.join(threads)
| 5,750 | 29.754011 | 101 | py |
Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/dataloader.py | import random
import shutil
import cv2
import torch
from PIL import Image
from matplotlib import pylab as plt
import nibabel as nib
from nibabel import nifti1
import torchvision
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import os
import numpy as np
class MyData(Dataset):
def __init__(self, root_dir, transform=None):
self.root_dir = root_dir
self.img_path = os.listdir(self.root_dir)
self.transform = transform
self.classes2d = ['00', '01', '02', '03', '10', '11', '12', '13']
def __getitem__(self, idx):
img_name = self.img_path[idx]
label = img_name.split('@')[-1][0:-4]
label_tensor = torch.zeros(8)
label_tensor[self.classes2d.index(label)] = 1.0
img_item_path = os.path.join(self.root_dir, img_name)
img_idx = np.array(Image.open(img_item_path), dtype='uint8')
        # Adaptive histogram equalization (CLAHE), applied per channel
img_idx = img_idx.reshape(3,img_idx.shape[0],img_idx.shape[1])
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img_res = np.zeros_like(img_idx)
for i in range(img_idx.shape[0]):
img_res[i,:, :] = clahe.apply(img_idx[i,:, :])
img_res = Image.fromarray(img_res.reshape(img_res.shape[1],img_res.shape[2],3))
        # Apply the transform, if any
if self.transform is not None:
img_res = self.transform(img_res)
return img_res, label_tensor
def __len__(self):
return len(self.img_path)
class GenericData:
def __init__(self, save_path, load_path, split_ratio, dim):
self.save_path = save_path
self.load_path = load_path
self.split_ratio = split_ratio #[0.8,0.2]
self.dim = dim
self.classes2d = ['00', '01', '02', '03', '10', '11', '12', '13']
def generic_data(self):
train_save_path = os.path.join(self.save_path, 'train')
test_save_path = os.path.join(self.save_path, 'test')
for path in [train_save_path, test_save_path]:
            if os.path.exists(path):  # if the folder already exists, clear it first
shutil.rmtree(path)
            os.makedirs(path)  # create a fresh empty folder
if self.dim == 2:
classes = self.classes2d
else:
raise ValueError("需要对3d图像进行变换吗?")
img_path = os.listdir(self.load_path)
img_path_all = dict()
for img_name in img_path:
img_allqueue = nib.load(os.path.join(self.load_path, img_name))
width, height, queue = img_allqueue.dataobj.shape
for i in range(queue):
img = img_allqueue.dataobj[:,:,i]
for k in range(self.dim * 4):
axis_flip = int(classes[k][0])
rotation = int(classes[k][1]) * 90
img_path_all[img_name + '@{}@{}'.format(i, classes[k])] = Geo_Transform_img(img, axis_flip,rotation)
img_train, img_test = self.dict_split_shuffle(img_path_all)
for key in img_train:
plt.imsave(os.path.join(train_save_path,f'{key}.jpg'), img_train[key], cmap='gray')
for key in img_test:
plt.imsave(os.path.join(test_save_path,f'{key}.jpg'), img_test[key], cmap='gray')
def dict_split_shuffle(self, img_path_all):
tr_size = int(len(img_path_all) * self.split_ratio[0])
keys = list(img_path_all.keys())
random.shuffle(keys)
img_train = dict([(i,img_path_all[i]) for i in keys[:tr_size]])
img_test = dict([(i, img_path_all[i]) for i in keys[tr_size:]])
return img_train, img_test
def show_img(path):
img = nib.load(path)
width, height, queue = img.dataobj.shape
num = 1
for i in range(queue):
img_arry = img.dataobj[:, :, i]
plt.subplot(2, 3, num)
plt.imshow(img_arry, cmap='gray')
num += 1
plt.show()
def rotate_img(img, rot, axes):
"""
:param img: Array of two or more dimensions.
:param rot: Degrees of the array is rotated.
:param axes: The array is rotated in the plane defined by the axes.
Axes must be different.(0,1),(1,2),(0,2)
"""
if rot in [0, 90, 180, 270]:
k = rot / 90
return np.rot90(img, k, axes)
else:
raise ValueError('rotation should be 0, 90, 180, or 270')
def Geo_Transform_img(img, axis_flip, rotation):
"""
:param img: Array of two or three dimensions.
:param axis_flip: int, how many aixs should be fipped.
:param rotation: rotation degrees in [0,90,180,270]
:return:
"""
    if axis_flip == 0:  # no axis flip
return rotate_img(img, rotation, (0, 1))
    elif axis_flip == 1:  # one axis flipped (transpose)
img = np.transpose(img)
return rotate_img(img, rotation, (0, 1))
    # elif axis_flip == 2:  # two axes flipped (the 3D case)
# img = np.transpose(img)
# return rotate_img(img, rotation, (0, 2))
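# A tiny worked example of the transform above (added for illustration):
#   img = np.array([[1, 2, 3],
#                   [4, 5, 6]])
#   Geo_Transform_img(img, axis_flip=0, rotation=90)   # np.rot90 once:
#   -> [[3, 6],
#       [2, 5],
#       [1, 4]]
# With axis_flip=1 the array is transposed before the rotation, so the eight
# classes '00'...'13' enumerate {no flip, flip} x {0, 90, 180, 270} degrees.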
# Set the paths of the datasets.
MyoPS_C0_dir = 'datasets\MyoPS\C0'
MyoPS_LGE_dir = 'datasets\MyoPS\LGE'
MyoPS_T2_dir = 'datasets\MyoPS\T2'
MyoPS_C0_split_dir = 'datasets\MyoPS\C0_split'
MyoPS_LGE_split_dir = 'datasets\MyoPS\LGE_split'
MyoPS_T2_split_dir = 'datasets\MyoPS\T2_split'
data_generate = GenericData(save_path=MyoPS_T2_split_dir,load_path=MyoPS_T2_dir,split_ratio=[0.8,0.2],dim=2)
data_generate.generic_data() | 5,273 | 34.635135 | 120 | py |
Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/GeoNet.py | import torch
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear, BatchNorm2d, ReLU, BatchNorm1d
class GeoNet(nn.Module):
def __init__(self):
super(GeoNet, self).__init__()
self.conv1 = Conv2d(1, 32, kernel_size=5, padding=2)
self.conv2 = Conv2d(32, 64, kernel_size=3, padding=1)
self.conv3 = Conv2d(64, 128, kernel_size=3, padding=1)
self.conv4 = Conv2d(128, 64, kernel_size=3, padding=1)
self.model = nn.Sequential(
self.conv1,
BatchNorm2d(32),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
self.conv2,
BatchNorm2d(64),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
self.conv3,
BatchNorm2d(128),
ReLU(),
self.conv4,
BatchNorm2d(64),
ReLU(),
MaxPool2d(kernel_size=2, stride=2),
Flatten(),
Linear(64 * 32 * 32, 256),
BatchNorm1d(256),
ReLU(),
Linear(256, 8)
)
def forward(self, x):
x = self.model(x)
return x
if __name__ == '__main__':  # quick sanity check of the model
model = GeoNet()
print(model)
input = torch.ones((16, 1, 256, 256))
output = model(input)
print(output.shape)
| 1,344 | 27.020833 | 99 | py |
Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/OtherExperiment.py | from torchvision.transforms import transforms
from dataloader import *
from GeoNet import *
def predict(model):
model.eval()
total_LGE_accuracy = 0
total_C0_accuracy = 0
data_aug = transforms.Compose([
transforms.ToTensor(),
transforms.Grayscale(num_output_channels=1),
transforms.RandomRotation(10),
transforms.RandomResizedCrop((256, 256), scale=(0.7, 1), ratio=(0.8, 1.2))
])
image_datasets_LGE = MyData(os.path.join('datasets\MyoPS\LGE_split','train'), data_aug)+MyData(os.path.join('datasets\MyoPS\LGE_split','test'), data_aug)
image_datasets_C0 = MyData(os.path.join('datasets\MyoPS\C0_split', 'train'), data_aug) + MyData(os.path.join('datasets\MyoPS\C0_split', 'test'), data_aug)
data_loaders_LGE = torch.utils.data.DataLoader(image_datasets_LGE, batch_size=16, shuffle=True, num_workers=0,drop_last=True)
data_loaders_C0 = torch.utils.data.DataLoader(image_datasets_C0, batch_size=16, shuffle=True, num_workers=0,drop_last=True)
with torch.no_grad():
for data in data_loaders_LGE:
images1, targets1 = data
outputs = model(images1)
accuracy = (outputs.argmax(1) == targets1.argmax(1)).sum()
total_LGE_accuracy += accuracy
for data in data_loaders_C0:
images2, targets2 = data
outputs = model(images2)
accuracy = (outputs.argmax(1) == targets2.argmax(1)).sum()
total_C0_accuracy += accuracy
return total_LGE_accuracy,total_C0_accuracy
if __name__ == '__main__':
    model = GeoNet()  # build the model first, then load the saved state dict
model.load_state_dict(torch.load("GeoNet_MyoPS_T2.pth"))
total_LGE_accuracy,total_C0_accuracy = predict(model)
print("LGE:{},C0:{}".format(total_LGE_accuracy/1392,total_C0_accuracy/1392)) | 1,827 | 44.7 | 158 | py |
Geometric_Transformation_CMR | Geometric_Transformation_CMR-main/train.py | import cv2
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from dataloader import *
from GeoNet import *
from d2l import torch as d2l
def train(image_datasets, data_loaders, epochs, learning_rate, wt_decay):
train_data_size = len(image_datasets['train'])
test_data_size = len(image_datasets['test'])
print(train_data_size,test_data_size)
train_dataloader = data_loaders['train']
test_dataloader = data_loaders['test']
    # Instantiate the network
model = GeoNet()
    # Loss function
loss_fn = nn.CrossEntropyLoss()
    # Optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=wt_decay)
    # Training bookkeeping
    # counter of training steps
total_train_step = 0
    # TensorBoard writer
writer = SummaryWriter("logs_train3")
for i in range(epochs):
print("----------第{}轮训练开始了---------".format(i + 1))
model.train()
total_train_loss = 0
        # Training loop
for data in train_dataloader:
images, targets = data
outputs = model(images)
loss = loss_fn(outputs, targets)
total_train_loss += loss.item()
total_train_step += 1
            # Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if total_train_step % 5 == 0:
print("训练次数:{},loss:{}".format(total_train_step, loss.item()))
writer.add_scalar("train_loss", total_train_loss, i+1)
        # Evaluation loop
model.eval()
total_train_accuracy = 0
total_test_accuracy = 0
total_test_loss = 0
with torch.no_grad():
for data in test_dataloader:
images1, targets1 = data
outputs = model(images1)
loss = loss_fn(outputs, targets1)
total_test_loss += loss.item()
accuracy = (outputs.argmax(1) == targets1.argmax(1)).sum()
total_test_accuracy += accuracy
print("在{}轮训练后,整体测试集合上的accuracy:{}".format(i+1, total_test_accuracy / test_data_size))
writer.add_scalar("test_accuracy", total_test_accuracy / test_data_size, i+1)
writer.add_scalar("test_loss", total_test_loss, i + 1)
for data in train_dataloader:
images2, targets2 = data
outputs = model(images2)
accuracy = (outputs.argmax(1) == targets2.argmax(1)).sum()
total_train_accuracy += accuracy
print("在{}轮训练后,整体训练集合上的accuracy:{}".format(i+1, total_train_accuracy / train_data_size))
writer.add_scalar("train_accuracy", total_train_accuracy / train_data_size, i+1)
writer.close()
return model
def main():
MyoPS_C0_split_dir = 'datasets\MyoPS\C0_split'
MyoPS_LGE_split_dir = 'datasets\MyoPS\LGE_split'
MyoPS_T2_split_dir = 'datasets\MyoPS\T2_split'
data_transforms = {
'train': transforms.Compose([
transforms.ToTensor(),
transforms.Grayscale(num_output_channels=1),
transforms.RandomRotation(10),
transforms.RandomResizedCrop((256, 256), scale=(0.7, 1), ratio=(0.8, 1.2))
]),
'test': transforms.Compose([
transforms.ToTensor(),
transforms.Grayscale(num_output_channels=1),
transforms.RandomRotation(10),
#transforms.Resize((256,256))
transforms.RandomResizedCrop((256, 256), scale=(0.7, 1), ratio=(0.8, 1.2))
])
}
image_datasets = {
x: MyData(os.path.join(MyoPS_T2_split_dir, x), data_transforms[x])
for x in ['train', 'test']
}
data_loaders = {
x: torch.utils.data.DataLoader(image_datasets[x], batch_size=16, shuffle=True, num_workers=0,drop_last=True)
for x in ['train', 'test']
}
model = train(image_datasets, data_loaders, epochs=32, learning_rate=0.01, wt_decay=0)
torch.save(model.state_dict(), "GeoNet_MyoPS_T2.pth")
if __name__ == '__main__':
main()
| 4,003 | 33.817391 | 116 | py |
ZeCon | ZeCon-main/optimization/losses.py | # PatchNCE loss from https://github.com/taesungp/contrastive-unpaired-translation
from torch.nn import functional as F
import torch
import numpy as np
import torch.nn as nn
def d_clip_loss(x, y, use_cosine=False):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
if use_cosine:
distance = 1 - (x @ y.t()).squeeze()
else:
distance = (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
return distance
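# Added note: for unit-norm x and y with angle theta between them,
# ||x - y|| = 2 * sin(theta / 2), so the arcsin branch above returns
# 2 * (theta / 2) ** 2 = theta ** 2 / 2 -- half the squared geodesic (angular)
# distance on the unit sphere, a smoother alternative to the cosine distance
# used when use_cosine=True.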
def d_clip_dir_loss(x_embd,y_embd,prompt_x_embd,prompt_y_embd):
d_img = x_embd - y_embd
d_txt = prompt_x_embd - prompt_y_embd
d_img = F.normalize(d_img, dim=-1)
d_txt = F.normalize(d_txt, dim=-1)
distance = 1 - (d_img @ d_txt.t()).squeeze()
return distance
def range_loss(input):
return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 3])
def mse_loss(x_in, y_in):
mse = torch.nn.MSELoss()
return mse(x_in,y_in)
def get_features(image, model, layers=None):
if layers is None:
layers = {'0': 'conv1_1',
'2': 'conv1_2',
'5': 'conv2_1',
'7': 'conv2_2',
'10': 'conv3_1',
'19': 'conv4_1',
'21': 'conv4_2',
'28': 'conv5_1',
'31': 'conv5_2'
}
features = {}
x = image
for name, layer in model._modules.items():
x = layer(x)
if name in layers:
features[layers[name]] = x
return features
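# Hedged usage sketch for get_features: it walks the children of a feature extractor and keeps
# the activations at the indices listed above. The VGG19 backbone and 224x224 input below are
# assumptions for illustration (torchvision must be installed; weights download on first use).
def _demo_get_features():
    from torchvision import models
    vgg = models.vgg19(pretrained=True).features.eval()
    feats = get_features(torch.randn(1, 3, 224, 224), vgg)
    return {name: tuple(f.shape) for name, f in feats.items()}   # e.g. conv1_1 -> (1, 64, 224, 224)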
class Normalize(nn.Module):
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm + 1e-7)
return out
def zecon_loss_direct(Unet, x_in, y_in,t):
total_loss = 0
nce_layers = [0,2,5,8,11]
num_patches=256
l2norm = Normalize(2)
feat_q = Unet.forward_enc(x_in,t, nce_layers)
feat_k = Unet.forward_enc(y_in,t, nce_layers)
patch_ids = []
feat_k_pool = []
feat_q_pool = []
for feat_id, feat in enumerate(feat_k):
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # [B,ch,h,w] > [B,h*w,ch]
patch_id = np.random.permutation(feat_reshape.shape[1])
patch_id = patch_id[:int(min(num_patches, patch_id.shape[0]))] # .to(patch_ids.device)
patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
patch_ids.append(patch_id)
x_sample = l2norm(x_sample)
feat_k_pool.append(x_sample)
for feat_id, feat in enumerate(feat_q):
feat_reshape = feat.permute(0, 2, 3, 1).flatten(1, 2) # [B,ch,h,w] > [B,h*w,ch]
patch_id = patch_ids[feat_id]
patch_id = torch.tensor(patch_id, dtype=torch.long, device=feat.device)
x_sample = feat_reshape[:, patch_id, :].flatten(0, 1) # reshape(-1, x.shape[1])
x_sample = l2norm(x_sample)
feat_q_pool.append(x_sample)
for f_q, f_k in zip(feat_q_pool, feat_k_pool):
loss = PatchNCELoss(f_q, f_k)
total_loss += loss.mean()
return total_loss.mean()
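# Hedged sketch of the shared patch sampling used above, without the U-Net encoder. The feature
# map shape is an assumption; the point is that the same patch ids are reused for the query and
# key features, so corresponding rows form the positive pairs fed to PatchNCELoss.
def _demo_shared_patch_sampling(num_patches=256):
    feat_q = torch.randn(1, 64, 32, 32)
    feat_k = torch.randn(1, 64, 32, 32)
    flat_q = feat_q.permute(0, 2, 3, 1).flatten(1, 2)             # [B, ch, h, w] -> [B, h*w, ch]
    flat_k = feat_k.permute(0, 2, 3, 1).flatten(1, 2)
    patch_id = torch.randperm(flat_k.shape[1])[:num_patches]      # one id set, reused for both
    l2norm = Normalize(2)
    q = l2norm(flat_q[:, patch_id, :].flatten(0, 1))              # (num_patches, 64)
    k = l2norm(flat_k[:, patch_id, :].flatten(0, 1))
    return q, k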
def PatchNCELoss(feat_q, feat_k, batch_size=1, nce_T = 0.07):
# feat_q : n_patch x 512
# feat_k : n_patch x 512
batch_size = batch_size
nce_T = nce_T
cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
mask_dtype = torch.bool
num_patches = feat_q.shape[0]
dim = feat_q.shape[1]
feat_k = feat_k.detach()
# pos logit
l_pos = torch.bmm(
feat_q.view(num_patches, 1, -1), feat_k.view(num_patches, -1, 1))
l_pos = l_pos.view(num_patches, 1)
# reshape features to batch size
feat_q = feat_q.view(batch_size, -1, dim)
feat_k = feat_k.view(batch_size, -1, dim)
npatches = feat_q.size(1)
l_neg_curbatch = torch.bmm(feat_q, feat_k.transpose(2, 1))
# Diagonal entries are the similarity of each feature with itself and hence meaningless;
# fill the diagonal with a very small logit (-10.0) so those entries are effectively zero after the softmax.
diagonal = torch.eye(npatches, device=feat_q.device, dtype=mask_dtype)[None, :, :]
l_neg_curbatch.masked_fill_(diagonal, -10.0)
l_neg = l_neg_curbatch.view(-1, npatches)
out = torch.cat((l_pos, l_neg), dim=1) / nce_T
loss = cross_entropy_loss(out, torch.zeros(out.size(0), dtype=torch.long,
device=feat_q.device))
return loss
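# Hedged usage sketch for PatchNCELoss: random stand-ins for the pooled patch features
# produced by zecon_loss_direct above (256 patches of dimension 512 is an assumption).
def _demo_patch_nce_loss():
    feat_q = torch.randn(256, 512)
    feat_k = torch.randn(256, 512)
    per_patch = PatchNCELoss(feat_q, feat_k)   # one cross-entropy value per patch, shape (256,)
    return per_patch.mean()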
| 4,600 | 29.879195 | 95 | py |
ZeCon | ZeCon-main/optimization/augmentations.py | import torch
from torch import nn
import kornia.augmentation as K
# import ipdb
class ImageAugmentations(nn.Module):
def __init__(self, output_size, aug_prob, p_min, p_max, patch=False):
super().__init__()
self.output_size = output_size
self.aug_prob = aug_prob
self.patch = patch
self.augmentations = nn.Sequential(
K.RandomAffine(degrees=15, translate=0.1, p=aug_prob, padding_mode="border"), # type: ignore
K.RandomPerspective(0.7, p=aug_prob),
)
self.random_patch = K.RandomResizedCrop(size=(128,128), scale=(p_min,p_max))
self.avg_pool = nn.AdaptiveAvgPool2d((self.output_size, self.output_size))
def forward(self, input, num_patch=None, is_global=False):
"""Extents the input batch with augmentations
If the input is consists of images [I1, I2] the extended augmented output
will be [I1_resized, I2_resized, I1_aug1, I2_aug1, I1_aug2, I2_aug2 ...]
Args:
input ([type]): input batch of shape [batch, C, H, W]
Returns:
updated batch: of shape [batch * augmentations_number, C, H, W]
"""
if self.patch:
if is_global:
input = input.repeat(num_patch,1,1,1)
else:
input_patches = []
for i in range(num_patch):
if self.aug_prob > 0.0:
tmp = self.augmentations(self.random_patch(input))
else:
tmp = self.random_patch(input)
input_patches.append(tmp)
input = torch.cat(input_patches,dim=0)
else:
input_patches = []
for i in range(num_patch):
input_patches.append(self.augmentations(input))
input = torch.cat(input_patches,dim=0)
resized_images = self.avg_pool(input)
return resized_images
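# Hedged usage sketch (the sizes, probabilities and patch scales below are assumptions):
def _demo_image_augmentations():
    aug = ImageAugmentations(output_size=224, aug_prob=0.5, p_min=0.01, p_max=0.3, patch=True)
    batch = torch.randn(1, 3, 256, 256)
    patches = aug(batch, num_patch=8)                   # 8 random crops, pooled to (8, 3, 224, 224)
    full = aug(batch, num_patch=8, is_global=True)      # the full image repeated, pooled to (8, 3, 224, 224)
    return patches.shape, full.shape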
| 1,974 | 34.267857 | 105 | py |
ZeCon | ZeCon-main/optimization/image_editor_zecon.py | import os
from pathlib import Path
from optimization.constants import ASSETS_DIR_NAME
from utils.metrics_accumulator import MetricsAccumulator
from numpy import random
from optimization.augmentations import ImageAugmentations as ImageAugmentations
from PIL import Image
import torch
from torchvision import transforms
import torchvision.transforms.functional as F
from torchvision.transforms import functional as TF
from optimization.losses import range_loss, d_clip_loss, d_clip_dir_loss, mse_loss, get_features, zecon_loss_direct
import numpy as np
from CLIP import clip
from guided_diffusion.guided_diffusion.script_util import (
create_model_and_diffusion,
model_and_diffusion_defaults,
)
from torchvision import models
from utils.visualization import show_edited_masked_image
import matplotlib.pyplot as plt
# import ipdb
class ImageEditor:
def __init__(self, args) -> None:
self.args = args
os.makedirs(self.args.output_path, exist_ok=True)
if self.args.export_assets:
self.assets_path = Path(os.path.join(self.args.output_path, ASSETS_DIR_NAME))
os.makedirs(self.assets_path, exist_ok=True)
if self.args.seed is not None:
torch.manual_seed(self.args.seed)
np.random.seed(self.args.seed)
random.seed(self.args.seed)
self.model_config = model_and_diffusion_defaults(self.args)
# Load models
self.device = torch.device(
f"cuda:{self.args.gpu_id}" if torch.cuda.is_available() else "cpu"
)
print("Using device:", self.device)
if self.args.data == 'imagenet':
self.model, self.diffusion = create_model_and_diffusion(**self.model_config)
self.model.load_state_dict(
torch.load(
"./ckpt/256x256_diffusion_uncond.pt",
map_location="cpu",
)
)
elif self.args.data == 'ffhq':
self.model_config.update(
{
"num_channels": 128,
"num_head_channels": 64,
"num_res_blocks":1,
"attention_resolutions": "16",
"resblock_updown": True,
"use_fp16": False,
}
)
self.model, self.diffusion = create_model_and_diffusion(**self.model_config)
self.model.load_state_dict(
torch.load(
# "./ckpt/ffhq_10m.pt",
"./ckpt/ffhq_baseline.pt",
map_location="cpu",
)
)
self.model.requires_grad_(False).eval().to(self.device)
for name, param in self.model.named_parameters():
if "qkv" in name or "norm" in name or "proj" in name:
param.requires_grad_()
if self.model_config["use_fp16"]:
self.model.convert_to_fp16()
self.clip_model = (
clip.load("ViT-B/16", device=self.device, jit=False)[0].eval().requires_grad_(False)
)
self.clip_size = self.clip_model.visual.input_resolution
self.clip_normalize = transforms.Normalize(
mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711]
)
self.image_augmentations = ImageAugmentations(224, self.args.aug_prob, self.args.patch_min, self.args.patch_max, patch=False)
self.patch_augmentations = ImageAugmentations(224, self.args.aug_prob, self.args.patch_min, self.args.patch_max, patch=True)
self.metrics_accumulator = MetricsAccumulator()
if self.args.l_vgg > 0:
self.vgg = models.vgg19(pretrained=True).features
self.vgg.to(self.device)
self.vgg.eval().requires_grad_(False)
self.vgg_normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
def unscale_timestep(self, t):
unscaled_timestep = (t * (self.diffusion.num_timesteps / 1000)).long()
return unscaled_timestep
def clip_global_loss(self,x_in,text_embed):
clip_loss = torch.tensor(0)
augmented_input = self.image_augmentations(x_in,num_patch=self.args.n_patch).add(1).div(2)
clip_in = self.clip_normalize(augmented_input)
image_embeds = self.clip_model.encode_image(clip_in).float()
dists = d_clip_loss(image_embeds, text_embed)
for i in range(self.args.batch_size):
clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
return clip_loss
def clip_global_patch_loss(self, x_in, text_embed):
clip_loss = torch.tensor(0)
augmented_input = self.patch_augmentations(x_in,num_patch=self.args.n_patch).add(1).div(2)
clip_in = self.clip_normalize(augmented_input)
image_embeds = self.clip_model.encode_image(clip_in).float()
dists = d_clip_loss(image_embeds, text_embed)
for i in range(self.args.batch_size):
clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
return clip_loss
def clip_dir_loss(self, x_in, y_in, text_embed, text_y_embed):
clip_loss = torch.tensor(0)
augmented_input_x = self.image_augmentations(x_in,num_patch=self.args.n_patch).add(1).div(2)
augmented_input_y = self.image_augmentations(y_in,num_patch=self.args.n_patch).add(1).div(2)
clip_in_x = self.clip_normalize(augmented_input_x)
clip_in_y = self.clip_normalize(augmented_input_y)
image_embeds_x = self.clip_model.encode_image(clip_in_x).float()
image_embeds_y = self.clip_model.encode_image(clip_in_y).float()
dists = d_clip_dir_loss(image_embeds_x, image_embeds_y, text_embed, text_y_embed)
for i in range(self.args.batch_size):
clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
return clip_loss
def clip_dir_patch_loss(self, x_in, y_in, text_embed, text_y_embed):
clip_loss = torch.tensor(0)
augmented_input_x = self.patch_augmentations(x_in,num_patch=self.args.n_patch).add(1).div(2)
augmented_input_y = self.patch_augmentations(y_in,num_patch=self.args.n_patch,is_global=True).add(1).div(2)
clip_in_x = self.clip_normalize(augmented_input_x)
clip_in_y = self.clip_normalize(augmented_input_y)
image_embeds_x = self.clip_model.encode_image(clip_in_x).float()
image_embeds_y = self.clip_model.encode_image(clip_in_y).float()
dists = d_clip_dir_loss(image_embeds_x, image_embeds_y, text_embed, text_y_embed)
for i in range(self.args.batch_size):
clip_loss = clip_loss + dists[i :: self.args.batch_size].mean()
return clip_loss
def zecon_loss(self, x_in, y_in, t):
loss = zecon_loss_direct(self.model, x_in, y_in, torch.zeros_like(t,device=self.device))
return loss.mean()
def mse_loss(self,x_in, y_in):
loss = mse_loss(x_in,y_in)
return loss.mean()
def vgg_loss(self,x_in, y_in):
content_features = get_features(self.vgg_normalize(x_in), self.vgg)
target_features = get_features(self.vgg_normalize(y_in), self.vgg)
loss = 0
loss += torch.mean((target_features['conv1_1'] - content_features['conv1_1']) ** 2)
loss += torch.mean((target_features['conv2_1'] - content_features['conv2_1']) ** 2)
# loss += torch.mean((target_features['conv4_2'] - content_features['conv4_2']) ** 2)
# loss += torch.mean((target_features['conv5_2'] - content_features['conv5_2']) ** 2)
return loss.mean()
def edit_image_by_prompt(self):
text_embed = self.clip_model.encode_text(
clip.tokenize(self.args.prompt_tgt).to(self.device)
).float()
text_y_embed = self.clip_model.encode_text(
clip.tokenize(self.args.prompt_src).to(self.device)
).float()
self.image_size = (self.model_config["image_size"], self.model_config["image_size"])
self.init_image_pil = Image.open(self.args.init_image).convert("RGB")
self.init_image_pil = self.init_image_pil.resize(self.image_size, Image.LANCZOS) # type: ignore
self.init_image = (
TF.to_tensor(self.init_image_pil).to(self.device).unsqueeze(0).mul(2).sub(1)
)
visualization_path = Path(
os.path.join(self.args.output_path, self.args.output_file)
)
def cond_fn(x, t, y=None):
if self.args.prompt_tgt == "":
return torch.zeros_like(x)
with torch.enable_grad():
x = x.detach().requires_grad_()
t = self.unscale_timestep(t)
out = self.diffusion.p_mean_variance(
self.model, x, t, clip_denoised=False, model_kwargs={"y": y}
)
fac = self.diffusion.sqrt_one_minus_alphas_cumprod[t[0].item()]
# blend the predicted clean image with the current noisy sample before computing the guidance losses
x_in = out["pred_xstart"] * fac + x * (1 - fac)
loss = torch.tensor(0)
if self.args.l_clip_global != 0:
clip_loss = self.clip_global_loss(x_in, text_embed) * self.args.l_clip_global
loss = loss + clip_loss
self.metrics_accumulator.update_metric("clip_loss", clip_loss.item())
if self.args.l_clip_global_patch != 0:
clip_patch_loss = self.clip_global_patch_loss(x_in, text_embed) * self.args.l_clip_global_patch
loss = loss + clip_patch_loss
self.metrics_accumulator.update_metric("clip_patch_loss", clip_patch_loss.item())
if self.args.l_clip_dir != 0:
y_t = self.diffusion.q_sample(self.init_image,t)
y_in = self.init_image * fac + y_t * (1 - fac)
clip_dir_loss = self.clip_dir_loss(x_in, y_in, text_embed, text_y_embed) * self.args.l_clip_dir
loss = loss + clip_dir_loss
self.metrics_accumulator.update_metric("clip_dir_loss", clip_dir_loss.item())
if self.args.l_clip_dir_patch != 0:
y_t = self.diffusion.q_sample(self.init_image,t)
y_in = self.init_image * fac + y_t * (1 - fac)
clip_dir_patch_loss = self.clip_dir_patch_loss(x_in, y_in, text_embed, text_y_embed) * self.args.l_clip_dir_patch
loss = loss + clip_dir_patch_loss
self.metrics_accumulator.update_metric("clip_dir_patch_loss", clip_dir_patch_loss.item())
if self.args.l_zecon != 0:
y_t = self.diffusion.q_sample(self.init_image,t)
y_in = self.init_image * fac + y_t * (1 - fac)
zecon_loss = self.zecon_loss(x_in, y_in,t) * self.args.l_zecon
loss = loss + zecon_loss
self.metrics_accumulator.update_metric("zecon_loss", zecon_loss.item())
if self.args.l_mse != 0 and t.item() < 700:
y_t = self.diffusion.q_sample(self.init_image,t)
y_in = self.init_image * fac + y_t * (1 - fac)
mse_loss = self.mse_loss(x_in, y_in) * self.args.l_mse
loss = loss + mse_loss
self.metrics_accumulator.update_metric("mse_loss", mse_loss.item())
if self.args.l_vgg != 0 and t.item() < 800:
y_t = self.diffusion.q_sample(self.init_image,t)
y_in = self.init_image * fac + y_t * (1 - fac)
vgg_loss = self.vgg_loss(x_in, y_in) * self.args.l_vgg
loss = loss + vgg_loss
self.metrics_accumulator.update_metric("vgg_loss", vgg_loss.item())
if self.args.range_lambda != 0:
r_loss = range_loss(out["pred_xstart"]).sum() * self.args.range_lambda
loss = loss + r_loss
self.metrics_accumulator.update_metric("range_loss", r_loss.item())
return -torch.autograd.grad(loss, x)[0]
save_image_interval = self.diffusion.num_timesteps // 5
for iteration_number in range(self.args.iterations_num):
# diffusion_type is expected to be "<forward>_<reverse>", e.g. "ddpm_ddpm" or "ddim_ddim"
fw = self.args.diffusion_type.split('_')[0]
bk = self.args.diffusion_type.split('_')[-1]
# Forward DDIM
if fw == 'ddim':
print("Forward Process to noise")
noise = self.diffusion.ddim_reverse_sample_loop(
self.model,
self.init_image,
clip_denoised=False,
skip_timesteps=self.args.skip_timesteps,
)
# Forward DDPM
elif fw == 'ddpm':
init_image_batch = torch.tile(self.init_image, dims=(self.args.batch_size, 1, 1, 1))
noise = self.diffusion.q_sample(
x_start=init_image_batch,
t=torch.tensor(self.diffusion.num_timesteps-int(self.args.skip_timesteps), dtype=torch.long, device=self.device),
noise=torch.randn((self.args.batch_size,3,self.model_config["image_size"],self.model_config["image_size"]), device=self.device),
)
else:
raise ValueError
# Reverse DDPM
if bk == 'ddpm':
samples = self.diffusion.p_sample_loop_progressive(
self.model,
(
self.args.batch_size,
3,
self.model_config["image_size"],
self.model_config["image_size"],
),
noise = noise if fw=='ddim' else None,
clip_denoised=False,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=self.args.skip_timesteps,
init_image=self.init_image,
)
# Reverse DDIM
elif bk == 'ddim':
samples = self.diffusion.ddim_sample_loop_progressive(
self.model,
(
self.args.batch_size,
3,
self.model_config["image_size"],
self.model_config["image_size"],
),
noise = noise,
clip_denoised=False,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=self.args.skip_timesteps,
eta=self.args.eta,
)
else:
raise ValueError
intermediate_samples = [[] for i in range(self.args.batch_size)]
total_steps = self.diffusion.num_timesteps - self.args.skip_timesteps - 1
for j, sample in enumerate(samples):
should_save_image = j % save_image_interval == 0 or j == total_steps
if should_save_image or self.args.save_video:
self.metrics_accumulator.print_average_metric()
for b in range(self.args.batch_size):
pred_image = sample["pred_xstart"][b]
pred_image = pred_image.add(1).div(2).clamp(0, 1)
pred_image_pil = TF.to_pil_image(pred_image)
filename = Path(self.args.init_image).stem
visualization_path = visualization_path.with_name(
f"{filename}_{self.args.prompt_tgt}_{iteration_number}{visualization_path.suffix}"
)
if self.args.export_assets:
pred_path = self.assets_path / visualization_path.name
pred_image_pil.save(pred_path)
intermediate_samples[b].append(pred_image_pil)
if should_save_image:
show_edited_masked_image(
title=self.args.prompt_tgt,
source_image=self.init_image_pil,
edited_image=pred_image_pil,
path=visualization_path,
)
visualization_path2 = str(visualization_path).replace('.png','_output.png')
pred_image_arr = np.array(pred_image_pil)
plt.imsave(visualization_path2, pred_image_arr)
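# Hedged usage sketch (illustrative only): `args` must provide the attributes referenced
# above, e.g. output_path, export_assets, seed, data, gpu_id, prompt_src / prompt_tgt,
# init_image, the l_* loss weights, skip_timesteps, diffusion_type and batch_size.
#
# editor = ImageEditor(args)
# editor.edit_image_by_prompt()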
| 17,010 | 43.648294 | 152 | py |