repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
MRE-ISE | MRE-ISE-main/cores/gene/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch_geometric.utils import dense_to_sparse
from cores.gene.backbone import GAT, FustionLayer, GraphLearner
from cores.lamo.decoding_network import DecoderNetwork
class MRE(nn.Module):
    """Multimodal Relation Extraction model.

    Pipeline (see ``forward``): encode text and auxiliary image objects,
    fuse them into a cross-modal graph, iteratively refine that graph with a
    graph learner + GAT stack, compress the result through a variational
    (information-bottleneck) layer, and enrich the compressed representation
    with keywords retrieved from a latent topic model before classifying.
    """

    def __init__(self, args, vision_config, text_config, vision_model, text_model, num_labels,
                 text_bow_size, visual_bow_size, tokenizer, processor):
        super(MRE, self).__init__()
        self.args = args
        self.hid_size = args.hid_size
        self.num_layers = args.num_layers
        self.num_labels = num_labels
        # encode image and text
        self.vision_model = vision_model
        self.text_model = text_model
        self.tokenizer = tokenizer
        self.processor = processor
        self.device = torch.device(args.device)
        # construct cross-modal graph
        self.text_linear = nn.Linear(text_config.hidden_size, args.hid_size)
        self.vision_linear = nn.Linear(vision_config.hidden_size, args.hid_size)
        self.fuse = FustionLayer(args.hid_size)
        self.cross_GAT_layers = GAT(args.hid_size, args.hid_size, 0, args.dropout, alpha=0)
        # learn the best graph for MRE
        self.adjust_layers = nn.ModuleList(
            [GAT(args.hid_size, args.hid_size, 0, args.dropout, alpha=0) for _ in range(self.num_layers)])
        self.graph_learner = GraphLearner(input_size=args.hid_size, hidden_size=args.hid_size,
                                          graph_type=args.graph_type, top_k=args.top_k,
                                          epsilon=args.epsilon, num_pers=args.num_per,
                                          metric_type=args.graph_metric_type, temperature=args.temperature,
                                          feature_denoise=args.feature_denoise, device=self.device)
        self.fc_mu = nn.Linear(3 * args.hid_size, 3 * args.hid_size)  # mean of q(z|x)
        self.fc_logvar = nn.Linear(3 * args.hid_size, 3 * args.hid_size)  # (soft-plus'ed into a std below)
        self.topic_kl_weight = args.topic_beta
        self.topic_model = DecoderNetwork(
            text_bow_size, visual_bow_size, args.hid_size, args.inference_type, args.n_topics, args.model_type,
            args.hid_size, args.activation,
            args.dropout, args.learn_priors, label_size=args.label_size)
        # Latent dimensionality used in the closed-form Gaussian KL of
        # _topic_reconstruction_loss.  NOTE(review): this attribute was
        # previously never set (AttributeError at the first loss call);
        # n_topics matches the topic model's latent size -- TODO confirm.
        self.n_components = args.n_topics
        self.topic_keywords_number = args.topic_keywords_number
        self.classifier1 = nn.Linear(args.hid_size, num_labels)
        self.classifier2 = nn.Linear(args.hid_size * 3, num_labels)

    def forward(self, input_ids=None, attention_mask=None, head_tail_pos=None, piece2word=None,
                adj_matrix=None, labels=None, aux_imgs=None, aux_mask=None, edge_mask=None, writer=None, step=None,
                X_T_bow=None, X_V_bow=None):
        """
        :param input_ids: [batch_size, seq_len]
        :param attention_mask: [batch_size, seq_len]
        :param head_tail_pos: [batch_size, 4] -- (head_start, head_end, tail_start, tail_end)
        :param piece2word: [batch_size, seq_len, seq_len] word-piece to word alignment
        :param adj_matrix: [batch_size, seq_len, seq_len]
        :return: ((mu, std), logits1, logits2, topic_loss)
        """
        bsz = input_ids.size(0)
        text_hidden_state = self.text_model(input_ids, attention_mask).last_hidden_state
        length = piece2word.size(1)
        min_value = torch.min(text_hidden_state).item()
        # Max pooling word representations from pieces: fill non-member pieces
        # with the global minimum so they never win the max.
        _bert_embs = text_hidden_state.unsqueeze(1).expand(-1, length, -1, -1)
        _bert_embs = torch.masked_fill(_bert_embs, piece2word.eq(0).unsqueeze(-1), min_value)
        text_hidden_states, _ = torch.max(_bert_embs, dim=2)
        # Encode each auxiliary image object individually with the vision tower.
        imgs_hidden_states = []
        aux_num = aux_imgs.size(1)
        for i in range(bsz):
            temp = []
            for j in range(aux_num):
                _temp = self.vision_model(aux_imgs[i, j, :, :].unsqueeze(0)).pooler_output
                temp.append(_temp.squeeze())
            imgs_hidden_states.append(torch.stack(temp, dim=0))
        imgs_hidden_states = torch.stack(imgs_hidden_states, dim=0)
        # Project both modalities to the shared hidden size and build the graph.
        text_hidden_states = self.text_linear(text_hidden_states)
        imgs_hidden_states = self.vision_linear(imgs_hidden_states)
        adj = self.fuse(text_hidden_states, attention_mask, adj_matrix, self.args.threshold, imgs_hidden_states)
        hidden_states = torch.cat([text_hidden_states, imgs_hidden_states], dim=1)
        hidden_states = self.cross_GAT_layers(hidden_states, adj)
        # Latent topic model over the text/visual bags-of-words.
        prior_mean, prior_variance, posterior_mean, posterior_variance, \
            posterior_log_variance, text_word_dists, visual_word_dists, estimated_labels = self.topic_model(
                X_T_bow, hidden_states, labels)
        kl_loss, t_rl_loss, v_rl_loss = self._topic_reconstruction_loss(
            X_T_bow, X_V_bow, text_word_dists, visual_word_dists, prior_mean, prior_variance,
            posterior_mean, posterior_variance, posterior_log_variance)
        topic_loss = self.topic_kl_weight * kl_loss + t_rl_loss + v_rl_loss
        topic_loss = topic_loss.sum()
        # Iteratively re-learn the graph structure and propagate.
        node_mask = torch.cat([attention_mask, aux_mask], dim=-1)
        for layer in self.adjust_layers:
            new_feature, new_adj = self.learn_graph(node_features=hidden_states,
                                                    graph_skip_conn=self.args.graph_skip_conn,
                                                    graph_include_self=self.args.graph_include_self,
                                                    init_adj=adj, node_mask=node_mask)
            adj = torch.mul(new_adj, edge_mask)
            hidden_states = layer(new_feature, adj)
        edge_number = self.cnt_edges(adj)
        writer.add_scalar(tag='edge_number', scalar_value=edge_number / bsz,
                          global_step=step)  # tensorboardX logging
        # Graph read-out + entity pair representation.
        a = torch.mean(hidden_states, dim=1)
        entity_hidden_state = torch.Tensor(bsz, 2 * self.args.hid_size)  # batch, 2*hidden
        for i in range(bsz):
            head_idx = head_tail_pos[i][:2]
            tail_idx = head_tail_pos[i][2:]
            head_hidden = torch.max(hidden_states[i, head_idx, :], dim=0).values
            tail_hidden = torch.max(hidden_states[i, tail_idx, :], dim=0).values
            entity_hidden_state[i] = torch.cat([head_hidden, tail_hidden], dim=-1)
        entity_hidden_state = entity_hidden_state.to(self.args.device)
        # Variational compression (information bottleneck).
        z_hat = torch.cat([entity_hidden_state, a], dim=-1)
        mu = self.fc_mu(z_hat)
        std = F.softplus(self.fc_logvar(z_hat))
        z = self.reparametrize_n(mu, std)
        logits1 = self.classifier1(z)
        # Topic integration: retrieve top keywords of each sample's dominant topic.
        topic_distribution = self.topic_model.get_theta(X_T_bow, hidden_states, labels)
        topic_label = torch.argmax(topic_distribution, dim=-1)
        visual_topic_keywords_distribution = self.topic_model.beta  # n_topics * Vocab
        textual_topic_keywords_distribution = self.topic_model.alpha
        _, T_idxs = torch.topk(torch.index_select(textual_topic_keywords_distribution, dim=0, index=topic_label),
                               self.topic_keywords_number)
        T_idxs = T_idxs.cpu().numpy()
        # NOTE(review): previously iterated ``for i in T_idxs.shape[0]`` -- a
        # TypeError (int is not iterable); must be range().  idx_2_T_token /
        # idx_2_V_token are assumed to be attached elsewhere -- TODO confirm.
        T_component_words = [[self.idx_2_T_token[idx] for idx in T_idxs[i]]
                             for i in range(T_idxs.shape[0])]
        _, V_idxs = torch.topk(torch.index_select(visual_topic_keywords_distribution, dim=0, index=topic_label),
                               self.topic_keywords_number)
        V_idxs = V_idxs.cpu().numpy()
        V_component_words = [[self.idx_2_V_token[idx] for idx in V_idxs[i]]
                             for i in range(V_idxs.shape[0])]
        T_component_words_emb = self.text_model(self.tokenizer(T_component_words)).last_hidden_state
        V_component_words_emb = self.vision_model(self.processor(V_component_words)).last_hidden_state
        T_topic_inte = self._topic_words_attention(T_component_words_emb, z)
        V_topic_inte = self._topic_words_attention(V_component_words_emb, z)
        s = torch.cat([z, T_topic_inte, V_topic_inte], dim=-1)
        logits2 = self.classifier2(s)
        return (mu, std), logits1, logits2, topic_loss

    def _topic_words_attention(self, topic_words_rep, compressed_rep):
        """
        Integrate the topic keywords retrieved from the latent topic model into the compressed representation for
        enhancing the context.

        :param topic_words_rep: [batch, n_keywords, dim]
        :param compressed_rep: [batch, dim]
        :return: [batch, dim] attention-weighted sum of the keyword embeddings
        """
        _x = compressed_rep.unsqueeze(1).repeat(1, topic_words_rep.size(1), 1)
        # Per-keyword sigmoid gate from the dot product with the compressed rep.
        attn_weights = torch.sigmoid(torch.sum(_x * topic_words_rep, dim=-1))
        attn_weights = attn_weights.unsqueeze(2).repeat(1, 1, topic_words_rep.size(2))
        out = torch.sum(topic_words_rep * attn_weights, dim=1)
        return out

    def _topic_reconstruction_loss(self, text_inputs, visual_inputs, text_word_dists, visual_word_dists, prior_mean,
                                   prior_variance, posterior_mean, posterior_variance, posterior_log_variance):
        """Closed-form Gaussian KL plus the two bag-of-words reconstruction terms.

        :return: (KL, text_reconstruction, visual_reconstruction), each per-sample.
        """
        # KL term -- var division
        var_division = torch.sum(posterior_variance / prior_variance, dim=1)
        # diff means term
        diff_means = prior_mean - posterior_mean
        diff_term = torch.sum(
            (diff_means * diff_means) / prior_variance, dim=1)
        # logvar det division term.  NOTE(review): the prior term sums over all
        # dims (the CTM-style prior is a shared vector) -- confirm shapes.
        logvar_det_division = \
            prior_variance.log().sum() - posterior_log_variance.sum(dim=1)
        # combine terms
        KL = 0.5 * (
            var_division + diff_term - self.n_components + logvar_det_division)
        # Reconstruction term (cross-entropy against the word distributions).
        T_RL = -torch.sum(text_inputs * torch.log(text_word_dists + 1e-10), dim=1)
        V_RL = -torch.sum(visual_inputs * torch.log(visual_word_dists + 1e-10), dim=1)
        return KL, T_RL, V_RL

    def learn_graph(self, node_features, graph_skip_conn=None, graph_include_self=False, init_adj=None,
                    node_mask=None):
        """Run the graph learner and blend its output with the initial graph.

        With no skip connection the identity can optionally be added; otherwise
        the learned adjacency is interpolated with ``init_adj``.
        """
        new_feature, new_adj = self.graph_learner(node_features, node_mask=node_mask)
        bsz = node_features.size(0)
        if graph_skip_conn in (0.0, None):
            # add I (self loops) when requested
            if graph_include_self:
                if torch.cuda.is_available():
                    new_adj = new_adj + torch.stack([torch.eye(new_adj.size(1)) for _ in range(bsz)], dim=0).cuda()
                else:
                    new_adj = new_adj + torch.stack([torch.eye(new_adj.size(1)) for _ in range(bsz)], dim=0)
        else:
            # skip connection: convex combination of the old and learned graphs
            new_adj = graph_skip_conn * init_adj + (1 - graph_skip_conn) * new_adj
        return new_feature, new_adj

    def reparametrize_n(self, mu, std, n=1):
        """Standard reparameterization trick: z = mu + eps * std, eps ~ N(0, I)."""
        eps = Variable(std.data.new(std.size()).normal_())
        return mu + eps * std

    def cnt_edges(self, adj):
        """Count undirected, non-self-loop edges in a (batched) dense adjacency."""
        e = torch.ones_like(adj)
        o = torch.zeros_like(adj)
        a = torch.where(adj > 0.0, e, o)
        from torch_geometric.utils import remove_self_loops
        edge_number = remove_self_loops(edge_index=dense_to_sparse(a)[0])[0].size(1) / 2
        return edge_number

    def reset_parameters(self):
        """Re-initialize the learnable sub-modules (pretrained encoders excluded).

        NOTE(review): previously referenced non-existent ``self.GAT_layer`` and
        ``self.layers``; fixed to the attributes actually defined in __init__.
        """
        self.text_linear.reset_parameters()
        self.vision_linear.reset_parameters()
        self.cross_GAT_layers.reset_parameters()
        self.graph_learner.reset_parameters()
        for layer in self.adjust_layers:
            layer.reset_parameters()
| 11,439 | 47.680851 | 137 | py |
MRE-ISE | MRE-ISE-main/cores/gene/backbone.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import linalg as LA
from torch.autograd import Variable
from torch_geometric.utils import to_dense_adj, dense_to_sparse
from torch.distributions.relaxed_bernoulli import RelaxedBernoulli, LogitRelaxedBernoulli
from torch.distributions import Bernoulli, Normal
from torch.distributions.multivariate_normal import MultivariateNormal
VERY_SMALL_NUMBER = 1e-12
INF = 1e20
class GCN(nn.Module):
    """A single graph-convolution step with ReLU and a residual connection."""

    def __init__(self, in_features, out_features):
        super(GCN, self).__init__()
        self.linear = nn.Linear(in_features=in_features, out_features=out_features)
        self.reset_parameters()

    def forward(self, hidden_state=None, adjacent_matrix=None):
        """Aggregate linearly-projected node states over the graph.

        :param hidden_state: [batch_size, seq_len, hid_size]
        :param adjacent_matrix: [batch_size, seq_len, seq_len]
        :return: [batch_size, seq_len, hid_size] updated node states
        """
        projected = self.linear(hidden_state)
        aggregated = adjacent_matrix.matmul(projected)
        # residual: keep the original node state alongside the graph message
        return hidden_state + F.relu(aggregated)

    def reset_parameters(self):
        self.linear.reset_parameters()

    def __repr__(self):
        return self.__class__.__name__
class GAT(nn.Module):
    """Single-head dense graph-attention layer (batched variant of GAT)."""

    def __init__(self, in_features, out_features, heads, dropout, alpha, concat=True):
        super(GAT, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.heads = heads  # kept for interface compatibility; single head is used
        self.alpha = alpha  # LeakyReLU negative slope
        self.dropout = dropout
        self.concat = concat
        self.linear = nn.Linear(in_features=in_features, out_features=out_features, bias=False)
        # attention vector a = [a1; a2], split between source and target features
        self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.reset_parameters()

    def forward(self, hidden_state, adjacent_matrix=None):
        """
        :param hidden_state: [batch_size, seq_len, in_features]
        :param adjacent_matrix: [batch_size, seq_len, seq_len]
        :return: [batch_size, seq_len, out_features]
        """
        _x = self.linear(hidden_state)
        e = self._para_attentional_mechanism_input(_x)
        # non-edges get a huge negative score so softmax zeroes them out
        zero_vec = -9e15 * torch.ones_like(e)
        attention = torch.where(adjacent_matrix > 0.5, e, zero_vec)
        # Normalize over each node's neighbours, i.e. the LAST dim of the
        # batched [B, N, N] tensor: h'[i] = sum_j att[i, j] * Wh[j].
        # The previous dim=1 normalized over source nodes instead -- a bug
        # inherited from the unbatched 2-D pyGAT implementation.
        attention = F.softmax(attention, dim=-1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, _x)
        if self.concat:
            return F.elu(h_prime)
        else:
            return h_prime

    def _para_attentional_mechanism_input(self, Wh):
        """Compute raw attention logits e[i, j] = LeakyReLU(a1·Wh_i + a2·Wh_j).

        :param Wh: [batch_size, seq_len, out_features]
        """
        Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])
        Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])
        # broadcast add: [B, N, 1] + [B, 1, N] -> [B, N, N]
        e = Wh1 + Wh2.transpose(1, 2)
        return self.leakyrelu(e)

    def reset_parameters(self):
        self.linear.reset_parameters()

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class FustionLayer(nn.Module):
    """Fusion layer that builds the cross-modal graph linking text tokens to image objects.

    (The class name keeps the original spelling because external callers use it.)
    """

    def __init__(self, hid_size):
        super(FustionLayer, self).__init__()
        # one shared projection applied to BOTH modalities so their
        # similarities live in a comparable space
        self.ln = nn.Sequential(nn.Linear(1 * hid_size, 1 * hid_size),
                                nn.ReLU())
        self.reset_parameters()

    def forward(self, text_hidden_states=None, text_attention_mask=None, text_adj_matrix=None, threshold=0.5,
                imgs_hidden_states=None, img_attention_mask=None, img_adj_matrix=None):
        """
        build cross_modal graph.
        we build an edge between a and b only if the semantic similarity sem_sim(a, b) > threshold.
        :param text_hidden_states: [batch_size, seq_len, in_features]
        :param text_adj_matrix: [batch_size, seq_len, seq_len]
        :param text_attention_mask: [batch_size, seq_len]
        :param imgs_hidden_states: [batch_size, num_objects, in-features]
        :param img_adj_matrix: [batch_size, obj_num, obj_num]
        :param img_attention_mask: [batch_size, obj_num]
        :return: [batch_size, seq_len + obj_num, seq_len + obj_num] joint adjacency
        """
        batch_size, seq_len = text_hidden_states.size(0), text_hidden_states.size(1)
        num_objects = imgs_hidden_states.size(1)
        _x = self.ln(text_hidden_states)
        _y = self.ln(imgs_hidden_states)
        # pairwise token-object similarity in (0, 1): [B, seq_len, obj_num]
        _temp = F.sigmoid(torch.bmm(_x, _y.transpose(1, 2)))
        min_value = torch.min(_temp)
        # fill padded rows/columns with the minimum so they never pass the
        # threshold.  NOTE(review): `1 - mask.byte()` masks for masked_fill_
        # are deprecated/rejected on recent PyTorch -- confirm the pinned
        # torch version still accepts uint8 masks.
        _temp = _temp.masked_fill_(1 - text_attention_mask.byte().unsqueeze(-1), min_value)
        if img_attention_mask is not None:
            _temp = _temp.masked_fill_(1 - img_attention_mask.byte().unsqueeze(1), min_value)
        # joint graph: text nodes occupy [0, seq_len), object nodes are
        # shifted by seq_len
        max_num_nodes = seq_len + num_objects
        size = [batch_size, max_num_nodes, max_num_nodes]
        new_adj_matrix = []
        for b in range(batch_size):
            # keep the original intra-text edges
            old_text_edges = dense_to_sparse(text_adj_matrix[b])[0]
            # add a text->object edge wherever similarity clears the threshold
            new_edges = (_temp[b] > threshold).nonzero()
            head = new_edges[:, 0]
            tail = new_edges[:, 1] + seq_len
            if img_attention_mask is not None:
                # intra-image edges, re-indexed into the joint node space
                old_img_edges = dense_to_sparse(img_adj_matrix[b])[0]
                old_img_edges = [old_img_edges[i]+seq_len for i in range(2)]
                edge_index = torch.stack([torch.cat([old_text_edges[0], head, old_img_edges[0]]), torch.cat([old_text_edges[1], tail, old_img_edges[1]])], dim=0)
            else:
                edge_index = torch.stack([torch.cat([old_text_edges[0], head]), torch.cat([old_text_edges[1], tail])], dim=0)
            # back to a dense [max_num_nodes, max_num_nodes] adjacency
            raw_adj = to_dense_adj(edge_index, max_num_nodes=max_num_nodes)
            new_adj_matrix.append(raw_adj)
        # to_dense_adj adds a leading batch dim per sample; squeeze it away and
        # view() restores the expected [B, N, N] shape
        new_adj = torch.stack(new_adj_matrix, dim=0).squeeze().view(size)
        return new_adj

    def reset_parameters(self):
        for layer in self.ln:
            if isinstance(layer, nn.Linear):
                layer.reset_parameters()
class GraphLearner(nn.Module):
    """Learn a task-specific adjacency matrix from node features.

    Supports several pairwise similarity metrics (``metric_type``) and three
    sparsification strategies (``graph_type``): epsilon thresholding
    ('epsilonNN'), k-nearest-neighbours ('KNN'), and relaxed-Bernoulli edge
    sampling ('prob').  Optionally denoises the input features with a
    learnable soft mask before computing similarities.
    """

    def __init__(self, input_size, hidden_size, graph_type, top_k=None, epsilon=None, num_pers=1,
                 metric_type="attention", feature_denoise=True, device=None,
                 temperature=0.1):
        super(GraphLearner, self).__init__()
        self.device = device
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_pers = num_pers        # number of similarity "perspectives" (heads)
        self.graph_type = graph_type
        self.top_k = top_k              # used by 'KNN'
        self.epsilon = epsilon          # used by 'epsilonNN' and 'prob'
        self.metric_type = metric_type
        self.feature_denoise = feature_denoise
        self.temperature = temperature  # relaxation temperature for 'prob'
        if metric_type == 'attention':
            self.linear_sims = nn.ModuleList(
                [nn.Linear(self.input_size, hidden_size, bias=False) for _ in range(num_pers)])
            # (log previously printed -num_pers; typo fixed)
            print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, num_pers))
        elif metric_type == 'weighted_cosine':
            self.weight_tensor = torch.Tensor(num_pers, self.input_size)
            self.weight_tensor = nn.Parameter(nn.init.xavier_uniform_(self.weight_tensor))
            print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, num_pers))
        elif metric_type == 'gat_attention':
            self.linear_sims1 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])
            self.linear_sims2 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])
            self.leakyrelu = nn.LeakyReLU(0.2)
            print('[ GAT_Attention GraphLearner]')
        elif metric_type == 'kernel':
            self.precision_inv_dis = nn.Parameter(torch.Tensor(1, 1))
            self.precision_inv_dis.data.uniform_(0, 1.0)
            self.weight = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(input_size, hidden_size)))
        elif metric_type == 'transformer':
            self.linear_sim1 = nn.Linear(input_size, hidden_size, bias=False)
            self.linear_sim2 = nn.Linear(input_size, hidden_size, bias=False)
        elif metric_type == 'cosine':
            pass  # parameter-free
        elif metric_type == 'mlp':
            self.lin = nn.Linear(self.input_size * 2, 1)
        elif metric_type == 'multi_mlp':
            self.linear_sims1 = nn.ModuleList(
                [nn.Linear(self.input_size, hidden_size, bias=False) for _ in range(num_pers)])
            self.linear_sims2 = nn.ModuleList(
                [nn.Linear(self.hidden_size, hidden_size, bias=False) for _ in range(num_pers)])
            print('[ Multi-perspective {} GraphLearner: {} ]'.format(metric_type, num_pers))
        else:
            raise ValueError('Unknown metric_type: {}'.format(metric_type))
        if self.feature_denoise:
            self.feat_mask = self.construct_feat_mask(input_size, init_strategy="constant")
        print('[ Graph Learner metric type: {}, Graph Type: {} ]'.format(metric_type, self.graph_type))

    def reset_parameters(self):
        """Re-initialize all parameters of the configured metric."""
        if self.feature_denoise:
            self.feat_mask = self.construct_feat_mask(self.input_size, init_strategy="constant")
        if self.metric_type == 'attention':
            for module in self.linear_sims:
                module.reset_parameters()
        elif self.metric_type == 'weighted_cosine':
            self.weight_tensor = nn.Parameter(nn.init.xavier_uniform_(self.weight_tensor))
        elif self.metric_type == 'gat_attention':
            for module in self.linear_sims1:
                module.reset_parameters()
            for module in self.linear_sims2:
                module.reset_parameters()
        elif self.metric_type == 'kernel':
            self.precision_inv_dis.data.uniform_(0, 1.0)
            self.weight = nn.init.xavier_uniform_(self.weight)
        elif self.metric_type == 'transformer':
            self.linear_sim1.reset_parameters()
            self.linear_sim2.reset_parameters()
        elif self.metric_type == 'cosine':
            pass
        elif self.metric_type == 'mlp':
            # NOTE(review): previously reset non-existent ``self.lin1``/``self.lin2``
            # (AttributeError); __init__ only defines ``self.lin``.
            self.lin.reset_parameters()
        elif self.metric_type == 'multi_mlp':
            for module in self.linear_sims1:
                module.reset_parameters()
            for module in self.linear_sims2:
                module.reset_parameters()
        else:
            raise ValueError('Unknown metric_type: {}'.format(self.metric_type))

    def forward(self, node_features, node_mask=None):
        """Return (possibly denoised) features and the learned adjacency."""
        if self.feature_denoise:
            masked_features = self.mask_feature(node_features)
            learned_adj = self.learn_adj(masked_features, ctx_mask=node_mask)
            return masked_features, learned_adj
        else:
            learned_adj = self.learn_adj(node_features, ctx_mask=node_mask)
            return node_features, learned_adj

    def learn_adj(self, context, ctx_mask=None):
        """
        Parameters
        :context, (batch_size, ctx_size, dim)
        :ctx_mask, (batch_size, ctx_size)
        Returns
        :attention, (batch_size, ctx_size, ctx_size)
        """
        if self.metric_type == 'attention':
            attention = 0
            for _ in range(len(self.linear_sims)):
                context_fc = torch.relu(self.linear_sims[_](context))
                attention += torch.matmul(context_fc, context_fc.transpose(-1, -2))
            attention /= len(self.linear_sims)
            markoff_value = -INF
        elif self.metric_type == 'weighted_cosine':
            expand_weight_tensor = self.weight_tensor.unsqueeze(1)
            if len(context.shape) == 3:
                expand_weight_tensor = expand_weight_tensor.unsqueeze(1)
            context_fc = context.unsqueeze(0) * expand_weight_tensor
            context_norm = F.normalize(context_fc, p=2, dim=-1)
            attention = torch.matmul(context_norm, context_norm.transpose(-1, -2)).mean(0)
            markoff_value = 0
        elif self.metric_type == 'transformer':
            Q = self.linear_sim1(context)
            attention = torch.matmul(Q, Q.transpose(-1, -2)) / math.sqrt(Q.shape[-1])
            markoff_value = -INF
        elif self.metric_type == 'gat_attention':
            attention = []
            for _ in range(len(self.linear_sims1)):
                a_input1 = self.linear_sims1[_](context)
                a_input2 = self.linear_sims2[_](context)
                attention.append(self.leakyrelu(a_input1 + a_input2.transpose(-1, -2)))
            attention = torch.mean(torch.stack(attention, 0), 0)
            markoff_value = -INF
        elif self.metric_type == 'kernel':
            dist_weight = torch.mm(self.weight, self.weight.transpose(-1, -2))
            attention = self.compute_distance_mat(context, dist_weight)
            attention = torch.exp(-0.5 * attention * (self.precision_inv_dis ** 2))
            markoff_value = 0
        elif self.metric_type == 'cosine':
            # NOTE(review): torch.mm is 2-D only, so this branch assumes an
            # unbatched context -- confirm before using it with 3-D input.
            context_norm = context.div(torch.norm(context, p=2, dim=-1, keepdim=True))
            attention = torch.mm(context_norm, context_norm.transpose(-1, -2)).detach()
            markoff_value = 0
        elif self.metric_type == 'mlp':
            seq_len = context.size(1)
            # score every ordered node pair with a 2-layer-free MLP on the
            # concatenated features
            context_fc = context.unsqueeze(1).repeat(1, seq_len, 1, 1)
            context_bc = context.unsqueeze(2).repeat(1, 1, seq_len, 1)
            attention = F.sigmoid(self.lin(torch.cat([context_fc, context_bc], dim=-1)).squeeze())
            markoff_value = 0
        elif self.metric_type == 'multi_mlp':
            attention = 0
            for _ in range(self.num_pers):
                context_fc = torch.relu(self.linear_sims2[_](torch.relu(self.linear_sims1[_](context))))
                attention += F.sigmoid(torch.matmul(context_fc, context_fc.transpose(-1, -2)))
            attention /= self.num_pers
            markoff_value = -INF
        if ctx_mask is not None:
            # Equivalent to the legacy ``1 - mask.byte()`` but uses a bool
            # mask, which masked_fill_ requires on modern PyTorch.
            attention = attention.masked_fill_(~ctx_mask.bool().unsqueeze(1), markoff_value)
            attention = attention.masked_fill_(~ctx_mask.bool().unsqueeze(-1), markoff_value)
        if self.graph_type == 'epsilonNN':
            assert self.epsilon is not None
            attention = self.build_epsilon_neighbourhood(attention, self.epsilon, markoff_value)
        elif self.graph_type == 'KNN':
            assert self.top_k is not None
            attention = self.build_knn_neighbourhood(attention, self.top_k, markoff_value)
        elif self.graph_type == 'prob':
            attention = self.build_prob_neighbourhood(attention, self.epsilon, temperature=self.temperature)
        else:
            raise ValueError('Unknown graph_type: {}'.format(self.graph_type))
        # metric-dependent row normalization of the sparsified graph
        if self.graph_type in ['KNN', 'epsilonNN']:
            if self.metric_type in ('kernel', 'weighted_cosine'):
                assert attention.min().item() >= 0
                attention = attention / torch.clamp(torch.sum(attention, dim=-1, keepdim=True), min=VERY_SMALL_NUMBER)
            elif self.metric_type == 'cosine':
                attention = (attention > 0).float()
                attention = self.normalize_adj(attention)
            elif self.metric_type in ('transformer', 'attention', 'gat_attention'):
                attention = torch.softmax(attention, dim=-1)
        return attention

    def normalize_adj(self, mx):
        """Symmetrically normalize an adjacency: D^-1/2 A D^-1/2.

        NOTE(review): this was previously defined without ``self`` yet invoked
        as a bound method (``self.normalize_adj(...)``), which raised a
        TypeError; the signature is fixed here.
        """
        rowsum = mx.sum(1)
        r_inv_sqrt = torch.pow(rowsum, -0.5).flatten()
        r_inv_sqrt[torch.isinf(r_inv_sqrt)] = 0.  # isolated nodes: degree 0 -> 0
        r_mat_inv_sqrt = torch.diag(r_inv_sqrt)
        return torch.mm(torch.mm(mx, r_mat_inv_sqrt).transpose(-1, -2), r_mat_inv_sqrt)

    def build_knn_neighbourhood(self, attention, top_k, markoff_value):
        """Keep each node's top_k strongest scores; fill the rest with markoff_value."""
        top_k = min(top_k, attention.size(-1))
        knn_val, knn_ind = torch.topk(attention, top_k, dim=-1)
        weighted_adjacency_matrix = (markoff_value * torch.ones_like(attention)).scatter_(-1, knn_ind, knn_val)
        weighted_adjacency_matrix = weighted_adjacency_matrix.to(self.device)
        return weighted_adjacency_matrix

    def build_epsilon_neighbourhood(self, attention, epsilon, markoff_value):
        """Sigmoid the scores and keep only those above epsilon."""
        attention = torch.sigmoid(attention)
        mask = (attention > epsilon).detach().float()
        weighted_adjacency_matrix = attention * mask + markoff_value * (1 - mask)
        return weighted_adjacency_matrix

    def build_prob_neighbourhood(self, attention, epsilon=0.1, temperature=0.1):
        """Sample soft edges from a relaxed Bernoulli, then hard-threshold at epsilon."""
        weighted_adjacency_matrix = RelaxedBernoulli(temperature=torch.Tensor([temperature]).to(attention.device),
                                                     probs=attention).rsample()
        mask = (weighted_adjacency_matrix > epsilon).detach().float()
        weighted_adjacency_matrix = weighted_adjacency_matrix * mask + 0.0 * (1 - mask)
        return weighted_adjacency_matrix

    def compute_distance_mat(self, X, weight=None):
        """Pairwise squared distances under an optional linear transform."""
        if weight is not None:
            trans_X = torch.mm(X, weight)
        else:
            trans_X = X
        norm = torch.sum(trans_X * X, dim=-1)
        dists = -2 * torch.matmul(trans_X, X.transpose(-1, -2)) + norm.unsqueeze(0) + norm.unsqueeze(1)
        return dists

    def construct_feat_mask(self, feat_dim, init_strategy="normal"):
        """Create the learnable per-dimension feature mask."""
        mask = nn.Parameter(torch.FloatTensor(feat_dim))
        if init_strategy == "normal":
            std = 0.1
            with torch.no_grad():
                mask.normal_(1.0, std)
        elif init_strategy == "constant":
            with torch.no_grad():
                nn.init.constant_(mask, 0.0)
        return mask

    def mask_feature(self, x, use_sigmoid=True, marginalize=True):
        """Soft-mask features; optionally marginalize masked dims with noise."""
        feat_mask = (torch.sigmoid(self.feat_mask) if use_sigmoid else self.feat_mask).to(self.device)
        if marginalize:
            std_tensor = torch.ones_like(x, dtype=torch.float) / 2
            mean_tensor = torch.zeros_like(x, dtype=torch.float) - x
            z = torch.normal(mean=mean_tensor, std=std_tensor).to(self.device)
            x = x + z * (1 - feat_mask)
        else:
            x = x * feat_mask
        return x
class DynamicLSTM(nn.Module):
    """RNN wrapper that handles variable-length, arbitrarily-ordered batches.

    Sorts the batch by decreasing length, packs it, runs the chosen RNN, then
    unpacks and restores the original sample order (TensorFlow-style
    ``RNN(input, length)`` semantics).
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bias=True, batch_first=True, dropout=0,
                 bidirectional=False, only_use_last_hidden_state=False, rnn_type='LSTM'):
        """
        :param input_size: number of expected features in the input x
        :param hidden_size: number of features in the hidden state h
        :param num_layers: number of recurrent layers
        :param bias: if False, the layer does not use bias weights b_ih and b_hh
        :param batch_first: if True, tensors are (batch, seq, feature)
        :param dropout: dropout on the outputs of each RNN layer except the last
        :param bidirectional: if True, becomes a bidirectional RNN
        :param only_use_last_hidden_state: return only the (reordered) final hidden state
        :param rnn_type: one of {LSTM, GRU, RNN}
        """
        super(DynamicLSTM, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = dropout
        self.bidirectional = bidirectional
        self.only_use_last_hidden_state = only_use_last_hidden_state
        self.rnn_type = rnn_type
        # dispatch table instead of an if/elif chain; unknown types simply
        # leave self.RNN unset, exactly like the original
        rnn_classes = {'LSTM': nn.LSTM, 'GRU': nn.GRU, 'RNN': nn.RNN}
        if self.rnn_type in rnn_classes:
            self.RNN = rnn_classes[self.rnn_type](
                input_size=input_size, hidden_size=hidden_size, num_layers=num_layers,
                bias=bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional)

    def forward(self, x, x_len, h0=None):
        """sequence -> sort -> pack -> RNN -> unpack -> unsort.

        :param x: padded sequence embeddings
        :param x_len: per-sample lengths (tensor)
        :return: final hidden state only, or (output, (h_n, c_n))
        """
        # order samples by decreasing length, remembering how to undo it
        desc_order = torch.argsort(-x_len)
        restore_order = torch.argsort(desc_order).long()
        sorted_len = x_len[desc_order]
        sorted_x = x[desc_order.long()]
        # pack away the padding
        packed = torch.nn.utils.rnn.pack_padded_sequence(sorted_x, sorted_len, batch_first=self.batch_first)
        if self.rnn_type == 'LSTM':
            initial = None if h0 is None else (h0, h0)
            out_pack, (ht, ct) = self.RNN(packed, initial)
        else:
            out_pack, ht = self.RNN(packed, h0)
            ct = None
        # restore the original sample order of h_n (batch lives on dim 1)
        ht = torch.transpose(ht, 0, 1)[restore_order]
        ht = torch.transpose(ht, 0, 1)
        if self.only_use_last_hidden_state:
            return ht
        # unpack the outputs and restore their order too
        out = torch.nn.utils.rnn.pad_packed_sequence(out_pack, batch_first=self.batch_first)[0]
        out = out[restore_order]
        if self.rnn_type == 'LSTM':
            ct = torch.transpose(ct, 0, 1)[restore_order]
            ct = torch.transpose(ct, 0, 1)
        return out, (ht, ct)
| 21,984 | 43.414141 | 161 | py |
MRE-ISE | MRE-ISE-main/cores/gene/train.py | import pickle
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from sklearn.metrics import classification_report
from transformers.optimization import get_linear_schedule_with_warmup
from modules.metrics import eval_result
import math
class Trainer(object):
def __init__(self, train_data=None, dev_data=None, test_data=None, re_dict=None, model=None, process=None,
             args=None, logger=None, writer=None) -> None:
    """Bundle data loaders, model and bookkeeping state for train/dev/test runs.

    :param train_data: training DataLoader (may be None for eval-only use)
    :param dev_data: validation DataLoader
    :param test_data: test DataLoader
    :param re_dict: relation-name -> label-id mapping used for reporting
    :param model: the MRE model to optimize
    :param process: auxiliary processor object (stored, not used here)
    :param args: experiment configuration namespace
    :param logger: logging.Logger for progress messages
    :param writer: tensorboardX SummaryWriter (optional)
    """
    self.train_data = train_data
    self.dev_data = dev_data
    self.test_data = test_data
    self.re_dict = re_dict
    self.model = model
    self.process = process
    self.logger = logger
    self.writer = writer
    self.refresh_step = 2  # update the progress bar / loss every N steps
    # best-so-far metrics and the epochs they were reached at
    self.best_dev_metric = 0
    self.best_test_metric = 0
    self.best_dev_epoch = None
    self.best_test_epoch = None
    self.optimizer = None
    if self.train_data is not None:
        # total optimizer steps over the whole run (one step per batch)
        self.train_num_steps = len(self.train_data) * args.num_epochs
    self.step = 0
    self.args = args
    if self.args.do_train:
        # sets up optimizer/scheduler -- defined elsewhere in this class
        self.before_multimodal_train()
    self.loss_func = nn.CrossEntropyLoss()
def train(self):
    """Run the full training loop.

    Combines three losses per batch: the task cross-entropy on the
    topic-enhanced logits, the GIB (information-bottleneck) loss on the
    compressed logits (optionally with a Gaussian KL term), and the topic
    model loss, weighted by args.eta1 / args.eta2.  Periodically evaluates
    on dev (every 50 steps) and on test (once per epoch after
    ``eval_begin_epoch``).
    """
    self.step = 0
    self.model.train()
    self.logger.info("***** Running training *****")
    self.logger.info("  Num instance = %d", len(self.train_data) * self.args.batch_size)
    self.logger.info("  Num epoch = %d", self.args.num_epochs)
    self.logger.info("  Batch size = %d", self.args.batch_size)
    self.logger.info("  Learning rate = {}".format(self.args.lr))
    self.logger.info("  Evaluate begin = %d", self.args.eval_begin_epoch)
    if self.args.load_path is not None:  # load model from load_path
        self.logger.info("Loading model from {}".format(self.args.load_path))
        self.model.load_state_dict(torch.load(self.args.load_path))
        self.logger.info("Load model successful!")
    with tqdm(total=self.train_num_steps, postfix='loss:{0:<6.5f}', leave=False, dynamic_ncols=True,
              initial=self.step) as pbar:
        self.pbar = pbar
        avg_loss = 0
        for epoch in range(1, self.args.num_epochs + 1):
            pbar.set_description_str(desc="Epoch {}/{}".format(epoch, self.args.num_epochs))
            for batch in self.train_data:
                self.step += 1
                # lazily move tensors to the target device; the generator is
                # consumed by _step (defined elsewhere in this class)
                batch = (tup.to(self.args.device) if isinstance(tup, torch.Tensor) else tup for tup in batch)
                (mu, std), logits1, logits2, labels, topic_loss = self._step(batch, mode="train", step=self.step)
                GIB_loss = self.loss_func(logits1, labels.view(-1))
                task_loss = self.loss_func(logits2, labels.view(-1))
                if self.args.is_IB:
                    # closed-form KL(q(z|x) || N(0, I)), in bits (divided by log 2)
                    KL_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2))
                    GIB_loss += self.args.beta * KL_loss
                self.writer.add_scalar(tag='task_loss', scalar_value=task_loss.detach().cpu().item(),
                                       global_step=self.step)
                self.writer.add_scalar(tag='GIB_loss', scalar_value=GIB_loss.detach().cpu().item(),
                                       global_step=self.step)
                self.writer.add_scalar(tag='topic_loss', scalar_value=topic_loss.detach().cpu().item(),
                                       global_step=self.step)
                # weighted multi-task objective
                loss = task_loss + self.args.eta1 * GIB_loss + self.args.eta2 * topic_loss
                avg_loss += loss.detach().cpu().item()
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()
                self.optimizer.zero_grad()
                if self.step % self.refresh_step == 0:
                    # report the running average loss and reset it
                    avg_loss = float(avg_loss) / self.refresh_step
                    print_output = "loss:{:<6.5f}".format(avg_loss)
                    pbar.update(self.refresh_step)
                    pbar.set_postfix_str(print_output)
                    if self.writer is not None:
                        self.writer.add_scalar(tag='train_loss', scalar_value=avg_loss,
                                               global_step=self.step)  # tensorbordx
                    avg_loss = 0
                if self.step % 50 == 0:
                    self.evaluate(self.step)
            if epoch >= self.args.eval_begin_epoch:
                self.test(epoch)
        pbar.close()
        self.pbar = None
        self.logger.info("Get best dev performance at epoch {}, best dev f1 score is {}".format(self.best_dev_epoch,
                                                                                                self.best_dev_metric))
        self.logger.info(
            "Get best test performance at epoch {}, best test f1 score is {}".format(self.best_test_epoch,
                                                                                     self.best_test_metric))
def evaluate(self, epoch):
self.model.eval()
self.logger.info("***** Running evaluate *****")
self.logger.info(" Num instance = %d", len(self.dev_data) * self.args.batch_size)
self.logger.info(" Batch size = %d", self.args.batch_size)
step = 0
true_labels, pred_labels = [], []
with torch.no_grad():
with tqdm(total=len(self.dev_data), leave=False, dynamic_ncols=True) as pbar:
pbar.set_description_str(desc="Dev")
total_loss = 0
for batch in self.dev_data:
step += 1
batch = (tup.to(self.args.device) if isinstance(tup, torch.Tensor) else tup for tup in
batch) # to cpu/cuda device
(mu, std), logits1, logits2, labels, topic_loss = self._step(batch, mode="dev")
GIB_loss = self.loss_func(logits1, labels.view(-1))
task_loss = self.loss_func(logits2, labels.view(-1))
if self.args.is_IB:
KL_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2))
GIB_loss += self.args.beta * KL_loss
loss = task_loss + self.args.eta1 * GIB_loss + self.args.eta2 * topic_loss
total_loss += loss.detach().cpu().item()
preds = logits2.argmax(-1)
true_labels.extend(labels.view(-1).detach().cpu().tolist())
pred_labels.extend(preds.view(-1).detach().cpu().tolist())
pbar.update()
# evaluate done
pbar.close()
sk_result = classification_report(y_true=true_labels, y_pred=pred_labels,
labels=list(self.re_dict.values())[1:],
target_names=list(self.re_dict.keys())[1:], digits=4)
self.logger.info("%s\n", sk_result)
result = eval_result(true_labels, pred_labels, self.re_dict, self.logger)
acc, micro_f1 = round(result['acc'] * 100, 4), round(result['micro_f1'] * 100, 4)
if self.writer is not None:
self.writer.add_scalar(tag='dev_acc', scalar_value=acc, global_step=epoch) # tensorbordx
self.writer.add_scalar(tag='dev_f1', scalar_value=micro_f1, global_step=epoch) # tensorbordx
self.writer.add_scalar(tag='dev_loss', scalar_value=total_loss / len(self.test_data),
global_step=epoch) # tensorbordx
self.logger.info("Epoch {}/{}, best dev f1: {}, best epoch: {}, current dev f1 score: {}, acc: {}." \
.format(epoch, self.args.num_epochs, self.best_dev_metric, self.best_dev_epoch,
micro_f1, acc))
if micro_f1 >= self.best_dev_metric: # this epoch get best performance
self.logger.info("Get better performance at epoch {}".format(epoch))
self.best_dev_epoch = epoch
self.best_dev_metric = micro_f1 # update best metric(f1 score)
if self.args.save_path is not None: # save model
torch.save(self.model.state_dict(), self.args.save_path + "/best_model.pth")
self.logger.info("Save best model at {}".format(self.args.save_path))
self.model.train()
    def test(self, epoch):
        """Run one full pass over the test set, log metrics, and track the best test micro-F1.

        Args:
            epoch: current epoch index, used for logging and tensorboard steps.
        If ``args.load_path`` is set, the checkpointed best model is loaded
        before evaluating. The model is left in train mode on exit.
        """
        self.model.eval()
        self.logger.info("\n***** Running testing *****")
        self.logger.info(" Num instance = %d", len(self.test_data) * self.args.batch_size)
        self.logger.info(" Batch size = %d", self.args.batch_size)
        if self.args.load_path is not None:  # load model from load_path
            self.logger.info("Loading model from {}".format(self.args.load_path + "/best_model.pth"))
            self.model.load_state_dict(torch.load(self.args.load_path + "/best_model.pth"))
            self.logger.info("Load model successful!")
            self.model.to(self.args.device)
        true_labels, pred_labels = [], []
        with torch.no_grad():
            with tqdm(total=len(self.test_data), leave=False, dynamic_ncols=True) as pbar:
                pbar.set_description_str(desc="Testing")
                total_loss = 0
                for batch in self.test_data:
                    batch = (tup.to(self.args.device) if isinstance(tup, torch.Tensor) else tup for tup in
                             batch)  # to cpu/cuda device
                    # NOTE(review): mode="dev" is reused here; _step treats every
                    # non-train mode the same way, so this is a shared eval path.
                    (mu, std), logits1, logits2, labels, topic_loss = self._step(batch, mode="dev")
                    GIB_loss = self.loss_func(logits1, labels.view(-1))
                    task_loss = self.loss_func(logits2, labels.view(-1))
                    if self.args.is_IB:
                        # KL(N(mu, std) || N(0, 1)) in bits (divided by log 2)
                        KL_loss = -0.5 * (1 + 2 * std.log() - mu.pow(2) - std.pow(2)).sum(1).mean().div(math.log(2))
                        GIB_loss += self.args.beta * KL_loss
                    loss = task_loss + self.args.eta1 * GIB_loss + self.args.eta2 * topic_loss
                    total_loss += loss.detach().cpu().item()
                    preds = logits2.argmax(-1)
                    true_labels.extend(labels.view(-1).detach().cpu().tolist())
                    pred_labels.extend(preds.view(-1).detach().cpu().tolist())
                    pbar.update()
                # evaluate done
                pbar.close()
                # index 0 of re_dict (the "no relation" class) is excluded from the report
                sk_result = classification_report(y_true=true_labels, y_pred=pred_labels,
                                                  labels=list(self.re_dict.values())[1:],
                                                  target_names=list(self.re_dict.keys())[1:], digits=4)
                self.logger.info("%s\n", sk_result)
                result = eval_result(true_labels, pred_labels, self.re_dict, self.logger)
                acc, micro_f1 = round(result['acc'] * 100, 4), round(result['micro_f1'] * 100, 4)
                if self.writer is not None:
                    self.writer.add_scalar(tag='test_acc', scalar_value=acc, global_step=epoch)  # tensorbordx
                    self.writer.add_scalar(tag='test_f1', scalar_value=micro_f1, global_step=epoch)  # tensorbordx
                    self.writer.add_scalar(tag='test_loss', scalar_value=total_loss / len(self.test_data),
                                           global_step=epoch)  # tensorbordx
                    # redundant reset: total_loss is not read again before going out of scope
                    total_loss = 0
                ############
                self.logger.info("Epoch {}/{}, best test f1: {}, best epoch: {}, current test f1 score: {}, acc: {}" \
                                 .format(epoch, self.args.num_epochs, self.best_test_metric, self.best_test_epoch,
                                         micro_f1, acc))
                if micro_f1 >= self.best_test_metric:  # this epoch get best performance
                    self.best_test_metric = micro_f1
                    self.best_test_epoch = epoch
        self.model.train()
def _step(self, batch, mode="train", step=0):
input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, labels, image, aux_imgs, \
aux_mask, edge_mask, vbow_features, tbow_features = batch
(mu, std), logits1, logits2, topic_loss = self.model(input_ids=input_ids, attention_mask=attention_mask,
head_tail_pos=head_tail_pos, piece2word=pieces2word,
adj_matrix=adj_matrix, labels=labels,
aux_imgs=aux_imgs, aux_mask=aux_mask,
edge_mask=edge_mask, writer=self.writer, step=step,
X_T_bow=tbow_features, X_V_bow=vbow_features)
return (mu, std), logits1, logits2, labels, topic_loss
def before_multimodal_train(self):
pretrained_params = []
main_params = []
for name, param in self.model.named_parameters():
if 'text_model' in name:
pretrained_params.append(param)
elif 'vision_model' in name:
pretrained_params.append(param)
else:
main_params.append(param)
optimizer_grouped_parameters = [
{'params': pretrained_params, 'lr': self.args.lr_pretrained, 'weight_decay': 1e-2},
{'params': main_params, 'lr': self.args.lr_main, 'weight_decay': 1e-2},
]
self.optimizer = optim.Adam(optimizer_grouped_parameters)
self.scheduler = get_linear_schedule_with_warmup(optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_ratio * self.train_num_steps,
num_training_steps=self.train_num_steps)
self.model.to(self.args.device)
| 14,294 | 56.874494 | 122 | py |
DEAT | DEAT-main/preactresnet.py | '''Pre-activation ResNet in PyTorch.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv:1603.05027
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
# Normalization-layer configuration shared by every block/class in this file.
track_running_stats=True
affine=True
normal_func = nn.BatchNorm2d
# Alternative: uncomment below to switch to InstanceNorm without running stats.
# track_running_stats=False
# affine=True
# normal_func = nn.InstanceNorm2d
if not track_running_stats:
    print('BN track False')
class PreActBlock(nn.Module):
    '''Pre-activation version of the BasicBlock.

    Conv path: BN -> act -> conv3x3 -> BN -> act -> conv3x3, with an identity
    shortcut (or a 1x1-conv projection when shape changes) added afterwards.
    '''
    expansion = 1

    # Supported activations; factories so each block owns its own module.
    _ACTIVATIONS = {
        'ReLU': lambda beta: nn.ReLU(inplace=True),
        'Softplus': lambda beta: nn.Softplus(beta=beta, threshold=20),
        'GELU': lambda beta: nn.GELU(),
        'ELU': lambda beta: nn.ELU(alpha=1.0, inplace=True),
        'LeakyReLU': lambda beta: nn.LeakyReLU(negative_slope=0.1, inplace=True),
        'SELU': lambda beta: nn.SELU(inplace=True),
        'CELU': lambda beta: nn.CELU(alpha=1.2, inplace=True),
        'Tanh': lambda beta: nn.Tanh(),
    }

    def __init__(self, in_planes, planes, stride=1, activation='ReLU', softplus_beta=1):
        """Args:
            in_planes / planes: input / output channel counts.
            stride: stride of the first conv (spatial downsampling).
            activation: one of the keys of ``_ACTIVATIONS``.
            softplus_beta: beta used only when activation == 'Softplus'.
        """
        super(PreActBlock, self).__init__()
        self.bn1 = normal_func(in_planes, track_running_stats=track_running_stats, affine=affine)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = normal_func(planes, track_running_stats=track_running_stats, affine=affine)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        if stride != 1 or in_planes != self.expansion*planes:
            # projection shortcut when spatial size or channel count changes
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
            )

        # BUG FIX: previously an unknown activation name silently left
        # self.relu undefined, crashing later in forward() with an opaque
        # AttributeError; now fail fast with a clear error.
        try:
            self.relu = self._ACTIVATIONS[activation](softplus_beta)
        except KeyError:
            raise ValueError('Unsupported activation: {}'.format(activation))
        print(activation)

    def forward(self, x):
        out = self.relu(self.bn1(x))
        # shortcut is taken AFTER the first pre-activation (standard PreAct design)
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(self.relu(self.bn2(out)))
        out += shortcut
        return out
class PreActBottleneck(nn.Module):
    '''Pre-activation version of the original Bottleneck module.'''
    # NOTE(review): unlike PreActBlock, the `activation` / `softplus_beta`
    # arguments are accepted but never used -- forward() is hard-coded to
    # F.relu. Confirm whether non-ReLU activations should apply here too.
    expansion = 4
    def __init__(self, in_planes, planes, stride=1, activation='ReLU', softplus_beta=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = normal_func(in_planes, track_running_stats=track_running_stats, affine=affine)
        # 1x1 reduce -> 3x3 (strided) -> 1x1 expand, each preceded by BN + ReLU
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = normal_func(planes, track_running_stats=track_running_stats, affine=affine)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = normal_func(planes, track_running_stats=track_running_stats, affine=affine)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        if stride != 1 or in_planes != self.expansion*planes:
            # projection shortcut when spatial size or channel count changes
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
            )
    def forward(self, x):
        out = F.relu(self.bn1(x))
        # shortcut branches off after the first pre-activation
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        out += shortcut
        return out
class PreActResNet(nn.Module):
    """Pre-activation ResNet backbone (He et al., arXiv:1603.05027).

    Options:
        normalize: L2-normalize features (scaled by `scale`) AND row-normalize
            the classifier weights on every forward pass (hypersphere head).
        normalize_only_FN: L2-normalize features only.
        activation / softplus_beta: nonlinearity used throughout the network.
    """
    def __init__(self, block, num_blocks, num_classes=10, normalize = False, normalize_only_FN = False, scale = 15, activation='ReLU', softplus_beta=1):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.normalize = normalize
        self.normalize_only_FN = normalize_only_FN
        self.scale = scale
        self.activation = activation
        self.softplus_beta = softplus_beta
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        # four stages; spatial resolution halves from stage 2 onwards
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.bn = normal_func(512 * block.expansion, track_running_stats=track_running_stats, affine=affine)
        if self.normalize:
            # no bias: the head is a pure cosine-similarity classifier
            self.linear = nn.Linear(512*block.expansion, num_classes, bias=False)
        else:
            self.linear = nn.Linear(512*block.expansion, num_classes)
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
            print('ReLU')
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
            print('Softplus')
        elif activation == 'GELU':
            self.relu = nn.GELU()
            print('GELU')
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
            print('ELU')
        elif activation == 'LeakyReLU':
            self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
            print('LeakyReLU')
        elif activation == 'SELU':
            self.relu = nn.SELU(inplace=True)
            print('SELU')
        elif activation == 'CELU':
            self.relu = nn.CELU(alpha=1.2, inplace=True)
            print('CELU')
        elif activation == 'Tanh':
            self.relu = nn.Tanh()
            print('Tanh')
        print('Use activation of ' + activation)
    def _make_layer(self, block, planes, num_blocks, stride):
        # first block may downsample; the rest keep stride 1
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride,
                activation=self.activation, softplus_beta=self.softplus_beta))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.relu(self.bn(out))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        if self.normalize_only_FN:
            out = F.normalize(out, p=2, dim=1)
        if self.normalize:
            out = F.normalize(out, p=2, dim=1) * self.scale
            # weight normalization: mutates linear weights in place on EVERY
            # forward pass so logits are scaled cosine similarities
            for _, module in self.linear.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.linear(out)
def PreActResNet18(num_classes=10, normalize = False, normalize_only_FN = False, scale = 15, activation='ReLU', softplus_beta=1):
    """Build an 18-layer pre-activation ResNet (two basic blocks per stage)."""
    return PreActResNet(PreActBlock, [2, 2, 2, 2],
                        num_classes=num_classes,
                        normalize=normalize,
                        normalize_only_FN=normalize_only_FN,
                        scale=scale,
                        activation=activation,
                        softplus_beta=softplus_beta)
def PreActResNet34():
    # 34-layer variant: basic blocks, [3,4,6,3] per stage, default options.
    return PreActResNet(PreActBlock, [3,4,6,3])
def PreActResNet50():
    # 50-layer variant: bottleneck blocks (expansion 4).
    return PreActResNet(PreActBottleneck, [3,4,6,3])
def PreActResNet101():
    # 101-layer variant.
    return PreActResNet(PreActBottleneck, [3,4,23,3])
def PreActResNet152():
    # 152-layer variant.
    return PreActResNet(PreActBottleneck, [3,8,36,3])
def test():
    # Smoke test: forward one random CIFAR-sized image through ResNet-18.
    net = PreActResNet18()
    y = net((torch.randn(1,3,32,32)))
    print(y.size())
# test()
| 7,760 | 37.044118 | 152 | py |
DEAT | DEAT-main/utils.py | import numpy as np
from collections import namedtuple
import torch
from torch import nn
import torchvision
from torch.optim.optimizer import Optimizer, required
# Target device for all Batches below: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
################################################################
## Components from https://github.com/davidcpage/cifar10-fast ##
################################################################
#####################
## data preprocessing
#####################
cifar10_mean = (0.4914, 0.4822, 0.4465) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
cifar10_std = (0.2471, 0.2435, 0.2616) # equals np.std(train_set.train_data, axis=(0,1,2))/255
def pad(x, border=4):
    """Reflect-pad the H and W axes of an NHWC batch by `border` pixels per side."""
    widths = [(0, 0), (border, border), (border, border), (0, 0)]
    return np.pad(x, widths, mode='reflect')
def transpose(x, source='NHWC', target='NCHW'):
    """Reorder array axes from the `source` layout to the `target` layout."""
    order = tuple(source.index(axis) for axis in target)
    return x.transpose(order)
#####################
## data augmentation
#####################
class Crop(namedtuple('Crop', ('h', 'w'))):
    """Random-crop transform: extract an h-by-w window from a CHW image."""

    def __call__(self, x, x0, y0):
        # rows [y0, y0+h), columns [x0, x0+w); channel axis untouched
        return x[:, y0:y0 + self.h, x0:x0 + self.w]

    def options(self, x_shape):
        """All valid top-left corner positions for an image of shape (C, H, W)."""
        channels, height, width = x_shape
        return {'x0': range(width + 1 - self.w), 'y0': range(height + 1 - self.h)}

    def output_shape(self, x_shape):
        channels, height, width = x_shape
        return (channels, self.h, self.w)
class FlipLR(namedtuple('FlipLR', ())):
    """Horizontal-flip transform for CHW images."""

    def __call__(self, x, choice):
        if not choice:
            return x
        # reverse the width axis; copy so the result owns contiguous memory
        return x[:, :, ::-1].copy()

    def options(self, x_shape):
        return {'choice': [True, False]}
class Cutout(namedtuple('Cutout', ('h', 'w'))):
    """Cutout augmentation: zero out an h-by-w patch of a CHW image."""

    def __call__(self, x, x0, y0):
        patched = x.copy()  # never mutate the caller's array
        patched[:, y0:y0 + self.h, x0:x0 + self.w] = 0.0
        return patched

    def options(self, x_shape):
        channels, height, width = x_shape
        return {'x0': range(width + 1 - self.w), 'y0': range(height + 1 - self.h)}
class Transform():
    """Dataset wrapper applying a pipeline of parameterized random transforms.

    Call set_random_choices() once per epoch to draw per-example parameters,
    then index the wrapper like the underlying dataset.
    """

    def __init__(self, dataset, transforms):
        self.dataset, self.transforms = dataset, transforms
        self.choices = None  # populated by set_random_choices()

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        data, labels = self.dataset[index]
        for per_example, transform in zip(self.choices, self.transforms):
            kwargs = {name: values[index] for name, values in per_example.items()}
            data = transform(data, **kwargs)
        return data, labels

    def set_random_choices(self):
        """Draw one random parameter set per example for every transform."""
        self.choices = []
        shape = self.dataset[0][0].shape
        count = len(self)
        for transform in self.transforms:
            options = transform.options(shape)
            # transforms that change the image shape propagate it downstream
            if hasattr(transform, 'output_shape'):
                shape = transform.output_shape(shape)
            self.choices.append({name: np.random.choice(values, size=count)
                                 for name, values in options.items()})
#####################
## dataset
#####################
def cifar10(root):
    """Download CIFAR-10 into `root` (if missing) and return the raw arrays.

    Returns a dict {'train': {'data', 'labels'}, 'test': {'data', 'labels'}}.
    """
    splits = {}
    for name, is_train in (('train', True), ('test', False)):
        ds = torchvision.datasets.CIFAR10(root=root, train=is_train, download=True)
        splits[name] = {'data': ds.data, 'labels': ds.targets}
    return splits
#####################
## data loading
#####################
class Batches():
    """DataLoader wrapper yielding dict batches moved to `device` in half precision."""

    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers,
            pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )

    def __iter__(self):
        # re-draw augmentation parameters once per epoch when requested
        if self.set_random_choices:
            self.dataset.set_random_choices()
        for x, y in self.dataloader:
            yield {'input': x.to(device).half(), 'target': y.to(device).long()}

    def __len__(self):
        return len(self.dataloader)
#####################
## new optimizer
#####################
class SGD_GCC(Optimizer):
    """SGD with momentum plus Gradient Centralization for conv layers only.

    Identical to classic torch.optim.SGD except that gradients of parameters
    with more than 3 dimensions (conv kernels) are centered before the update:
    the mean over all axes except dim 0 is subtracted.
    NOTE(review): uses the legacy ``Tensor.add_(scalar, tensor)`` argument
    order, which recent PyTorch releases removed -- requires the older torch
    API this repo was written against.
    """
    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GCC, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(SGD_GCC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(weight_decay, p.data)
                #GC operation for Conv layers
                # >3 dims means a conv kernel; subtract the per-filter gradient mean
                if len(list(d_p.size()))>3:
                    d_p.add_(-d_p.mean(dim = tuple(range(1,len(list(d_p.size())))), keepdim = True))
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # first step: buffer initialized with the raw gradient
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                p.data.add_(-group['lr'], d_p)
        return loss
class SGD_GC(Optimizer):
    """SGD with momentum plus Gradient Centralization for conv AND FC layers.

    Same as SGD_GCC except centralization applies to every parameter with more
    than 1 dimension (conv kernels and fully-connected weight matrices).
    NOTE(review): relies on the legacy ``Tensor.add_(scalar, tensor)`` call
    order removed in recent PyTorch versions.
    """
    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD_GC, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(SGD_GC, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(weight_decay, p.data)
                #GC operation for Conv layers and FC layers
                # >1 dim covers both conv kernels and FC weight matrices
                if len(list(d_p.size()))>1:
                    d_p.add_(-d_p.mean(dim = tuple(range(1,len(list(d_p.size())))), keepdim = True))
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # first step: buffer initialized with the raw gradient
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(1 - dampening, d_p)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf
                p.data.add_(-group['lr'], d_p)
        return loss
| 9,103 | 34.84252 | 122 | py |
DEAT | DEAT-main/train_cifar_DEAT.py | import argparse
import logging
import sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from Positive_Negative_Momentum.pnm_optim import *
import os
from wideresnet import WideResNet
from preactresnet import PreActResNet18, PreActResNet50
from models import *
from utils import *
# Per-channel CIFAR-10 mean/std, shaped (3,1,1) to broadcast over CHW images (on GPU).
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    """Standardize a batch of images with the CIFAR-10 channel statistics."""
    return (X - mu)/std
# Valid pixel range; adversarial images are clamped back into [0, 1].
upper_limit, lower_limit = 1,0
def clamp(X, lower_limit, upper_limit):
    """Elementwise clamp of X into [lower_limit, upper_limit] (broadcastable tensors)."""
    capped_above = torch.min(X, upper_limit)
    return torch.max(capped_above, lower_limit)
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing.

    The target distribution places ``1 - smoothing`` mass on the true class
    and spreads ``smoothing`` uniformly over the remaining ``classes - 1``.
    """

    def __init__(self, classes=10, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # build the smoothed one-hot target distribution
            smoothed = torch.zeros_like(log_probs)
            smoothed.fill_(self.smoothing / (self.cls - 1))
            smoothed.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-smoothed * log_probs, dim=self.dim))
class Batches():
    """DataLoader wrapper yielding dict batches moved to `device` in float32."""

    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers,
            pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )

    def __iter__(self):
        # re-draw augmentation parameters once per epoch when requested
        if self.set_random_choices:
            self.dataset.set_random_choices()
        for x, y in self.dataloader:
            yield {'input': x.to(device).float(), 'target': y.to(device).long()}

    def __len__(self):
        return len(self.dataloader)
def mixup_data(x, y, alpha=1.0):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # lambda ~ Beta(alpha, alpha); alpha <= 0 disables mixing (lambda = 1)
    if alpha > 0:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1
    n = x.size()[0]
    perm = torch.randperm(n).cuda()
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the loss against both mixup targets, weighted by lam."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def dlr_loss(x, y):
    """Difference-of-Logits-Ratio loss (mean over the batch).

    Margin between the true logit and the strongest rival, scaled by the gap
    between the largest and third-largest logits (needs >= 3 classes).
    """
    x_sorted, ind_sorted = x.sort(dim=1)
    top_is_true = (ind_sorted[:, -1] == y).float()
    true_logit = x[np.arange(x.shape[0]), y]
    # strongest wrong class: runner-up if the top prediction is correct, else the top
    rival = x_sorted[:, -2] * top_is_true + x_sorted[:, -1] * (1. - top_is_true)
    denom = x_sorted[:, -1] - x_sorted[:, -3] + 1e-12
    return (-(true_logit - rival) / denom).mean()
def CW_loss(x, y):
    """Carlini-Wagner margin loss: negative gap between the true logit and the
    strongest rival logit, averaged over the batch."""
    x_sorted, ind_sorted = x.sort(dim=1)
    top_is_true = (ind_sorted[:, -1] == y).float()
    true_logit = x[np.arange(x.shape[0]), y]
    # strongest wrong class: runner-up if the top prediction is correct, else the top
    rival = x_sorted[:, -2] * top_is_true + x_sorted[:, -1] * (1. - top_is_true)
    return (rival - true_logit).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts,
               norm, mixup=False, y_a=None, y_b=None, lam=None,
               early_stop=False, early_stop_pgd_max=1,
               multitarget=False,
               use_DLRloss=False, use_CWloss=False,
               epoch=0, totalepoch=110, gamma=0.8,
               use_adaptive=False, s_HE=15,
               fast_better=False, BNeval=False):
    """Craft PGD adversarial perturbations for a batch.

    Runs `restarts` random restarts of `attack_iters` gradient steps under an
    l_inf or l_2 budget of `epsilon` (step size `alpha`), keeping per-example
    the delta with the highest final loss. Flags select the attack objective
    (CE / mixup / multitarget / DLR / CW / gradient-regularized "fast_better")
    and behavior (per-example early stop after `early_stop_pgd_max` successful
    fools; `BNeval` freezes batch-norm statistics during crafting).

    Returns:
        (max_delta, iter_count): best perturbation per example, and the number
        of iterations each example was still being attacked (early-stop mode).
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    if BNeval:
        model.eval()

    for _ in range(restarts):
        # early stop pgd counter for each x
        early_stop_pgd_count = early_stop_pgd_max * torch.ones(y.shape[0], dtype=torch.int32).cuda()

        # initialize perturbation
        delta = torch.zeros_like(X).cuda()
        if norm == "l_inf":
            delta.uniform_(-epsilon, epsilon)
        elif norm == "l_2":
            # uniform random point inside the l_2 ball of radius epsilon
            delta.normal_()
            d_flat = delta.view(delta.size(0),-1)
            n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1)
            r = torch.zeros_like(n).uniform_(0, 1)
            delta *= r/n*epsilon
        else:
            raise ValueError
        delta = clamp(delta, lower_limit-X, upper_limit-X)
        delta.requires_grad = True

        iter_count = torch.zeros(y.shape[0])

        # craft adversarial examples
        for _ in range(attack_iters):
            output = model(normalize(X + delta))

            # if use early stop pgd
            if early_stop:
                # calculate mask for early stop pgd
                if_success_fool = (output.max(1)[1] != y).to(dtype=torch.int32)
                early_stop_pgd_count = early_stop_pgd_count - if_success_fool
                index = torch.where(early_stop_pgd_count > 0)[0]
                iter_count[index] = iter_count[index] + 1
            else:
                index = slice(None,None,None)

            if not isinstance(index, slice) and len(index) == 0:
                break

            # Whether use mixup criterion
            if fast_better:
                # CE loss plus a gradient-norm regularizer (double backprop)
                loss_ori = F.cross_entropy(output, y)
                grad_ori = torch.autograd.grad(loss_ori, delta, create_graph=True)[0]
                loss_grad = (alpha / 4.) * (torch.norm(grad_ori.view(grad_ori.shape[0], -1), p=2, dim=1) ** 2)
                loss = loss_ori + loss_grad.mean()
                loss.backward()
                grad = delta.grad.detach()
            elif not mixup:
                if multitarget:
                    # push towards a random class: ascend CE when the draw hits
                    # the true label, descend otherwise
                    random_label = torch.randint(low=0, high=10, size=y.shape).cuda()
                    random_direction = 2*((random_label == y).to(dtype=torch.float32) - 0.5)
                    loss = torch.mean(random_direction * F.cross_entropy(output, random_label, reduction='none'))
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_DLRloss:
                    # anneal from CE towards DLR as training progresses
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * dlr_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_CWloss:
                    # anneal from CE towards CW as training progresses
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * CW_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                else:
                    if use_adaptive:
                        # scale logits by s_HE before CE (hypersphere-embedding head)
                        loss = F.cross_entropy(s_HE * output, y)
                    else:
                        loss = F.cross_entropy(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
            else:
                criterion = nn.CrossEntropyLoss()
                loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
                loss.backward()
                grad = delta.grad.detach()

            # update only the examples that are still being attacked
            d = delta[index, :, :, :]
            g = grad[index, :, :, :]
            x = X[index, :, :, :]
            if norm == "l_inf":
                d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon)
            elif norm == "l_2":
                # normalized-gradient step, then project back onto the l_2 ball
                g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1)
                scaled_g = g/(g_norm + 1e-10)
                d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d)
            d = clamp(d, lower_limit - x, upper_limit - x)
            delta.data[index, :, :, :] = d
            delta.grad.zero_()

        # keep, per example, the restart that achieved the highest loss
        if mixup:
            criterion = nn.CrossEntropyLoss(reduction='none')
            all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
        else:
            all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none')
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    if BNeval:
        model.train()

    return max_delta, iter_count
def get_args():
    """Define and parse all command-line arguments for DEAT CIFAR training.

    Returns the parsed argparse.Namespace. Argument groups follow the
    original paper's ablations (training schedule, attack settings, loss
    variants, normalization heads, optimizer choices).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='PreActResNet18')
    parser.add_argument('--l1', default=0, type=float)
    parser.add_argument('--data-dir', default='../cifar-data', type=str)
    parser.add_argument('--epochs', default=110, type=int)
    parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise', 'linear', 'piecewisesmoothed', 'piecewisezoom', 'onedrop', 'multipledecay', 'cosine', 'cyclic'])
    parser.add_argument('--lr-max', default=0.1, type=float)
    parser.add_argument('--lr-one-drop', default=0.01, type=float)
    parser.add_argument('--lr-drop-epoch', default=100, type=int)
    parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'free', 'none'])
    # epsilon values are given in pixel units out of 255
    parser.add_argument('--epsilon', default=8, type=int)
    parser.add_argument('--test_epsilon', default=8, type=int)
    parser.add_argument('--attack-iters', default=10, type=int)
    parser.add_argument('--restarts', default=1, type=int)
    parser.add_argument('--pgd-alpha', default=2, type=float)
    parser.add_argument('--test-pgd-alpha', default=2, type=float)
    parser.add_argument('--fgsm-alpha', default=1.25, type=float)
    parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2'])
    parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous'])
    parser.add_argument('--fname', default='cifar_model', type=str)
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--half', action='store_true')
    parser.add_argument('--width-factor', default=10, type=int)
    parser.add_argument('--resume', default=0, type=int)
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--val', action='store_true')
    parser.add_argument('--chkpt-iters', default=100, type=int)
    parser.add_argument('--mixture', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--mixture_alpha', type=float)
    parser.add_argument('--l2', default=0, type=float)
    # Group 1
    parser.add_argument('--earlystopPGD', action='store_true') # whether use early stop in PGD
    parser.add_argument('--earlystopPGDepoch1', default=60, type=int)
    parser.add_argument('--earlystopPGDepoch2', default=100, type=int)
    parser.add_argument('--warmup_lr', action='store_true') # whether warm_up lr from 0 to max_lr in the first n epochs
    parser.add_argument('--warmup_lr_epoch', default=15, type=int)
    parser.add_argument('--weight_decay', default=5e-4, type=float)#weight decay
    parser.add_argument('--warmup_eps', action='store_true') # whether warm_up eps from 0 to 8/255 in the first n epochs
    parser.add_argument('--warmup_eps_epoch', default=15, type=int)
    parser.add_argument('--batch-size', default=128, type=int) #batch size
    parser.add_argument('--labelsmooth', action='store_true') # whether use label smoothing
    parser.add_argument('--labelsmoothvalue', default=0.0, type=float)
    parser.add_argument('--lrdecay', default='base', type=str, choices=['intenselr', 'base', 'looselr', 'lineardecay'])
    # Group 2
    parser.add_argument('--use_DLRloss', action='store_true') # whether use DLRloss
    parser.add_argument('--use_CWloss', action='store_true') # whether use CWloss
    parser.add_argument('--use_multitarget', action='store_true') # whether use multitarget
    parser.add_argument('--use_stronger_adv', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--stronger_index', default=0, type=int)
    parser.add_argument('--use_FNandWN', action='store_true') # whether use FN and WN
    parser.add_argument('--use_adaptive', action='store_true') # whether use s in attack during training
    parser.add_argument('--s_FN', default=15, type=float) # s in FN
    parser.add_argument('--m_FN', default=0.2, type=float) # s in FN
    parser.add_argument('--use_FNonly', action='store_true') # whether use FN only
    parser.add_argument('--fast_better', action='store_true')
    parser.add_argument('--BNeval', action='store_true') # whether use eval mode for BN when crafting adversarial examples
    parser.add_argument('--focalloss', action='store_true') # whether use focalloss
    parser.add_argument('--focallosslambda', default=2., type=float)
    parser.add_argument('--activation', default='ReLU', type=str)
    parser.add_argument('--softplus_beta', default=1., type=float)
    parser.add_argument('--optimizer', default='momentum', choices=['momentum', 'SGD', 'Nesterov', 'SGD_GC', 'SGD_GCC', 'Adam', 'AdamW', 'PNM', 'AdaPNM'])
    parser.add_argument('--mixup', action='store_true')
    parser.add_argument('--mixup-alpha', type=float)
    parser.add_argument('--cutout', action='store_true')
    parser.add_argument('--cutout-len', type=int)
    return parser.parse_args()
def get_auto_fname(args):
    """Compose a unique run-directory name that encodes every non-default
    hyperparameter, so different configurations never collide on disk.
    """
    pieces = [args.model, '_', args.lr_schedule,
              '_eps', str(args.epsilon),
              '_bs', str(args.batch_size),
              '_maxlr', str(args.lr_max)]
    # Group 1: optimization / regularization options.
    if args.earlystopPGD:
        pieces += ['_earlystopPGD', str(args.earlystopPGDepoch1), str(args.earlystopPGDepoch2)]
    if args.warmup_lr:
        pieces += ['_warmuplr', str(args.warmup_lr_epoch)]
    if args.warmup_eps:
        pieces += ['_warmupeps', str(args.warmup_eps_epoch)]
    if args.weight_decay != 5e-4:
        pieces += ['_wd', str(args.weight_decay)]
    if args.labelsmooth:
        pieces += ['_ls', str(args.labelsmoothvalue)]
    # Group 2: attack / loss variants.
    if args.use_stronger_adv:
        pieces += ['_usestrongeradv#', str(args.stronger_index)]
    if args.use_multitarget:
        pieces.append('_usemultitarget')
    if args.use_DLRloss:
        pieces.append('_useDLRloss')
    if args.use_CWloss:
        pieces.append('_useCWloss')
    if args.use_FNandWN:
        pieces += ['_HEs', str(args.s_FN), 'm', str(args.m_FN)]
    if args.use_adaptive:
        pieces.append('adaptive')
    if args.use_FNonly:
        pieces.append('_FNonly')
    if args.fast_better:
        pieces.append('_fastbetter')
    if args.activation != 'ReLU':
        pieces += ['_', args.activation]
        if args.activation == 'Softplus':
            pieces.append(str(args.softplus_beta))
    if args.lrdecay != 'base':
        pieces += ['_', args.lrdecay]
    if args.BNeval:
        pieces.append('_BNeval')
    if args.focalloss:
        pieces += ['_focalloss', str(args.focallosslambda)]
    if args.optimizer != 'momentum':
        pieces += ['_', args.optimizer]
    if args.mixup:
        pieces += ['_mixup', str(args.mixup_alpha)]
    if args.cutout:
        pieces += ['_cutout', str(args.cutout_len)]
    if args.attack != 'pgd':
        pieces += ['_', args.attack]
    names = ''.join(pieces)
    print('File name: ', names)
    return names
def main():
    """Adversarially train a CIFAR-10 classifier and track robust accuracy.

    Resolves the output directory, sets up logging and seeding, loads the
    data (optionally a held-out validation split), builds the requested
    architecture, optimizer, learning-rate and epsilon schedules, then runs
    the training loop with the configured attack (pgd/fgsm/free/none).
    Clean and robust statistics are logged per epoch, dumped as .txt record
    files under the run directory, and the checkpoint with the best robust
    test accuracy is saved as model_best.pth.
    """
    args = get_args()
    if args.fname == 'auto':
        names = get_auto_fname(args)
        args.fname = 'trained_models/' + names
    else:
        args.fname = 'trained_models/' + args.fname
    if not os.path.exists(args.fname):
        os.makedirs(args.fname)
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')),
            logging.StreamHandler()
        ])
    logger.info(args)
    # Set seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Prepare data
    transforms = [Crop(32, 32), FlipLR()]
    if args.cutout:
        transforms.append(Cutout(args.cutout_len, args.cutout_len))
    if args.val:
        try:
            dataset = torch.load("cifar10_validation_split.pth")
        except:
            print("Couldn't find a dataset with a validation split, did you run "
                "generate_validation.py?")
            return
        val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels']))
        val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=4)
    else:
        dataset = cifar10(args.data_dir)
    train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.),
        dataset['train']['labels']))
    train_set_x = Transform(train_set, transforms)
    train_batches = Batches(train_set_x, args.batch_size, shuffle=True, set_random_choices=True, num_workers=4)
    test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels']))
    test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=4)
    # Set perturbations: CLI budgets are given in /255 pixel units.
    epsilon = (args.epsilon / 255.)
    test_epsilon = (args.test_epsilon / 255.)
    pgd_alpha = (args.pgd_alpha / 255.)
    test_pgd_alpha = (args.test_pgd_alpha / 255.)
    # Set models
    if args.model == 'VGG':
        model = VGG('VGG19')
    elif args.model == 'ResNet18':
        model = ResNet18()
    elif args.model == 'GoogLeNet':
        model = GoogLeNet()
    elif args.model == 'DenseNet121':
        model = DenseNet121()
    elif args.model == 'DenseNet201':
        model = DenseNet201()
    elif args.model == 'ResNeXt29':
        model = ResNeXt29_2x64d()
    elif args.model == 'ResNeXt29L':
        model = ResNeXt29_32x4d()
    elif args.model == 'MobileNet':
        model = MobileNet()
    elif args.model == 'MobileNetV2':
        model = MobileNetV2()
    elif args.model == 'DPN26':
        model = DPN26()
    elif args.model == 'DPN92':
        model = DPN92()
    elif args.model == 'ShuffleNetG2':
        model = ShuffleNetG2()
    elif args.model == 'SENet18':
        model = SENet18()
    elif args.model == 'ShuffleNetV2':
        model = ShuffleNetV2(1)
    elif args.model == 'EfficientNetB0':
        model = EfficientNetB0()
    elif args.model == 'PNASNetA':
        model = PNASNetA()
    elif args.model == 'RegNetX':
        model = RegNetX_200MF()
    elif args.model == 'RegNetLX':
        model = RegNetX_400MF()
    elif args.model == 'PreActResNet50':
        model = PreActResNet50()
    elif args.model == 'PreActResNet18':
        model = PreActResNet18(normalize_only_FN=args.use_FNonly, normalize=args.use_FNandWN, scale=args.s_FN,
                    activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet':
        model = WideResNet(34, 10, widen_factor=10, dropRate=0.0, normalize=args.use_FNandWN,
                    activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet_20':
        model = WideResNet(34, 10, widen_factor=20, dropRate=0.0, normalize=args.use_FNandWN,
                    activation=args.activation, softplus_beta=args.softplus_beta)
    else:
        raise ValueError("Unknown model")
    model = nn.DataParallel(model).cuda()
    model.train()
    # Set training hyperparameters
    if args.l2:
        # L2 only on conv/linear weights; BN parameters and biases are exempt.
        decay, no_decay = [], []
        for name,param in model.named_parameters():
            if 'bn' not in name and 'bias' not in name:
                decay.append(param)
            else:
                no_decay.append(param)
        params = [{'params':decay, 'weight_decay':args.l2},
                  {'params':no_decay, 'weight_decay': 0 }]
    else:
        params = model.parameters()
    if args.lr_schedule == 'cyclic':
        opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
    else:
        if args.optimizer == 'momentum':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'SGD':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0, weight_decay=args.weight_decay)
        elif args.optimizer == 'Nesterov':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay, nesterov=True)
        elif args.optimizer == 'SGD_GC':
            opt = SGD_GC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'SGD_GCC':
            opt = SGD_GCC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'Adam':
            opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
        elif args.optimizer == 'AdamW':
            opt = torch.optim.AdamW(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
        elif args.optimizer == 'PNM':
            opt = PNM(params, lr=args.lr_max, betas=(0.9, 1.), weight_decay=args.weight_decay)
        elif args.optimizer == 'AdaPNM':
            opt = AdaPNM(params, lr=args.lr_max, betas=(0.9, 0.999, 1.), eps=1e-08, weight_decay=args.weight_decay)
    # Cross-entropy (mean)
    if args.labelsmooth:
        criterion = LabelSmoothingLoss(smoothing=args.labelsmoothvalue)
    else:
        criterion = nn.CrossEntropyLoss()
    # If we use freeAT or fastAT with previous init
    if args.attack == 'free':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    elif args.attack == 'fgsm' and args.fgsm_init == 'previous':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    if args.attack == 'free':
        # Free AT replays each minibatch attack_iters times; scale down epochs.
        epochs = int(math.ceil(args.epochs / args.attack_iters))
    else:
        epochs = args.epochs
    # Set lr schedule
    if args.lr_schedule == 'superconverge':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
    elif args.lr_schedule == 'piecewise':
        def lr_schedule(t, warm_up_lr = args.warmup_lr):
            # Optional warm-up, flat lr until epoch 100, then a decay style
            # selected by --lrdecay.
            if t < 100:
                if warm_up_lr and t < args.warmup_lr_epoch:
                    return (t + 1.) / args.warmup_lr_epoch * args.lr_max
                else:
                    return args.lr_max
            if args.lrdecay == 'lineardecay':
                if t < 105:
                    return args.lr_max * 0.02 * (105 - t)
                else:
                    return 0.
            elif args.lrdecay == 'intenselr':
                if t < 102:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'looselr':
                if t < 150:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'base':
                if t < 105:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
    elif args.lr_schedule == 'linear':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs // 3, args.epochs * 2 // 3, args.epochs], [args.lr_max, args.lr_max, args.lr_max / 10, args.lr_max / 100])[0]
    elif args.lr_schedule == 'onedrop':
        def lr_schedule(t):
            if t < args.lr_drop_epoch:
                return args.lr_max
            else:
                return args.lr_one_drop
    elif args.lr_schedule == 'multipledecay':
        def lr_schedule(t):
            return args.lr_max - (t//(args.epochs//10))*(args.lr_max/10)
    elif args.lr_schedule == 'cosine':
        def lr_schedule(t):
            return args.lr_max * 0.5 * (1 + np.cos(t / args.epochs * np.pi))
    elif args.lr_schedule == 'cyclic':
        def lr_schedule(t, stepsize=18, min_lr=1e-5, max_lr=args.lr_max):
            # Scaler: we can adapt this if we do not want the triangular CLR
            scaler = lambda x: 1.
            # Additional function to see where on the cycle we are
            cycle = math.floor(1 + t / (2 * stepsize))
            x = abs(t / stepsize - 2 * cycle + 1)
            relative = max(0, (1 - x)) * scaler(cycle)
            return min_lr + (max_lr - min_lr) * relative
    #### Set stronger adv attacks when decay the lr ####
    def eps_alpha_schedule(t, warm_up_eps = args.warmup_eps, if_use_stronger_adv=args.use_stronger_adv, stronger_index=args.stronger_index): # Schedule number 0
        # Returns (epsilon, pgd_alpha, restarts) for epoch t; after epoch 100
        # the budget is optionally increased per --stronger_index.
        if stronger_index == 0:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha, pgd_alpha]
        elif stronger_index == 1:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha * 1.25, pgd_alpha * 1.5]
        elif stronger_index == 2:
            epsilon_s = [epsilon * 2, epsilon * 2.5]
            pgd_alpha_s = [pgd_alpha * 1.5, pgd_alpha * 2]
        else:
            print('Undefined stronger index')
        if if_use_stronger_adv:
            if t < 100:
                if t < args.warmup_eps_epoch and warm_up_eps:
                    return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
                else:
                    return epsilon, pgd_alpha, args.restarts
            elif t < 105:
                return epsilon_s[0], pgd_alpha_s[0], args.restarts
            else:
                return epsilon_s[1], pgd_alpha_s[1], args.restarts
        else:
            if t < args.warmup_eps_epoch and warm_up_eps:
                return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
            else:
                return epsilon, pgd_alpha, args.restarts
    #### Set the counter for the early stop of PGD ####
    def early_stop_counter_schedule(t):
        if t < args.earlystopPGDepoch1:
            return 1
        elif t < args.earlystopPGDepoch2:
            return 2
        else:
            return 3
    best_test_robust_acc = 0
    best_val_robust_acc = 0
    if args.resume:
        start_epoch = args.resume
        model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth')))
        opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth')))
        logger.info(f'Resuming at epoch {start_epoch}')
        best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc']
        if args.val:
            best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc']
    else:
        start_epoch = 0
    if args.eval:
        if not args.resume:
            logger.info("No model loaded to evaluate, specify with --resume FNAME")
            return
        logger.info("[Evaluation mode]")
    # logger.info('Epoch \t Train Time \t Test Time \t LR \t Train Loss \t Train Grad \t Train Acc \t Train Robust Loss \t Train Robust Acc || \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc')
    logger.info('Epoch \t Train Acc \t Train Robust Acc \t Test Acc \t Test Robust Acc')
    # Records per epoch for savetxt
    train_loss_record = []
    train_acc_record = []
    train_robust_loss_record = []
    train_robust_acc_record = []
    train_grad_record = []
    train_time_record = []
    test_loss_record = []
    test_acc_record = []
    test_robust_loss_record = []
    test_robust_acc_record = []
    test_grad_record = []
    test_time_record = []
    for epoch in range(start_epoch, epochs):
        model.train()
        start_time = time.time()
        train_loss = 0
        train_acc = 0
        train_robust_loss = 0
        train_robust_acc = 0
        train_n = 0
        train_grad = 0
        record_iter = torch.tensor([])
        for i, batch in enumerate(train_batches):
            if args.eval:
                break
            X, y = batch['input'], batch['target']
            onehot_target_withmargin_HE = args.m_FN * args.s_FN * torch.nn.functional.one_hot(y, num_classes=10)
            if args.mixup:
                X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha)
                X, y_a, y_b = map(Variable, (X, y_a, y_b))
            # Fractional epoch drives the lr/eps schedules at batch granularity.
            epoch_now = epoch + (i + 1) / len(train_batches)
            lr = lr_schedule(epoch_now)
            opt.param_groups[0].update(lr=lr)
            if args.attack == 'pgd':
                # Random initialization
                epsilon_sche, pgd_alpha_sche, restarts_sche = eps_alpha_schedule(epoch_now)
                early_counter_max = early_stop_counter_schedule(epoch_now)
                if args.mixup:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max,
                                mixup=True, y_a=y_a, y_b=y_b, lam=lam)
                else:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max, multitarget=args.use_multitarget,
                                use_DLRloss=args.use_DLRloss, use_CWloss=args.use_CWloss,
                                epoch=epoch_now, totalepoch=args.epochs, gamma=0.8,
                                use_adaptive=args.use_adaptive, s_HE=args.s_FN,
                                fast_better=args.fast_better, BNeval=args.BNeval)
                record_iter = torch.cat((record_iter, iter_counts))
                delta = delta.detach()
            elif args.attack == 'fgsm':
                delta,_ = attack_pgd(model, X, y, epsilon, args.fgsm_alpha*epsilon, 1, 1, args.norm, fast_better=args.fast_better)
                delta = delta.detach()
            # Standard training
            elif args.attack == 'none':
                delta = torch.zeros_like(X)
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            # Training losses
            if args.mixup:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam)
            elif args.mixture:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = args.mixture_alpha * criterion(robust_output, y) + (1-args.mixture_alpha) * criterion(output, y)
            else:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                if args.focalloss:
                    criterion_nonreduct = nn.CrossEntropyLoss(reduction='none')
                    robust_confidence = F.softmax(robust_output, dim=1)[:, y].detach()
                    robust_loss = (criterion_nonreduct(robust_output, y) * ((1. - robust_confidence) ** args.focallosslambda)).mean()
                elif args.use_DLRloss:
                    # Blend CE with DLR, ramping the DLR weight over training.
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * dlr_loss(robust_output, y)
                elif args.use_CWloss:
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * CW_loss(robust_output, y)
                elif args.use_FNandWN:
                    #print('use FN and WN with margin')
                    robust_loss = criterion(args.s_FN * robust_output - onehot_target_withmargin_HE, y)
                else:
                    robust_loss = criterion(robust_output, y)
            if args.l1:
                for name,param in model.named_parameters():
                    if 'bn' not in name and 'bias' not in name:
                        robust_loss += args.l1*param.abs().sum()
            opt.zero_grad()
            robust_loss.backward()
            opt.step()
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            if args.mixup:
                loss = mixup_criterion(criterion, output, y_a, y_b, lam)
            else:
                loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            # Record the statstic values
            train_robust_loss += robust_loss.item() * y.size(0)
            train_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
            # FIX: accumulate as a Python float (.item()) so the record lists
            # hold plain numbers that np.savetxt can serialize.
            train_grad += input_grads.abs().sum().item()
        train_time = time.time()
        if args.earlystopPGD:
            print('Iter mean: ', record_iter.mean().item(), ' Iter std: ', record_iter.std().item())
        # FIX: `lr` is only assigned inside the training loop, which is skipped
        # in --eval mode; guard the print to avoid a NameError there.
        if not args.eval:
            print('Learning rate: ', lr)
        #print('Eps: ', epsilon_sche)
        # Evaluate on test data
        model.eval()
        test_loss = 0
        test_acc = 0
        test_robust_loss = 0
        test_robust_acc = 0
        test_n = 0
        test_grad = 0
        for i, batch in enumerate(test_batches):
            X, y = batch['input'], batch['target']
            # Random initialization
            if args.attack == 'none':
                delta = torch.zeros_like(X)
            else:
                delta, _ = attack_pgd(model, X, y, test_epsilon, test_pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
            delta = delta.detach()
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            robust_loss = criterion(robust_output, y)
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            test_robust_loss += robust_loss.item() * y.size(0)
            test_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            test_n += y.size(0)
            # FIX: accumulate as a float, matching the train-side records.
            test_grad += input_grads.abs().sum().item()
        test_time = time.time()
        if args.val:
            val_loss = 0
            val_acc = 0
            val_robust_loss = 0
            val_robust_acc = 0
            val_n = 0
            for i, batch in enumerate(val_batches):
                X, y = batch['input'], batch['target']
                # Random initialization
                if args.attack == 'none':
                    delta = torch.zeros_like(X)
                else:
                    delta, _ = attack_pgd(model, X, y, test_epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
                delta = delta.detach()
                robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
                robust_loss = criterion(robust_output, y)
                output = model(normalize(X))
                loss = criterion(output, y)
                val_robust_loss += robust_loss.item() * y.size(0)
                val_robust_acc += (robust_output.max(1)[1] == y).sum().item()
                val_loss += loss.item() * y.size(0)
                val_acc += (output.max(1)[1] == y).sum().item()
                val_n += y.size(0)
        if not args.eval:
            # logger.info('%d \t %.1f \t %.1f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f %.4f \t %.4f \t %.4f',
            #     epoch, train_time - start_time, test_time - train_time, lr,
            #     train_loss/train_n, train_grad/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n,
            #     test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
            logger.info('%d \t %.4f \t %.4f \t %.4f \t %.4f',
                epoch, train_acc/train_n, train_robust_acc/train_n, test_acc/test_n, test_robust_acc/test_n)
            # Save results
            train_loss_record.append(train_loss/train_n)
            train_acc_record.append(train_acc/train_n)
            train_robust_loss_record.append(train_robust_loss/train_n)
            train_robust_acc_record.append(train_robust_acc/train_n)
            train_grad_record.append(train_grad/train_n)
            train_time_record.append(train_time - start_time)
            np.savetxt(args.fname+'/train_loss_record.txt', np.array(train_loss_record))
            np.savetxt(args.fname+'/train_acc_record.txt', np.array(train_acc_record))
            np.savetxt(args.fname+'/train_robust_loss_record.txt', np.array(train_robust_loss_record))
            np.savetxt(args.fname+'/train_robust_acc_record.txt', np.array(train_robust_acc_record))
            np.savetxt(args.fname+'/train_grad_record.txt', np.array(train_grad_record))
            np.savetxt(args.fname+'/train_time_record.txt', np.array(train_time_record))
            # FIX: test-set statistics were previously normalized by train_n,
            # which silently mis-scaled every saved test record; use test_n
            # (matching the logger output above).
            test_loss_record.append(test_loss/test_n)
            test_acc_record.append(test_acc/test_n)
            test_robust_loss_record.append(test_robust_loss/test_n)
            test_robust_acc_record.append(test_robust_acc/test_n)
            test_grad_record.append(test_grad/test_n)
            test_time_record.append(test_time - train_time)
            np.savetxt(args.fname+'/test_loss_record.txt', np.array(test_loss_record))
            np.savetxt(args.fname+'/test_acc_record.txt', np.array(test_acc_record))
            np.savetxt(args.fname+'/test_robust_loss_record.txt', np.array(test_robust_loss_record))
            np.savetxt(args.fname+'/test_robust_acc_record.txt', np.array(test_robust_acc_record))
            np.savetxt(args.fname+'/test_grad_record.txt', np.array(test_grad_record))
            np.savetxt(args.fname+'/test_time_record.txt', np.array(test_time_record))
            if args.val:
                logger.info('validation %.4f \t %.4f \t %.4f \t %.4f',
                    val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n)
                if val_robust_acc/val_n > best_val_robust_acc:
                    torch.save({
                        'state_dict':model.state_dict(),
                        'test_robust_acc':test_robust_acc/test_n,
                        'test_robust_loss':test_robust_loss/test_n,
                        'test_loss':test_loss/test_n,
                        'test_acc':test_acc/test_n,
                        'val_robust_acc':val_robust_acc/val_n,
                        'val_robust_loss':val_robust_loss/val_n,
                        'val_loss':val_loss/val_n,
                        'val_acc':val_acc/val_n,
                    }, os.path.join(args.fname, f'model_val.pth'))
                    best_val_robust_acc = val_robust_acc/val_n
            # save checkpoint
            # if epoch > 99 or (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs:
            #     torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth'))
            #     torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth'))
            # save best
            if test_robust_acc/test_n > best_test_robust_acc:
                torch.save({
                    'state_dict':model.state_dict(),
                    'test_robust_acc':test_robust_acc/test_n,
                    'test_robust_loss':test_robust_loss/test_n,
                    'test_loss':test_loss/test_n,
                    'test_acc':test_acc/test_n,
                }, os.path.join(args.fname, f'model_best.pth'))
                best_test_robust_acc = test_robust_acc/test_n
        else:
            logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
                epoch, train_time - start_time, test_time - train_time, -1,
                -1, -1, -1, -1,
                test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
    return
# Script entry point: run the adversarial-training driver when executed directly.
if __name__ == "__main__":
    main()
| 40,722 | 41.287643 | 208 | py |
DEAT | DEAT-main/eval_cifar.py | import argparse
import copy
import logging
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from preactresnet import PreActResNet18
from wideresnet import WideResNet
from utils_plus import (upper_limit, lower_limit, std, clamp, get_loaders,
attack_pgd, evaluate_pgd, evaluate_standard, normalize)
from autoattack import AutoAttack
# installing AutoAttack by: pip install git+https://github.com/fra31/auto-attack
def get_args():
    """Parse the command-line options for the robustness evaluation script."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--batch-size', type=int, default=128)
    cli.add_argument('--data-dir', type=str, default='../cifar-data')
    cli.add_argument('--epsilon', type=int, default=8)
    cli.add_argument('--out-dir', type=str, default='train_fgsm_output', help='Output directory')
    cli.add_argument('--seed', type=int, default=0, help='Random seed')
    return cli.parse_args()
def main():
    """Evaluate a saved checkpoint (`model_best.pth` in --out-dir) on clean
    data, PGD-10 with CE and CW losses, and the standard AutoAttack suite.
    """
    args = get_args()
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.StreamHandler()
        ])
    logger.info(args)
    _, test_loader = get_loaders(args.data_dir, args.batch_size)
    best_state_dict = torch.load(os.path.join(args.out_dir, 'model_best.pth'))
    # Evaluation
    model_test = PreActResNet18().cuda()
    # model_test = WideResNet(34, 10, widen_factor=10, dropRate=0.0)
    model_test = nn.DataParallel(model_test).cuda()
    # Training checkpoints wrap the weights in a dict under 'state_dict';
    # raw state dicts are accepted too.
    if 'state_dict' in best_state_dict.keys():
        model_test.load_state_dict(best_state_dict['state_dict'])
    else:
        model_test.load_state_dict(best_state_dict)
    model_test.float()
    model_test.eval()
    ### Evaluate clean acc ###
    _, test_acc = evaluate_standard(test_loader, model_test)
    print('Clean acc: ', test_acc)
    ### Evaluate PGD (CE loss) acc ###
    _, pgd_acc_CE = evaluate_pgd(test_loader, model_test, attack_iters=10, restarts=1, eps=8, step=2, use_CWloss=False)
    print('PGD-10 (10 restarts, step 2, CE loss) acc: ', pgd_acc_CE)
    ### Evaluate PGD (CW loss) acc ###
    _, pgd_acc_CW = evaluate_pgd(test_loader, model_test, attack_iters=10, restarts=1, eps=8, step=2, use_CWloss=True)
    print('PGD-10 (10 restarts, step 2, CW loss) acc: ', pgd_acc_CW)
    ### Evaluate AutoAttack ###
    # Materialize the whole test set as two tensors, as AutoAttack expects.
    l = [x for (x, y) in test_loader]
    x_test = torch.cat(l, 0)
    l = [y for (x, y) in test_loader]
    y_test = torch.cat(l, 0)
    # AutoAttack perturbs raw pixels, so wrap the model to apply the dataset
    # normalization on the way in.
    class normalize_model():
        def __init__(self, model):
            self.model_test = model
        def __call__(self, x):
            return self.model_test(normalize(x))
    new_model = normalize_model(model_test)
    epsilon = 8 / 255.
    adversary = AutoAttack(new_model, norm='Linf', eps=epsilon, version='standard')
    X_adv = adversary.run_standard_evaluation(x_test, y_test, bs=128)
# Script entry point: run the evaluation suite when executed directly.
if __name__ == "__main__":
    main()
| 3,080 | 32.129032 | 119 | py |
DEAT | DEAT-main/utils_plus.py | #import apex.amp as amp
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
# Valid raw-pixel range; adversarial examples are clamped into
# [lower_limit, upper_limit] by the attack code below.
upper_limit, lower_limit = 1, 0
# Per-channel CIFAR-10 mean/std statistics.
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
# Broadcastable (3,1,1) CUDA tensors used by normalize().
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    """Standardize an image batch with the CIFAR-10 channel mean/std."""
    centered = X - mu
    return centered / std
def clamp(X, lower_limit, upper_limit):
    """Elementwise clip of X into [lower_limit, upper_limit] (tensor bounds)."""
    capped = torch.min(X, upper_limit)
    return torch.max(capped, lower_limit)
def get_loaders(dir_, batch_size, DATASET='CIFAR10'):
    """Return (train_loader, test_loader) for CIFAR-10/100 with the standard
    crop/flip augmentation on the training split only."""
    augmented = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ])
    plain = transforms.Compose([
        transforms.ToTensor()
    ])
    if DATASET == 'CIFAR10':
        dataset_cls = datasets.CIFAR10
    elif DATASET == 'CIFAR100':
        dataset_cls = datasets.CIFAR100
    train_dataset = dataset_cls(dir_, train=True, transform=augmented, download=True)
    test_dataset = dataset_cls(dir_, train=False, transform=plain, download=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=2,
    )
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=True,
        num_workers=2,
    )
    return train_loader, test_loader
def CW_loss(x, y):
    """Margin (Carlini-Wagner style) loss: negative gap between the true-class
    logit and the strongest competing logit, averaged over the batch."""
    sorted_logits, sorted_idx = torch.sort(x, dim=1)
    correct_on_top = (sorted_idx[:, -1] == y).float()
    true_logit = x[torch.arange(x.shape[0]), y]
    # If the true class is the arg-max, compete against the runner-up,
    # otherwise against the current arg-max.
    rival_logit = sorted_logits[:, -2] * correct_on_top + sorted_logits[:, -1] * (1. - correct_on_top)
    return (rival_logit - true_logit).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, use_CWloss=False):
    """L-inf PGD attack with random restarts.

    Returns the perturbation (same shape as X) whose final per-example
    cross-entropy is highest across restarts.  Within each restart, only
    examples the model still classifies correctly keep being updated.
    """
    # Best (highest-loss) perturbation found so far, tracked per example.
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    for _ in range(restarts):
        # Random start uniformly inside the eps-ball, clipped so X+delta
        # stays inside the valid pixel range.
        delta = torch.zeros_like(X).cuda()
        delta.uniform_(-epsilon, epsilon)
        delta.data = clamp(delta, lower_limit - X, upper_limit - X)
        delta.requires_grad = True
        for _ in range(attack_iters):
            output = model(normalize(X + delta))
            # Indices of examples that are still classified correctly;
            # stop early once every example is fooled.
            index = torch.where(output.max(1)[1] == y)
            if len(index[0]) == 0:
                break
            if use_CWloss:
                loss = CW_loss(output, y)
            else:
                loss = F.cross_entropy(output, y)
            loss.backward()
            grad = delta.grad.detach()
            d = delta[index[0], :, :, :]
            g = grad[index[0], :, :, :]
            # Signed-gradient ascent step, projected back into the eps-ball
            # and the valid pixel range; only un-fooled examples move.
            d = torch.clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
            d = clamp(d, lower_limit - X[index[0], :, :, :], upper_limit - X[index[0], :, :, :])
            delta.data[index[0], :, :, :] = d
            delta.grad.zero_()
        # Keep, per example, the restart whose final CE loss is largest.
        all_loss = F.cross_entropy(model(normalize(X + delta)), y, reduction='none').detach()
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    return max_delta
def evaluate_pgd(test_loader, model, attack_iters, restarts, eps=8, step=2, use_CWloss=False):
    """Attack every test batch with PGD and return (avg adversarial loss,
    adversarial accuracy).  `eps` and `step` are given in /255 pixel units."""
    epsilon = eps / 255.
    alpha = step / 255.
    total_loss = 0
    total_correct = 0
    seen = 0
    model.eval()
    for X, y in test_loader:
        X, y = X.cuda(), y.cuda()
        pgd_delta = attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, use_CWloss=use_CWloss)
        with torch.no_grad():
            logits = model(normalize(X + pgd_delta))
            batch_loss = F.cross_entropy(logits, y)
            total_loss += batch_loss.item() * y.size(0)
            total_correct += (logits.max(1)[1] == y).sum().item()
            seen += y.size(0)
    return total_loss / seen, total_correct / seen
def evaluate_standard(test_loader, model):
    """Return (avg clean loss, clean accuracy) of `model` over `test_loader`."""
    running_loss = 0
    running_correct = 0
    seen = 0
    model.eval()
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.cuda(), y.cuda()
            logits = model(normalize(X))
            running_loss += F.cross_entropy(logits, y).item() * y.size(0)
            running_correct += (logits.max(1)[1] == y).sum().item()
            seen += y.size(0)
    return running_loss / seen, running_correct / seen
| 4,589 | 34.307692 | 106 | py |
DEAT | DEAT-main/train_cifar.py | import argparse
import logging
import sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from wideresnet import WideResNet
from preactresnet import PreActResNet18, PreActResNet50
from models import *
from utils import *
# CIFAR-10 channel statistics as broadcastable (3,1,1) CUDA tensors.
# NOTE(review): cifar10_mean/cifar10_std are expected to come from the
# star-imports above (utils) — confirm against that module.
mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
std = torch.tensor(cifar10_std).view(3,1,1).cuda()
def normalize(X):
    """Standardize an image batch with the CIFAR-10 channel mean/std."""
    shifted = X - mu
    return shifted / std
# Raw pixel range used when clamping adversarial examples.
upper_limit, lower_limit = 1,0
def clamp(X, lower_limit, upper_limit):
    """Elementwise clip of X into [lower_limit, upper_limit] (tensor bounds)."""
    ceiled = torch.min(X, upper_limit)
    return torch.max(ceiled, lower_limit)
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing: the target distribution puts
    `1 - smoothing` mass on the true class and spreads `smoothing`
    uniformly over the remaining `classes - 1` classes."""
    def __init__(self, classes=10, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim
    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # Smoothed one-hot target distribution (built outside autograd).
            smooth_target = torch.full_like(log_probs, self.smoothing / (self.cls - 1))
            smooth_target.scatter_(1, target.data.unsqueeze(1), self.confidence)
        per_sample = torch.sum(-smooth_target * log_probs, dim=self.dim)
        return torch.mean(per_sample)
class Batches():
    """DataLoader wrapper that yields {'input', 'target'} dicts moved to the
    module-level `device`, re-rolling dataset augmentation choices per epoch
    when `set_random_choices` is enabled."""
    def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.set_random_choices = set_random_choices
        self.dataloader = torch.utils.data.DataLoader(
            dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last
        )
    def __iter__(self):
        # Re-sample the augmentation parameters eagerly, once per epoch.
        if self.set_random_choices:
            self.dataset.set_random_choices()
        def as_batch(x, y):
            # NOTE(review): `device` is a module-level global defined elsewhere.
            return {'input': x.to(device).float(), 'target': y.to(device).long()}
        return (as_batch(x, y) for (x, y) in self.dataloader)
    def __len__(self):
        return len(self.dataloader)
def mixup_data(x, y, alpha=1.0):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # Mixing coefficient from Beta(alpha, alpha); alpha <= 0 disables mixing.
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    n = x.size()[0]
    perm = torch.randperm(n).cuda()
    # Convex combination of each example with a randomly paired one.
    mixed_x = lam * x + (1 - lam) * x[perm, :]
    return mixed_x, y, y[perm], lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Mixup loss: lambda-weighted blend of the losses against both label sets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def dlr_loss(x, y):
    """Difference-of-Logits-Ratio loss: margin between the true logit and the
    best competing logit, normalized by the spread between the largest and
    third-largest logits (small constant avoids division by zero)."""
    z_sorted, idx_sorted = x.sort(dim=1)
    top_is_true = (idx_sorted[:, -1] == y).float()
    z_true = x[np.arange(x.shape[0]), y]
    rival = z_sorted[:, -2] * top_is_true + z_sorted[:, -1] * (1. - top_is_true)
    denom = z_sorted[:, -1] - z_sorted[:, -3] + 1e-12
    return ((rival - z_true) / denom).mean()
def CW_loss(x, y):
    """CW margin loss: -(logit_true - strongest_other_logit), batch-averaged."""
    vals, idxs = x.sort(dim=1)
    true_on_top = (idxs[:, -1] == y).float()
    batch_range = np.arange(x.shape[0])
    # Strongest logit other than the true class.
    best_other = vals[:, -2] * true_on_top + vals[:, -1] * (1. - true_on_top)
    return (best_other - x[batch_range, y]).mean()
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts,
               norm, mixup=False, y_a=None, y_b=None, lam=None,
               early_stop=False, early_stop_pgd_max=1,
               multitarget=False,
               use_DLRloss=False, use_CWloss=False,
               epoch=0, totalepoch=110, gamma=0.8,
               use_adaptive=False, s_HE=15,
               fast_better=False, BNeval=False):
    """Craft adversarial perturbations with (restarted) PGD.

    Runs `restarts` independent attacks of up to `attack_iters` steps each and
    keeps, per example, the perturbation that achieved the highest loss.

    Args:
        model: classifier; invoked as model(normalize(X + delta)).
        X, y: clean inputs and integer labels.
        epsilon: perturbation budget (l_inf radius or l_2 norm bound).
        alpha: per-step size.
        attack_iters, restarts: inner iterations and number of random restarts.
        norm: "l_inf" or "l_2"; anything else raises ValueError.
        mixup / y_a / y_b / lam: when mixup is True, the attack maximizes the
            mixup criterion against the (y_a, y_b, lam) targets.
        early_stop / early_stop_pgd_max: stop updating an example once it has
            been misclassified `early_stop_pgd_max` times.
        multitarget: per-example random-label CE, with the sign flipped when
            the random label happens to equal the true label.
        use_DLRloss / use_CWloss: blend CE with dlr_loss / CW_loss, weighted
            by gamma * epoch / totalepoch.
        use_adaptive, s_HE: scale logits by s_HE inside CE when use_adaptive.
        fast_better: add a squared input-gradient-norm term to the attack loss.
        BNeval: run the model in eval() mode while crafting (train() restored
            afterwards).

    Returns:
        (max_delta, iter_count): the per-example best perturbation across all
        restarts, and the per-example iteration counts from the last restart.

    NOTE(review): relies on module-level `clamp`, `normalize`, `lower_limit`,
    `upper_limit` defined elsewhere in this file, and assumes CUDA is available.
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    # Optionally freeze batch-norm statistics while generating the attack.
    if BNeval:
        model.eval()
    for _ in range(restarts):
        # early stop pgd counter for each x
        early_stop_pgd_count = early_stop_pgd_max * torch.ones(y.shape[0], dtype=torch.int32).cuda()
        # initialize perturbation
        delta = torch.zeros_like(X).cuda()
        if norm == "l_inf":
            delta.uniform_(-epsilon, epsilon)
        elif norm == "l_2":
            # Random direction scaled to a random radius inside the l_2 ball.
            delta.normal_()
            d_flat = delta.view(delta.size(0),-1)
            n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1)
            r = torch.zeros_like(n).uniform_(0, 1)
            delta *= r/n*epsilon
        else:
            raise ValueError
        # Keep X + delta inside the valid input range.
        delta = clamp(delta, lower_limit-X, upper_limit-X)
        delta.requires_grad = True
        iter_count = torch.zeros(y.shape[0])
        # craft adversarial examples
        for _ in range(attack_iters):
            output = model(normalize(X + delta))
            # if use early stop pgd
            if early_stop:
                # calculate mask for early stop pgd
                if_success_fool = (output.max(1)[1] != y).to(dtype=torch.int32)
                early_stop_pgd_count = early_stop_pgd_count - if_success_fool
                # `index` selects the examples that are still being attacked.
                index = torch.where(early_stop_pgd_count > 0)[0]
                iter_count[index] = iter_count[index] + 1
            else:
                index = slice(None,None,None)
            # All examples fooled often enough -> stop this restart early.
            if not isinstance(index, slice) and len(index) == 0:
                break
            # Whether use mixup criterion
            if fast_better:
                # CE plus a penalty on the squared input-gradient norm.
                loss_ori = F.cross_entropy(output, y)
                grad_ori = torch.autograd.grad(loss_ori, delta, create_graph=True)[0]
                loss_grad = (alpha / 4.) * (torch.norm(grad_ori.view(grad_ori.shape[0], -1), p=2, dim=1) ** 2)
                loss = loss_ori + loss_grad.mean()
                loss.backward()
                grad = delta.grad.detach()
            elif not mixup:
                if multitarget:
                    # Random-label CE; sign flips when the draw equals y.
                    random_label = torch.randint(low=0, high=10, size=y.shape).cuda()
                    random_direction = 2*((random_label == y).to(dtype=torch.float32) - 0.5)
                    loss = torch.mean(random_direction * F.cross_entropy(output, random_label, reduction='none'))
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_DLRloss:
                    # Anneal from pure CE toward DLR as training progresses.
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * dlr_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                elif use_CWloss:
                    beta_ = gamma * epoch / totalepoch
                    loss = (1. - beta_) * F.cross_entropy(output, y) + beta_ * CW_loss(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
                else:
                    if use_adaptive:
                        loss = F.cross_entropy(s_HE * output, y)
                    else:
                        loss = F.cross_entropy(output, y)
                    loss.backward()
                    grad = delta.grad.detach()
            else:
                criterion = nn.CrossEntropyLoss()
                loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
                loss.backward()
                grad = delta.grad.detach()
            # Gradient-ascent step, restricted to still-active examples.
            d = delta[index, :, :, :]
            g = grad[index, :, :, :]
            x = X[index, :, :, :]
            if norm == "l_inf":
                d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon)
            elif norm == "l_2":
                # Step along the normalized gradient, then project back
                # into the l_2 ball via renorm.
                g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1)
                scaled_g = g/(g_norm + 1e-10)
                d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d)
            d = clamp(d, lower_limit - x, upper_limit - x)
            delta.data[index, :, :, :] = d
            delta.grad.zero_()
        # Keep, per example, the delta that maximizes the (unreduced) loss
        # across restarts.
        if mixup:
            criterion = nn.CrossEntropyLoss(reduction='none')
            all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
        else:
            all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none')
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    if BNeval:
        model.train()
    return max_delta, iter_count
def get_args():
    """Build and parse the command-line arguments for adversarial training.

    Returns an ``argparse.Namespace``. Flags are grouped: basic training
    setup, attack configuration (epsilon/alpha are given in 0-255 scale and
    divided by 255 in main()), then two groups of training-variant switches.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='PreActResNet18')
    parser.add_argument('--l1', default=0, type=float)
    parser.add_argument('--data-dir', default='../cifar-data', type=str)
    parser.add_argument('--epochs', default=110, type=int)
    parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise', 'linear', 'piecewisesmoothed', 'piecewisezoom', 'onedrop', 'multipledecay', 'cosine', 'cyclic'])
    parser.add_argument('--lr-max', default=0.1, type=float)
    parser.add_argument('--lr-one-drop', default=0.01, type=float)
    parser.add_argument('--lr-drop-epoch', default=100, type=int)
    parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'free', 'none'])
    # Perturbation budgets in 0-255 pixel scale (converted to [0,1] in main).
    parser.add_argument('--epsilon', default=8, type=int)
    parser.add_argument('--test_epsilon', default=8, type=int)
    parser.add_argument('--attack-iters', default=10, type=int)
    parser.add_argument('--restarts', default=1, type=int)
    parser.add_argument('--pgd-alpha', default=2, type=float)
    parser.add_argument('--test-pgd-alpha', default=2, type=float)
    parser.add_argument('--fgsm-alpha', default=1.25, type=float)
    parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2'])
    parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous'])
    parser.add_argument('--fname', default='cifar_model', type=str)
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--half', action='store_true')
    parser.add_argument('--width-factor', default=10, type=int)
    parser.add_argument('--resume', default=0, type=int)
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--val', action='store_true')
    parser.add_argument('--chkpt-iters', default=100, type=int)
    parser.add_argument('--mixture', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--mixture_alpha', type=float)
    parser.add_argument('--l2', default=0, type=float)
    # Group 1
    parser.add_argument('--earlystopPGD', action='store_true') # whether use early stop in PGD
    parser.add_argument('--earlystopPGDepoch1', default=60, type=int)
    parser.add_argument('--earlystopPGDepoch2', default=100, type=int)
    parser.add_argument('--warmup_lr', action='store_true') # whether warm_up lr from 0 to max_lr in the first n epochs
    parser.add_argument('--warmup_lr_epoch', default=15, type=int)
    parser.add_argument('--weight_decay', default=5e-4, type=float)#weight decay
    parser.add_argument('--warmup_eps', action='store_true') # whether warm_up eps from 0 to 8/255 in the first n epochs
    parser.add_argument('--warmup_eps_epoch', default=15, type=int)
    parser.add_argument('--batch-size', default=128, type=int) #batch size
    parser.add_argument('--labelsmooth', action='store_true') # whether use label smoothing
    parser.add_argument('--labelsmoothvalue', default=0.0, type=float)
    parser.add_argument('--lrdecay', default='base', type=str, choices=['intenselr', 'base', 'looselr', 'lineardecay'])
    # Group 2
    parser.add_argument('--use_DLRloss', action='store_true') # whether use DLRloss
    parser.add_argument('--use_CWloss', action='store_true') # whether use CWloss
    parser.add_argument('--use_multitarget', action='store_true') # whether use multitarget
    parser.add_argument('--use_stronger_adv', action='store_true') # whether use mixture of clean and adv examples in a mini-batch
    parser.add_argument('--stronger_index', default=0, type=int)
    parser.add_argument('--use_FNandWN', action='store_true') # whether use FN and WN
    parser.add_argument('--use_adaptive', action='store_true') # whether use s in attack during training
    parser.add_argument('--s_FN', default=15, type=float) # s in FN
    parser.add_argument('--m_FN', default=0.2, type=float) # s in FN
    parser.add_argument('--use_FNonly', action='store_true') # whether use FN only
    parser.add_argument('--fast_better', action='store_true')
    parser.add_argument('--BNeval', action='store_true') # whether use eval mode for BN when crafting adversarial examples
    parser.add_argument('--focalloss', action='store_true') # whether use focalloss
    parser.add_argument('--focallosslambda', default=2., type=float)
    parser.add_argument('--activation', default='ReLU', type=str)
    parser.add_argument('--softplus_beta', default=1., type=float)
    parser.add_argument('--optimizer', default='momentum', choices=['momentum', 'Nesterov', 'SGD_GC', 'SGD_GCC', 'Adam', 'AdamW'])
    parser.add_argument('--mixup', action='store_true')
    parser.add_argument('--mixup-alpha', type=float)
    parser.add_argument('--cutout', action='store_true')
    parser.add_argument('--cutout-len', type=int)
    return parser.parse_args()
def get_auto_fname(args):
    """Derive a descriptive checkpoint-folder name from the configuration.

    Starts from model/schedule/eps/batch/lr and appends one token per
    non-default option, mirroring the flag groups in get_args().
    """
    parts = [args.model + '_' + args.lr_schedule + '_eps' + str(args.epsilon)
             + '_bs' + str(args.batch_size) + '_maxlr' + str(args.lr_max)]
    # Group 1: schedule / regularization tweaks
    if args.earlystopPGD:
        parts.append('_earlystopPGD' + str(args.earlystopPGDepoch1) + str(args.earlystopPGDepoch2))
    if args.warmup_lr:
        parts.append('_warmuplr' + str(args.warmup_lr_epoch))
    if args.warmup_eps:
        parts.append('_warmupeps' + str(args.warmup_eps_epoch))
    if args.weight_decay != 5e-4:
        parts.append('_wd' + str(args.weight_decay))
    if args.labelsmooth:
        parts.append('_ls' + str(args.labelsmoothvalue))
    # Group 2: loss / attack variants
    if args.use_stronger_adv:
        parts.append('_usestrongeradv#' + str(args.stronger_index))
    if args.use_multitarget:
        parts.append('_usemultitarget')
    if args.use_DLRloss:
        parts.append('_useDLRloss')
    if args.use_CWloss:
        parts.append('_useCWloss')
    if args.use_FNandWN:
        parts.append('_HE' + 's' + str(args.s_FN) + 'm' + str(args.m_FN))
    if args.use_adaptive:
        parts.append('adaptive')
    if args.use_FNonly:
        parts.append('_FNonly')
    if args.fast_better:
        parts.append('_fastbetter')
    if args.activation != 'ReLU':
        parts.append('_' + args.activation)
    if args.activation == 'Softplus':
        parts.append(str(args.softplus_beta))
    if args.lrdecay != 'base':
        parts.append('_' + args.lrdecay)
    if args.BNeval:
        parts.append('_BNeval')
    if args.focalloss:
        parts.append('_focalloss' + str(args.focallosslambda))
    if args.optimizer != 'momentum':
        parts.append('_' + args.optimizer)
    if args.mixup:
        parts.append('_mixup' + str(args.mixup_alpha))
    if args.cutout:
        parts.append('_cutout' + str(args.cutout_len))
    if args.attack != 'pgd':
        parts.append('_' + args.attack)
    names = ''.join(parts)
    print('File name: ', names)
    return names
def main():
    """Entry point: parse args, prepare CIFAR-10 data, build the model,
    then run (adversarial) training with periodic evaluation and
    checkpointing under trained_models/<fname>.

    NOTE(review): relies on module-level helpers defined elsewhere in this
    file (cifar10, transpose, pad, Crop/FlipLR/Cutout/Transform, normalize,
    lower_limit/upper_limit, the model constructors, SGD_GC/SGD_GCC).
    """
    args = get_args()
    # Resolve the output directory ('auto' derives it from the config).
    if args.fname == 'auto':
        names = get_auto_fname(args)
        args.fname = 'trained_models/' + names
    else:
        args.fname = 'trained_models/' + args.fname
    if not os.path.exists(args.fname):
        os.makedirs(args.fname)
    # Log to both a file inside the run directory and stdout.
    logger = logging.getLogger(__name__)
    logging.basicConfig(
        format='[%(asctime)s] - %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=logging.DEBUG,
        handlers=[
            logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')),
            logging.StreamHandler()
        ])
    logger.info(args)
    # Set seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Prepare data
    transforms = [Crop(32, 32), FlipLR()]
    if args.cutout:
        transforms.append(Cutout(args.cutout_len, args.cutout_len))
    if args.val:
        try:
            dataset = torch.load("cifar10_validation_split.pth")
        except:
            print("Couldn't find a dataset with a validation split, did you run "
                  "generate_validation.py?")
            return
        val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels']))
        val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=4)
    else:
        dataset = cifar10(args.data_dir)
    train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.),
        dataset['train']['labels']))
    train_set_x = Transform(train_set, transforms)
    train_batches = Batches(train_set_x, args.batch_size, shuffle=True, set_random_choices=True, num_workers=4)
    test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels']))
    test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=4)
    # Set perturbations: convert budgets from 0-255 pixel scale to [0,1].
    epsilon = (args.epsilon / 255.)
    test_epsilon = (args.test_epsilon / 255.)
    pgd_alpha = (args.pgd_alpha / 255.)
    test_pgd_alpha = (args.test_pgd_alpha / 255.)
    # Set models
    if args.model == 'VGG':
        model = VGG('VGG19')
    elif args.model == 'ResNet18':
        model = ResNet18()
    elif args.model == 'GoogLeNet':
        model = GoogLeNet()
    elif args.model == 'DenseNet121':
        model = DenseNet121()
    elif args.model == 'DenseNet201':
        model = DenseNet201()
    elif args.model == 'ResNeXt29':
        model = ResNeXt29_2x64d()
    elif args.model == 'ResNeXt29L':
        model = ResNeXt29_32x4d()
    elif args.model == 'MobileNet':
        model = MobileNet()
    elif args.model == 'MobileNetV2':
        model = MobileNetV2()
    elif args.model == 'DPN26':
        model = DPN26()
    elif args.model == 'DPN92':
        model = DPN92()
    elif args.model == 'ShuffleNetG2':
        model = ShuffleNetG2()
    elif args.model == 'SENet18':
        model = SENet18()
    elif args.model == 'ShuffleNetV2':
        model = ShuffleNetV2(1)
    elif args.model == 'EfficientNetB0':
        model = EfficientNetB0()
    elif args.model == 'PNASNetA':
        model = PNASNetA()
    elif args.model == 'RegNetX':
        model = RegNetX_200MF()
    elif args.model == 'RegNetLX':
        model = RegNetX_400MF()
    elif args.model == 'PreActResNet50':
        model = PreActResNet50()
    elif args.model == 'PreActResNet18':
        model = PreActResNet18(normalize_only_FN=args.use_FNonly, normalize=args.use_FNandWN, scale=args.s_FN,
            activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet':
        model = WideResNet(34, 10, widen_factor=10, dropRate=0.0, normalize=args.use_FNandWN,
            activation=args.activation, softplus_beta=args.softplus_beta)
    elif args.model == 'WideResNet_20':
        model = WideResNet(34, 10, widen_factor=20, dropRate=0.0, normalize=args.use_FNandWN,
            activation=args.activation, softplus_beta=args.softplus_beta)
    else:
        raise ValueError("Unknown model")
    model = nn.DataParallel(model).cuda()
    model.train()
    # Set training hyperparameters
    if args.l2:
        # Exclude batch-norm and bias parameters from L2 weight decay.
        decay, no_decay = [], []
        for name,param in model.named_parameters():
            if 'bn' not in name and 'bias' not in name:
                decay.append(param)
            else:
                no_decay.append(param)
        params = [{'params':decay, 'weight_decay':args.l2},
                  {'params':no_decay, 'weight_decay': 0 }]
    else:
        params = model.parameters()
    if args.lr_schedule == 'cyclic':
        opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
    else:
        if args.optimizer == 'momentum':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'Nesterov':
            opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay, nesterov=True)
        elif args.optimizer == 'SGD_GC':
            opt = SGD_GC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'SGD_GCC':
            opt = SGD_GCC(params, lr=args.lr_max, momentum=0.9, weight_decay=args.weight_decay)
        elif args.optimizer == 'Adam':
            opt = torch.optim.Adam(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
        elif args.optimizer == 'AdamW':
            opt = torch.optim.AdamW(params, lr=args.lr_max, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
    # Cross-entropy (mean)
    if args.labelsmooth:
        criterion = LabelSmoothingLoss(smoothing=args.labelsmoothvalue)
    else:
        criterion = nn.CrossEntropyLoss()
    # If we use freeAT or fastAT with previous init
    if args.attack == 'free':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    elif args.attack == 'fgsm' and args.fgsm_init == 'previous':
        delta = torch.zeros(args.batch_size, 3, 32, 32).cuda()
        delta.requires_grad = True
    if args.attack == 'free':
        epochs = int(math.ceil(args.epochs / args.attack_iters))
    else:
        epochs = args.epochs
    # Set lr schedule
    if args.lr_schedule == 'superconverge':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
    elif args.lr_schedule == 'piecewise':
        def lr_schedule(t, warm_up_lr = args.warmup_lr):
            # Flat (optionally warmed-up) until epoch 100, then decayed per
            # the chosen lrdecay policy.
            if t < 100:
                if warm_up_lr and t < args.warmup_lr_epoch:
                    return (t + 1.) / args.warmup_lr_epoch * args.lr_max
                else:
                    return args.lr_max
            if args.lrdecay == 'lineardecay':
                if t < 105:
                    return args.lr_max * 0.02 * (105 - t)
                else:
                    return 0.
            elif args.lrdecay == 'intenselr':
                if t < 102:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'looselr':
                if t < 150:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
            elif args.lrdecay == 'base':
                if t < 105:
                    return args.lr_max / 10.
                else:
                    return args.lr_max / 100.
    elif args.lr_schedule == 'linear':
        lr_schedule = lambda t: np.interp([t], [0, args.epochs // 3, args.epochs * 2 // 3, args.epochs], [args.lr_max, args.lr_max, args.lr_max / 10, args.lr_max / 100])[0]
    elif args.lr_schedule == 'onedrop':
        def lr_schedule(t):
            if t < args.lr_drop_epoch:
                return args.lr_max
            else:
                return args.lr_one_drop
    elif args.lr_schedule == 'multipledecay':
        def lr_schedule(t):
            return args.lr_max - (t//(args.epochs//10))*(args.lr_max/10)
    elif args.lr_schedule == 'cosine':
        def lr_schedule(t):
            return args.lr_max * 0.5 * (1 + np.cos(t / args.epochs * np.pi))
    elif args.lr_schedule == 'cyclic':
        def lr_schedule(t, stepsize=18, min_lr=1e-5, max_lr=args.lr_max):
            # Scaler: we can adapt this if we do not want the triangular CLR
            scaler = lambda x: 1.
            # Additional function to see where on the cycle we are
            cycle = math.floor(1 + t / (2 * stepsize))
            x = abs(t / stepsize - 2 * cycle + 1)
            relative = max(0, (1 - x)) * scaler(cycle)
            return min_lr + (max_lr - min_lr) * relative
    #### Set stronger adv attacks when decay the lr ####
    def eps_alpha_schedule(t, warm_up_eps = args.warmup_eps, if_use_stronger_adv=args.use_stronger_adv, stronger_index=args.stronger_index): # Schedule number 0
        # Returns (epsilon, pgd_alpha, restarts) for epoch t; stronger
        # budgets kick in after epoch 100 when use_stronger_adv is set.
        if stronger_index == 0:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha, pgd_alpha]
        elif stronger_index == 1:
            epsilon_s = [epsilon * 1.5, epsilon * 2]
            pgd_alpha_s = [pgd_alpha * 1.25, pgd_alpha * 1.5]
        elif stronger_index == 2:
            epsilon_s = [epsilon * 2, epsilon * 2.5]
            pgd_alpha_s = [pgd_alpha * 1.5, pgd_alpha * 2]
        else:
            print('Undefined stronger index')
        if if_use_stronger_adv:
            if t < 100:
                if t < args.warmup_eps_epoch and warm_up_eps:
                    return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
                else:
                    return epsilon, pgd_alpha, args.restarts
            elif t < 105:
                return epsilon_s[0], pgd_alpha_s[0], args.restarts
            else:
                return epsilon_s[1], pgd_alpha_s[1], args.restarts
        else:
            if t < args.warmup_eps_epoch and warm_up_eps:
                return (t + 1.) / args.warmup_eps_epoch * epsilon, pgd_alpha, args.restarts
            else:
                return epsilon, pgd_alpha, args.restarts
    #### Set the counter for the early stop of PGD ####
    def early_stop_counter_schedule(t):
        if t < args.earlystopPGDepoch1:
            return 1
        elif t < args.earlystopPGDepoch2:
            return 2
        else:
            return 3
    best_test_robust_acc = 0
    best_val_robust_acc = 0
    # Optionally resume from a saved checkpoint (epoch args.resume - 1).
    if args.resume:
        start_epoch = args.resume
        model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth')))
        opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth')))
        logger.info(f'Resuming at epoch {start_epoch}')
        best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc']
        if args.val:
            best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc']
    else:
        start_epoch = 0
    if args.eval:
        if not args.resume:
            logger.info("No model loaded to evaluate, specify with --resume FNAME")
            return
        logger.info("[Evaluation mode]")
    # logger.info('Epoch \t Train Time \t Test Time \t LR \t Train Loss \t Train Grad \t Train Acc \t Train Robust Loss \t Train Robust Acc || \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc')
    logger.info('Epoch \t Train Acc \t Train Robust Acc \t Test Acc \t Test Robust Acc')
    # Records per epoch for savetxt
    train_loss_record = []
    train_acc_record = []
    train_robust_loss_record = []
    train_robust_acc_record = []
    train_grad_record = []
    test_loss_record = []
    test_acc_record = []
    test_robust_loss_record = []
    test_robust_acc_record = []
    test_grad_record = []
    for epoch in range(start_epoch, epochs):
        model.train()
        start_time = time.time()
        train_loss = 0
        train_acc = 0
        train_robust_loss = 0
        train_robust_acc = 0
        train_n = 0
        train_grad = 0
        record_iter = torch.tensor([])
        # ---- Training loop over mini-batches (skipped in --eval mode) ----
        for i, batch in enumerate(train_batches):
            if args.eval:
                break
            X, y = batch['input'], batch['target']
            onehot_target_withmargin_HE = args.m_FN * args.s_FN * torch.nn.functional.one_hot(y, num_classes=10)
            if args.mixup:
                X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha)
                X, y_a, y_b = map(Variable, (X, y_a, y_b))
            # Fractional epoch for the per-iteration schedules.
            epoch_now = epoch + (i + 1) / len(train_batches)
            lr = lr_schedule(epoch_now)
            opt.param_groups[0].update(lr=lr)
            if args.attack == 'pgd':
                # Random initialization
                epsilon_sche, pgd_alpha_sche, restarts_sche = eps_alpha_schedule(epoch_now)
                early_counter_max = early_stop_counter_schedule(epoch_now)
                if args.mixup:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                       early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max,
                                       mixup=True, y_a=y_a, y_b=y_b, lam=lam)
                else:
                    delta, iter_counts = attack_pgd(model, X, y, epsilon_sche, pgd_alpha_sche, args.attack_iters, restarts_sche, args.norm,
                                       early_stop=args.earlystopPGD, early_stop_pgd_max=early_counter_max, multitarget=args.use_multitarget,
                                       use_DLRloss=args.use_DLRloss, use_CWloss=args.use_CWloss,
                                       epoch=epoch_now, totalepoch=args.epochs, gamma=0.8,
                                       use_adaptive=args.use_adaptive, s_HE=args.s_FN,
                                       fast_better=args.fast_better, BNeval=args.BNeval)
                record_iter = torch.cat((record_iter, iter_counts))
                delta = delta.detach()
            elif args.attack == 'fgsm':
                delta,_ = attack_pgd(model, X, y, epsilon, args.fgsm_alpha*epsilon, 1, 1, args.norm, fast_better=args.fast_better)
                delta = delta.detach()
            # Standard training
            elif args.attack == 'none':
                delta = torch.zeros_like(X)
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            # Training losses
            if args.mixup:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam)
            elif args.mixture:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                robust_loss = args.mixture_alpha * criterion(robust_output, y) + (1-args.mixture_alpha) * criterion(output, y)
            else:
                clean_input = normalize(X)
                clean_input.requires_grad = True
                output = model(clean_input)
                if args.focalloss:
                    criterion_nonreduct = nn.CrossEntropyLoss(reduction='none')
                    robust_confidence = F.softmax(robust_output, dim=1)[:, y].detach()
                    robust_loss = (criterion_nonreduct(robust_output, y) * ((1. - robust_confidence) ** args.focallosslambda)).mean()
                elif args.use_DLRloss:
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * dlr_loss(robust_output, y)
                elif args.use_CWloss:
                    beta_ = 0.8 * epoch_now / args.epochs
                    robust_loss = (1. - beta_) * F.cross_entropy(robust_output, y) + beta_ * CW_loss(robust_output, y)
                elif args.use_FNandWN:
                    #print('use FN and WN with margin')
                    robust_loss = criterion(args.s_FN * robust_output - onehot_target_withmargin_HE, y)
                else:
                    robust_loss = criterion(robust_output, y)
            # Optional L1 penalty on non-BN, non-bias weights.
            if args.l1:
                for name,param in model.named_parameters():
                    if 'bn' not in name and 'bias' not in name:
                        robust_loss += args.l1*param.abs().sum()
            opt.zero_grad()
            robust_loss.backward()
            opt.step()
            # Clean forward pass (statistics only; no optimizer step).
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            if args.mixup:
                loss = mixup_criterion(criterion, output, y_a, y_b, lam)
            else:
                loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            # Record the statstic values
            train_robust_loss += robust_loss.item() * y.size(0)
            train_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            train_loss += loss.item() * y.size(0)
            train_acc += (output.max(1)[1] == y).sum().item()
            train_n += y.size(0)
            train_grad += input_grads.abs().sum()
        train_time = time.time()
        if args.earlystopPGD:
            print('Iter mean: ', record_iter.mean().item(), ' Iter std: ', record_iter.std().item())
        print('Learning rate: ', lr)
        #print('Eps: ', epsilon_sche)
        # Evaluate on test data
        model.eval()
        test_loss = 0
        test_acc = 0
        test_robust_loss = 0
        test_robust_acc = 0
        test_n = 0
        test_grad = 0
        for i, batch in enumerate(test_batches):
            X, y = batch['input'], batch['target']
            # Random initialization
            if args.attack == 'none':
                delta = torch.zeros_like(X)
            else:
                delta, _ = attack_pgd(model, X, y, test_epsilon, test_pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
            delta = delta.detach()
            adv_input = normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit))
            adv_input.requires_grad = True
            robust_output = model(adv_input)
            robust_loss = criterion(robust_output, y)
            clean_input = normalize(X)
            clean_input.requires_grad = True
            output = model(clean_input)
            loss = criterion(output, y)
            # Get the gradient norm values
            input_grads = torch.autograd.grad(loss, clean_input, create_graph=False)[0]
            test_robust_loss += robust_loss.item() * y.size(0)
            test_robust_acc += (robust_output.max(1)[1] == y).sum().item()
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            test_n += y.size(0)
            test_grad += input_grads.abs().sum()
        test_time = time.time()
        # Optional evaluation on the held-out validation split.
        if args.val:
            val_loss = 0
            val_acc = 0
            val_robust_loss = 0
            val_robust_acc = 0
            val_n = 0
            for i, batch in enumerate(val_batches):
                X, y = batch['input'], batch['target']
                # Random initialization
                if args.attack == 'none':
                    delta = torch.zeros_like(X)
                else:
                    delta, _ = attack_pgd(model, X, y, test_epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=False)
                delta = delta.detach()
                robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
                robust_loss = criterion(robust_output, y)
                output = model(normalize(X))
                loss = criterion(output, y)
                val_robust_loss += robust_loss.item() * y.size(0)
                val_robust_acc += (robust_output.max(1)[1] == y).sum().item()
                val_loss += loss.item() * y.size(0)
                val_acc += (output.max(1)[1] == y).sum().item()
                val_n += y.size(0)
        if not args.eval:
            # logger.info('%d \t %.1f \t %.1f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f \t %.4f %.4f \t %.4f \t %.4f',
            #     epoch, train_time - start_time, test_time - train_time, lr,
            #     train_loss/train_n, train_grad/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n,
            #     test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
            logger.info('%d \t %.4f \t %.4f \t %.4f \t %.4f',
                epoch, train_acc/train_n, train_robust_acc/train_n, test_acc/test_n, test_robust_acc/test_n)
            # Save results
            train_loss_record.append(train_loss/train_n)
            train_acc_record.append(train_acc/train_n)
            train_robust_loss_record.append(train_robust_loss/train_n)
            train_robust_acc_record.append(train_robust_acc/train_n)
            train_grad_record.append(train_grad/train_n)
            np.savetxt(args.fname+'/train_loss_record.txt', np.array(train_loss_record))
            np.savetxt(args.fname+'/train_acc_record.txt', np.array(train_acc_record))
            np.savetxt(args.fname+'/train_robust_loss_record.txt', np.array(train_robust_loss_record))
            np.savetxt(args.fname+'/train_robust_acc_record.txt', np.array(train_robust_acc_record))
            np.savetxt(args.fname+'/train_grad_record.txt', np.array(train_grad_record))
            # NOTE(review): the test records below divide by train_n rather
            # than test_n — likely a bug; the saved test metrics are
            # mis-scaled whenever the two set sizes differ.
            test_loss_record.append(test_loss/train_n)
            test_acc_record.append(test_acc/train_n)
            test_robust_loss_record.append(test_robust_loss/train_n)
            test_robust_acc_record.append(test_robust_acc/train_n)
            test_grad_record.append(test_grad/train_n)
            np.savetxt(args.fname+'/test_loss_record.txt', np.array(test_loss_record))
            np.savetxt(args.fname+'/test_acc_record.txt', np.array(test_acc_record))
            np.savetxt(args.fname+'/test_robust_loss_record.txt', np.array(test_robust_loss_record))
            np.savetxt(args.fname+'/test_robust_acc_record.txt', np.array(test_robust_acc_record))
            np.savetxt(args.fname+'/test_grad_record.txt', np.array(test_grad_record))
            if args.val:
                logger.info('validation %.4f \t %.4f \t %.4f \t %.4f',
                    val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n)
                if val_robust_acc/val_n > best_val_robust_acc:
                    torch.save({
                            'state_dict':model.state_dict(),
                            'test_robust_acc':test_robust_acc/test_n,
                            'test_robust_loss':test_robust_loss/test_n,
                            'test_loss':test_loss/test_n,
                            'test_acc':test_acc/test_n,
                            'val_robust_acc':val_robust_acc/val_n,
                            'val_robust_loss':val_robust_loss/val_n,
                            'val_loss':val_loss/val_n,
                            'val_acc':val_acc/val_n,
                        }, os.path.join(args.fname, f'model_val.pth'))
                    best_val_robust_acc = val_robust_acc/val_n
            # save checkpoint
            if epoch > 99 or (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs:
                torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth'))
                torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth'))
            # save best
            if test_robust_acc/test_n > best_test_robust_acc:
                torch.save({
                        'state_dict':model.state_dict(),
                        'test_robust_acc':test_robust_acc/test_n,
                        'test_robust_loss':test_robust_loss/test_n,
                        'test_loss':test_loss/test_n,
                        'test_acc':test_acc/test_n,
                    }, os.path.join(args.fname, f'model_best.pth'))
                best_test_robust_acc = test_robust_acc/test_n
        else:
            logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
                epoch, train_time - start_time, test_time - train_time, -1,
                -1, -1, -1, -1,
                test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
            return
# Standard script entry point.
if __name__ == "__main__":
    main()
# (dataset-export artifact removed: file-statistics row, not part of the source)
# --- DEAT-main/wideresnet.py ---
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN -> act -> conv) twice,
    plus an identity or 1x1-projection shortcut.

    Args:
        in_planes / out_planes: input / output channel counts.
        stride: stride of the first conv (also applied to the shortcut).
        dropRate: dropout probability applied between the two convs.
        activation: one of 'ReLU', 'Softplus', 'GELU', 'ELU'.
        softplus_beta: beta for the Softplus activation.
    Raises:
        ValueError: if `activation` is not one of the supported names.
    """
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        if activation == 'ReLU':
            self.relu1 = nn.ReLU(inplace=True)
            self.relu2 = nn.ReLU(inplace=True)
            print('R')
        elif activation == 'Softplus':
            self.relu1 = nn.Softplus(beta=softplus_beta, threshold=20)
            self.relu2 = nn.Softplus(beta=softplus_beta, threshold=20)
            print('S')
        elif activation == 'GELU':
            self.relu1 = nn.GELU()
            self.relu2 = nn.GELU()
            print('G')
        elif activation == 'ELU':
            self.relu1 = nn.ELU(alpha=1.0, inplace=True)
            self.relu2 = nn.ELU(alpha=1.0, inplace=True)
            print('E')
        else:
            # Previously an unknown activation silently left relu1/relu2
            # unset and only failed later inside forward(); fail fast here.
            raise ValueError('Unsupported activation: ' + str(activation))
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection on the shortcut when the channel count changes.
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                               padding=0, bias=False) or None
    def forward(self, x):
        # Pre-activation: when shapes differ, the activated tensor also
        # feeds the projection shortcut, so overwrite x; otherwise keep the
        # raw input for the identity shortcut.
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """One WideResNet stage: a sequential stack of ``nb_layers`` blocks."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, activation='ReLU', softplus_beta=1):
        super(NetworkBlock, self).__init__()
        self.activation = activation
        self.softplus_beta = softplus_beta
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        stacked = []
        for idx in range(int(nb_layers)):
            # Only the first block changes channels and applies the stride;
            # the rest operate at out_planes with stride 1.
            cur_in = in_planes if idx == 0 else out_planes
            cur_stride = stride if idx == 0 else 1
            stacked.append(block(cur_in, out_planes, cur_stride, dropRate,
                                 self.activation, self.softplus_beta))
        return nn.Sequential(*stacked)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """Wide Residual Network (WRN-depth-widen_factor) for 32x32 inputs.

    Args:
        depth: total conv depth; must satisfy (depth - 4) % 6 == 0.
        num_classes: size of the final classifier.
        widen_factor: per-stage channel multiplier.
        dropRate: dropout rate inside each BasicBlock.
        normalize: if True, L2-normalize the pooled features and (in-place,
            on every forward) the rows of the bias-free classifier weight.
        activation: one of 'ReLU' / 'Softplus' / 'GELU' / 'ELU'.
        softplus_beta: beta for the Softplus activation.
    """
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0, normalize=False, activation='ReLU', softplus_beta=1):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        # Blocks per stage (float here; NetworkBlock consumes it via int()).
        n = (depth - 4) / 6
        block = BasicBlock
        self.normalize = normalize
        #self.scale = scale
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 1st sub-block
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate, activation=activation, softplus_beta=softplus_beta)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        if activation == 'ReLU':
            self.relu = nn.ReLU(inplace=True)
        elif activation == 'Softplus':
            self.relu = nn.Softplus(beta=softplus_beta, threshold=20)
        elif activation == 'GELU':
            self.relu = nn.GELU()
        elif activation == 'ELU':
            self.relu = nn.ELU(alpha=1.0, inplace=True)
        print('Use activation of ' + activation)
        if self.normalize:
            self.fc = nn.Linear(nChannels[3], num_classes, bias = False)
        else:
            self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style initialization for convs; BN to identity; zero linear bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear) and not self.normalize:
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        if self.normalize:
            # NOTE: renormalizes the classifier weight rows in place on every
            # forward (cosine-classifier style head).
            out = F.normalize(out, p=2, dim=1)
            for _, module in self.fc.named_modules():
                if isinstance(module, nn.Linear):
                    module.weight.data = F.normalize(module.weight, p=2, dim=1)
        return self.fc(out)
DEAT | DEAT-main/models/shufflenetv2.py | '''ShuffleNetV2 in PyTorch.
See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle from ShuffleNet: interleave channels across groups."""

    def __init__(self, groups=2):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """[N,C,H,W]: fold C into (groups, C/groups), swap, and flatten back."""
        batch, channels, height, width = x.size()
        grouped = x.view(batch, self.groups, channels // self.groups, height, width)
        shuffled = grouped.transpose(1, 2).contiguous()
        return shuffled.view(batch, channels, height, width)
class SplitBlock(nn.Module):
    """Split the channel dimension at a fixed ratio."""

    def __init__(self, ratio):
        super(SplitBlock, self).__init__()
        self.ratio = ratio

    def forward(self, x):
        # First `ratio` fraction of channels on the left, rest on the right.
        split_at = int(x.size(1) * self.ratio)
        left, right = x[:, :split_at], x[:, split_at:]
        return left, right
class BasicBlock(nn.Module):
    """ShuffleNetV2 basic unit (stride 1).

    The input is channel-split; one half passes through untouched, the other
    goes through 1x1 -> depthwise 3x3 -> 1x1 convolutions. The halves are
    concatenated and channel-shuffled so information mixes across branches.
    """
    def __init__(self, in_channels, split_ratio=0.5):
        super(BasicBlock, self).__init__()
        self.split = SplitBlock(split_ratio)
        # Only the second split (in_channels * split_ratio channels) is convolved.
        in_channels = int(in_channels * split_ratio)
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Depthwise 3x3 (groups == channels); no activation follows bn2.
        self.conv2 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        x1, x2 = self.split(x)
        out = F.relu(self.bn1(self.conv1(x2)))
        out = self.bn2(self.conv2(out))
        out = F.relu(self.bn3(self.conv3(out)))
        # Re-attach the untouched half, then shuffle across the two halves.
        out = torch.cat([x1, out], 1)
        out = self.shuffle(out)
        return out
class DownBlock(nn.Module):
    """ShuffleNetV2 spatial-downsampling unit (stride 2).

    Both branches halve the spatial size; their outputs (mid_channels each)
    are concatenated to ``out_channels`` and channel-shuffled. No channel
    split here — both branches consume the full input.
    """
    def __init__(self, in_channels, out_channels):
        super(DownBlock, self).__init__()
        mid_channels = out_channels // 2
        # left
        # Depthwise 3x3 stride 2, then 1x1 pointwise.
        self.conv1 = nn.Conv2d(in_channels, in_channels,
                               kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        # right
        # 1x1 pointwise, depthwise 3x3 stride 2, 1x1 pointwise.
        self.conv3 = nn.Conv2d(in_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(mid_channels)
        self.conv4 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False)
        self.bn4 = nn.BatchNorm2d(mid_channels)
        self.conv5 = nn.Conv2d(mid_channels, mid_channels,
                               kernel_size=1, bias=False)
        self.bn5 = nn.BatchNorm2d(mid_channels)
        self.shuffle = ShuffleBlock()
    def forward(self, x):
        # left
        out1 = self.bn1(self.conv1(x))
        out1 = F.relu(self.bn2(self.conv2(out1)))
        # right
        out2 = F.relu(self.bn3(self.conv3(x)))
        out2 = self.bn4(self.conv4(out2))
        out2 = F.relu(self.bn5(self.conv5(out2)))
        # concat
        out = torch.cat([out1, out2], 1)
        out = self.shuffle(out)
        return out
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 for CIFAR-sized (32x32) inputs.

    Args:
        net_size: width multiplier key into the module-level ``configs``
            dict (0.5 / 1 / 1.5 / 2).
    """
    def __init__(self, net_size):
        super(ShuffleNetV2, self).__init__()
        out_channels = configs[net_size]['out_channels']
        num_blocks = configs[net_size]['num_blocks']

        # Stem stride kept at 1 (CIFAR): no early downsampling.
        self.conv1 = nn.Conv2d(3, 24, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_channels = 24
        self.layer1 = self._make_layer(out_channels[0], num_blocks[0])
        self.layer2 = self._make_layer(out_channels[1], num_blocks[1])
        self.layer3 = self._make_layer(out_channels[2], num_blocks[2])
        self.conv2 = nn.Conv2d(out_channels[2], out_channels[3],
                               kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels[3])
        self.linear = nn.Linear(out_channels[3], 10)

    def _make_layer(self, out_channels, num_blocks):
        # Each stage: one stride-2 DownBlock followed by stride-1 BasicBlocks.
        layers = [DownBlock(self.in_channels, out_channels)]
        for i in range(num_blocks):
            layers.append(BasicBlock(out_channels))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        # out = F.max_pool2d(out, 3, stride=2, padding=1)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
# Width configurations keyed by network-size multiplier.
# 'out_channels': channels of (stage1, stage2, stage3, final 1x1 conv);
# 'num_blocks': BasicBlocks per stage (each stage is prefixed by a DownBlock).
configs = {
    0.5: {
        'out_channels': (48, 96, 192, 1024),
        'num_blocks': (3, 7, 3)
    },

    1: {
        'out_channels': (116, 232, 464, 1024),
        'num_blocks': (3, 7, 3)
    },
    1.5: {
        'out_channels': (176, 352, 704, 1024),
        'num_blocks': (3, 7, 3)
    },
    2: {
        'out_channels': (224, 488, 976, 2048),
        'num_blocks': (3, 7, 3)
    }
}
def test():
    """Smoke-test: run a random batch through the smallest ShuffleNetV2."""
    model = ShuffleNetV2(net_size=0.5)
    batch = torch.randn(3, 3, 32, 32)
    print(model(batch).shape)
# test()
| 5,530 | 32.932515 | 107 | py |
DEAT | DEAT-main/models/regnet.py | '''RegNet in PyTorch.
Paper: "Designing Network Design Spaces".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SE(nn.Module):
    '''Squeeze-and-Excitation block.'''

    def __init__(self, in_planes, se_planes):
        super(SE, self).__init__()
        # Bottleneck the pooled descriptor, then expand back to a per-channel gate.
        self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)

    def forward(self, x):
        squeezed = F.adaptive_avg_pool2d(x, (1, 1))
        gate = F.relu(self.se1(squeezed))
        gate = torch.sigmoid(self.se2(gate))
        # Re-weight each input channel by its learned gate.
        return x * gate
class Block(nn.Module):
    """RegNet X/Y block: 1x1 reduce -> grouped 3x3 -> (optional SE) -> 1x1
    expand, with a projection shortcut when shape changes."""
    def __init__(self, w_in, w_out, stride, group_width, bottleneck_ratio, se_ratio):
        super(Block, self).__init__()
        # 1x1
        w_b = int(round(w_out * bottleneck_ratio))
        self.conv1 = nn.Conv2d(w_in, w_b, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(w_b)
        # 3x3
        num_groups = w_b // group_width
        self.conv2 = nn.Conv2d(w_b, w_b, kernel_size=3,
                               stride=stride, padding=1, groups=num_groups, bias=False)
        self.bn2 = nn.BatchNorm2d(w_b)
        # se
        # se_ratio > 0 selects the "Y" variant with squeeze-excitation.
        self.with_se = se_ratio > 0
        if self.with_se:
            w_se = int(round(w_in * se_ratio))
            self.se = SE(w_b, w_se)
        # 1x1
        self.conv3 = nn.Conv2d(w_b, w_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(w_out)

        self.shortcut = nn.Sequential()
        if stride != 1 or w_in != w_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(w_in, w_out,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(w_out)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        if self.with_se:
            out = self.se(out)
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class RegNet(nn.Module):
    """RegNet backbone assembled from a config dict.

    Expected ``cfg`` keys: 'depths', 'widths', 'strides' (per-stage lists),
    plus scalar 'group_width', 'bottleneck_ratio', 'se_ratio'.
    """
    def __init__(self, cfg, num_classes=10):
        super(RegNet, self).__init__()
        self.cfg = cfg
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(0)
        self.layer2 = self._make_layer(1)
        self.layer3 = self._make_layer(2)
        self.layer4 = self._make_layer(3)
        self.linear = nn.Linear(self.cfg['widths'][-1], num_classes)

    def _make_layer(self, idx):
        # Build stage `idx` from the config; only its first block strides.
        depth = self.cfg['depths'][idx]
        width = self.cfg['widths'][idx]
        stride = self.cfg['strides'][idx]
        group_width = self.cfg['group_width']
        bottleneck_ratio = self.cfg['bottleneck_ratio']
        se_ratio = self.cfg['se_ratio']

        layers = []
        for i in range(depth):
            s = stride if i == 0 else 1
            layers.append(Block(self.in_planes, width,
                                s, group_width, bottleneck_ratio, se_ratio))
            self.in_planes = width
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def _regnet_cfg(depths, widths, group_width, se_ratio):
    """Assemble a RegNet config dict; strides and bottleneck ratio are shared
    by all the variants defined below."""
    return {
        'depths': depths,
        'widths': widths,
        'strides': [1, 1, 2, 2],
        'group_width': group_width,
        'bottleneck_ratio': 1,
        'se_ratio': se_ratio,
    }


def RegNetX_200MF():
    """RegNetX ~200MF (no squeeze-excitation)."""
    return RegNet(_regnet_cfg([1, 1, 4, 7], [24, 56, 152, 368], 8, 0))


def RegNetX_400MF():
    """RegNetX ~400MF (no squeeze-excitation)."""
    return RegNet(_regnet_cfg([1, 2, 7, 12], [32, 64, 160, 384], 16, 0))


def RegNetY_400MF():
    """RegNetY ~400MF (with squeeze-excitation, se_ratio 0.25)."""
    return RegNet(_regnet_cfg([1, 2, 7, 12], [32, 64, 160, 384], 16, 0.25))
def test():
    """Smoke-test RegNetX-200MF on a random CIFAR-sized batch."""
    model = RegNetX_200MF()
    print(model)
    out = model(torch.randn(2, 3, 32, 32))
    print(out.shape)


if __name__ == '__main__':
    test()
| 4,548 | 28.160256 | 106 | py |
DEAT | DEAT-main/models/efficientnet.py | '''EfficientNet in PyTorch.
Paper: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks".
Reference: https://github.com/keras-team/keras-applications/blob/master/keras_applications/efficientnet.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def drop_connect(x, drop_ratio):
    """Stochastic depth: zero whole examples of the batch in place with
    probability ``drop_ratio``, rescaling survivors by 1/keep so the
    expectation is unchanged. Mutates and returns ``x``."""
    keep_ratio = 1.0 - drop_ratio
    mask = torch.empty([x.shape[0], 1, 1, 1],
                       dtype=x.dtype, device=x.device).bernoulli_(keep_ratio)
    x.div_(keep_ratio)
    x.mul_(mask)
    return x
class SE(nn.Module):
    '''Squeeze-and-Excitation block with Swish.'''

    def __init__(self, in_channels, se_channels):
        super(SE, self).__init__()
        self.se1 = nn.Conv2d(in_channels, se_channels,
                             kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_channels, in_channels,
                             kernel_size=1, bias=True)

    def forward(self, x):
        # Global context -> swish bottleneck -> per-channel sigmoid gate.
        pooled = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.se2(swish(self.se1(pooled))).sigmoid()
        return x * gate
class Block(nn.Module):
    '''expansion + depthwise + pointwise + squeeze-excitation

    MBConv block: 1x1 expansion (skipped arithmetically when expand_ratio
    is 1), depthwise conv, SE gate, 1x1 projection, with a skip connection
    (plus optional drop-connect) when input and output shapes match.
    '''

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expand_ratio=1,
                 se_ratio=0.,
                 drop_rate=0.):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio

        # Expansion
        channels = expand_ratio * in_channels
        self.conv1 = nn.Conv2d(in_channels,
                               channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(channels)

        # Depthwise conv
        # Padding keeps 'same' spatial size for the supported 3x3/5x5 kernels.
        self.conv2 = nn.Conv2d(channels,
                               channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=(1 if kernel_size == 3 else 2),
                               groups=channels,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

        # SE layers
        se_channels = int(in_channels * se_ratio)
        self.se = SE(channels, se_channels)

        # Output
        self.conv3 = nn.Conv2d(channels,
                               out_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

        # Skip connection if in and out shapes are the same (MV-V2 style)
        self.has_skip = (stride == 1) and (in_channels == out_channels)

    def forward(self, x):
        # conv1/bn1 exist even when expand_ratio == 1, but are bypassed then.
        out = x if self.expand_ratio == 1 else swish(self.bn1(self.conv1(x)))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            if self.training and self.drop_rate > 0:
                out = drop_connect(out, self.drop_rate)
            out = out + x
        return out
class EfficientNet(nn.Module):
    """EfficientNet assembled from a config dict (CIFAR stem, stride 1).

    Expected ``cfg`` keys: per-stage lists 'num_blocks', 'expansion',
    'out_channels', 'kernel_size', 'stride', plus scalar 'dropout_rate'
    and 'drop_connect_rate'.
    """
    def __init__(self, cfg, num_classes=10):
        super(EfficientNet, self).__init__()
        self.cfg = cfg
        self.conv1 = nn.Conv2d(3,
                               32,
                               kernel_size=3,
                               stride=1,
                               padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_channels=32)
        self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)

    def _make_layers(self, in_channels):
        layers = []
        cfg = [self.cfg[k] for k in ['expansion', 'out_channels', 'num_blocks', 'kernel_size',
                                     'stride']]
        b = 0
        blocks = sum(self.cfg['num_blocks'])
        for expansion, out_channels, num_blocks, kernel_size, stride in zip(*cfg):
            strides = [stride] + [1] * (num_blocks - 1)
            for stride in strides:
                # Drop-connect rate ramps up linearly with block depth.
                drop_rate = self.cfg['drop_connect_rate'] * b / blocks
                layers.append(
                    Block(in_channels,
                          out_channels,
                          kernel_size,
                          stride,
                          expansion,
                          se_ratio=0.25,
                          drop_rate=drop_rate))
                in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = swish(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        dropout_rate = self.cfg['dropout_rate']
        if self.training and dropout_rate > 0:
            out = F.dropout(out, p=dropout_rate)
        out = self.linear(out)
        return out
def EfficientNetB0():
    """Baseline EfficientNet-B0 configuration (CIFAR variant)."""
    cfg = dict(
        num_blocks=[1, 2, 2, 3, 3, 4, 1],
        expansion=[1, 6, 6, 6, 6, 6, 6],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        kernel_size=[3, 3, 5, 3, 5, 5, 3],
        stride=[1, 2, 2, 2, 1, 2, 1],
        dropout_rate=0.2,
        drop_connect_rate=0.2,
    )
    return EfficientNet(cfg)
def test():
    """Smoke-test EfficientNet-B0 on a random batch."""
    model = EfficientNetB0()
    out = model(torch.randn(2, 3, 32, 32))
    print(out.shape)


if __name__ == '__main__':
    test()
| 5,719 | 31.5 | 106 | py |
DEAT | DEAT-main/models/pnasnet.py | '''PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SepConv(nn.Module):
    '''Separable Convolution.'''

    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        # Grouped conv (groups == in_planes) with 'same'-style padding, then BN.
        self.conv1 = nn.Conv2d(in_planes, out_planes,
                               kernel_size, stride,
                               padding=(kernel_size - 1) // 2,
                               bias=False, groups=in_planes)
        self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        out = self.conv1(x)
        return self.bn1(out)
class CellA(nn.Module):
    """PNASNet cell A: 7x7 separable conv branch + max-pool branch, summed.

    At stride 2 the pooled branch gets a 1x1 conv + BN so its channel count
    matches the separable branch before the add.
    """
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellA, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        if stride==2:
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        y1 = self.sep_conv1(x)
        y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y2 = self.bn1(self.conv1(y2))
        return F.relu(y1+y2)
class CellB(nn.Module):
    """PNASNet cell B: two summed pairs of branches, concatenated and reduced.

    Left pair: 7x7 sep-conv + 3x3 sep-conv. Right pair: max-pool (projected
    at stride 2) + 5x5 sep-conv. The two sums are concatenated and a 1x1
    conv brings the channels back to ``out_planes``.
    """
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellB, self).__init__()
        self.stride = stride
        # Left branch
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)
        # Right branch
        self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)
        if stride==2:
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
        # Reduce channels
        self.conv2 = nn.Conv2d(2*out_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        # Left branch
        y1 = self.sep_conv1(x)
        y2 = self.sep_conv2(x)
        # Right branch
        y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y3 = self.bn1(self.conv1(y3))
        y4 = self.sep_conv3(x)
        # Concat & reduce channels
        b1 = F.relu(y1+y2)
        b2 = F.relu(y3+y4)
        y = torch.cat([b1,b2], 1)
        return F.relu(self.bn2(self.conv2(y)))
class PNASNet(nn.Module):
    """PNASNet backbone: three stages of normal cells separated by stride-2
    reduction cells, followed by 8x8 average pooling and a linear head.

    Args:
        cell_type: cell class, called as ``cell_type(in_planes, planes, stride=...)``.
        num_cells: number of normal cells per stage. Fix: this argument was
            previously accepted but ignored (every stage hard-coded 6 cells);
            it is now honored. Existing factories pass 6, so their behavior
            is unchanged.
        num_planes: channel width of the first stage (doubled per stage).
    """
    def __init__(self, cell_type, num_cells, num_planes):
        super(PNASNet, self).__init__()
        self.in_planes = num_planes
        self.cell_type = cell_type

        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_planes)

        self.layer1 = self._make_layer(num_planes, num_cells=num_cells)
        self.layer2 = self._downsample(num_planes*2)
        self.layer3 = self._make_layer(num_planes*2, num_cells=num_cells)
        self.layer4 = self._downsample(num_planes*4)
        self.layer5 = self._make_layer(num_planes*4, num_cells=num_cells)

        self.linear = nn.Linear(num_planes*4, 10)

    def _make_layer(self, planes, num_cells):
        # A run of stride-1 cells at a fixed width.
        layers = []
        for _ in range(num_cells):
            layers.append(self.cell_type(self.in_planes, planes, stride=1))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def _downsample(self, planes):
        # Single stride-2 reduction cell that also widens the channels.
        layer = self.cell_type(self.in_planes, planes, stride=2)
        self.in_planes = planes
        return layer

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = F.avg_pool2d(out, 8)
        out = self.linear(out.view(out.size(0), -1))
        return out
def PNASNetA():
    """PNASNet variant built from CellA (sep-conv + max-pool cell)."""
    return PNASNet(cell_type=CellA, num_cells=6, num_planes=44)


def PNASNetB():
    """PNASNet variant built from CellB (two-branch cell)."""
    return PNASNet(cell_type=CellB, num_cells=6, num_planes=32)
def test():
    """Smoke-test PNASNetB on a single random image."""
    model = PNASNetB()
    out = model(torch.randn(1, 3, 32, 32))
    print(out)
# test()
| 4,258 | 32.801587 | 105 | py |
DEAT | DEAT-main/models/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
    """Three-conv residual bottleneck (1x1 reduce -> 3x3 -> 1x1 expand x4)
    with an identity or 1x1-projection shortcut."""
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # The 3x3 conv carries the stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion *
                               planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet for CIFAR-sized (32x32) inputs: stride-1 stem, four stages,
    4x4 average pool, linear classifier."""
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage strides; the rest run at stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet18():
    """18-layer ResNet built from BasicBlocks."""
    return ResNet(block=BasicBlock, num_blocks=[2, 2, 2, 2])


def ResNet34():
    """34-layer ResNet built from BasicBlocks."""
    return ResNet(block=BasicBlock, num_blocks=[3, 4, 6, 3])


def ResNet50():
    """50-layer ResNet built from Bottlenecks."""
    return ResNet(block=Bottleneck, num_blocks=[3, 4, 6, 3])


def ResNet101():
    """101-layer ResNet built from Bottlenecks."""
    return ResNet(block=Bottleneck, num_blocks=[3, 4, 23, 3])


def ResNet152():
    """152-layer ResNet built from Bottlenecks."""
    return ResNet(block=Bottleneck, num_blocks=[3, 8, 36, 3])
def test():
    """Smoke-test ResNet-18 on a single random image."""
    model = ResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 4,218 | 30.721805 | 83 | py |
DEAT | DEAT-main/models/mobilenetv2.py | '''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''expand + depthwise + pointwise

    Inverted-residual block. NOTE(review): unlike the reference MobileNetV2,
    the residual add happens at every stride-1 block — when the channel
    counts differ, a 1x1 conv + BN projection is added to the shortcut
    rather than skipping the residual entirely.
    '''
    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride

        planes = expansion * in_planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Depthwise 3x3 (groups == channels).
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Linear (no activation) projection back to out_planes.
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Residual add only at stride 1; striding blocks are pure feed-forward.
        out = out + self.shortcut(x) if self.stride==1 else out
        return out
class MobileNetV2(nn.Module):
    """MobileNetV2 adapted for CIFAR-10 (32x32): stem and first stage run at
    stride 1, final average pool is 4x4."""
    # (expansion, out_planes, num_blocks, stride)
    cfg = [(1,  16, 1, 1),
           (6,  24, 2, 1),  # NOTE: change stride 2 -> 1 for CIFAR10
           (6,  32, 3, 2),
           (6,  64, 4, 2),
           (6,  96, 3, 1),
           (6, 160, 3, 2),
           (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        # NOTE: change conv1 stride 2 -> 1 for CIFAR10
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        # Expand cfg rows into Blocks; only the first block of a row strides.
        layers = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            strides = [stride] + [1]*(num_blocks-1)
            for stride in strides:
                layers.append(Block(in_planes, out_planes, expansion, stride))
                in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.relu(self.bn2(self.conv2(out)))
        # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    """Smoke-test MobileNetV2 on a random batch of two images."""
    model = MobileNetV2()
    out = model(torch.randn(2, 3, 32, 32))
    print(out.size())
# test()
| 3,092 | 34.551724 | 114 | py |
DEAT | DEAT-main/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
# Layer specs per VGG variant: ints are 3x3-conv output channel counts,
# 'M' inserts a 2x2 max-pool (stride 2).
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    """VGG backbone for CIFAR-10, built from the module-level ``cfg`` spec
    (conv+BN+ReLU for ints, max-pool for 'M')."""

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        # Trailing 1x1 average pool is a no-op kept for checkpoint parity.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
def test():
    """Smoke-test VGG11 on a random batch of two images."""
    model = VGG('VGG11')
    out = model(torch.randn(2, 3, 32, 32))
    print(out.size())
# test()
| 1,442 | 29.0625 | 117 | py |
DEAT | DEAT-main/models/densenet.py | '''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 (4k channels) then
    BN-ReLU-3x3 (k channels), concatenated in front of the input."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        # Dense connectivity: new features are prepended to the incoming map.
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """DenseNet transition: BN-ReLU-1x1 channel compression followed by
    2x2 average pooling (halves the spatial size)."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(compressed, 2)
class DenseNet(nn.Module):
    """DenseNet for 32x32 inputs: four dense blocks separated by channel-
    compressing transitions (factor ``reduction``)."""
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        # Each dense block adds nblocks[i] * growth_rate channels, then a
        # transition compresses them by `reduction`.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121():
    """DenseNet-121 (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32)


def DenseNet169():
    """DenseNet-169 (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32)


def DenseNet201():
    """DenseNet-201 (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32)


def DenseNet161():
    """DenseNet-161 (growth rate 48)."""
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48)


def densenet_cifar():
    """Small DenseNet for CIFAR experiments (growth rate 12)."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12)
def test():
    """Smoke-test the small CIFAR DenseNet on a single random image."""
    model = densenet_cifar()
    out = model(torch.randn(1, 3, 32, 32))
    print(out)
# test()
| 3,542 | 31.805556 | 96 | py |
DEAT | DEAT-main/models/googlenet.py | '''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception(nn.Module):
    """GoogLeNet inception module: four parallel branches (1x1; 1x1->3x3;
    1x1->two 3x3s standing in for 5x5; pool->1x1) concatenated on channels.
    Every conv is followed by BN + ReLU."""
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # 1x1 conv branch
        self.b1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )

        # 1x1 conv -> 3x3 conv branch
        self.b2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )

        # 1x1 conv -> 5x5 conv branch
        # (the 5x5 is factored into two stacked 3x3 convs)
        self.b3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )

        # 3x3 pool -> 1x1 conv branch
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )

    def forward(self, x):
        y1 = self.b1(x)
        y2 = self.b2(x)
        y3 = self.b3(x)
        y4 = self.b4(x)
        return torch.cat([y1,y2,y3,y4], 1)
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1 style) for 32x32 inputs, without the
    auxiliary classifiers; 10-way output head."""
    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        # Inception args: (in, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes)
        self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192,  96, 208, 16,  48,  64)
        self.b4 = Inception(512, 160, 112, 224, 24,  64,  64)
        self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
        self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    """Smoke-test GoogLeNet on a single random image."""
    model = GoogLeNet()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
# test()
| 3,221 | 28.833333 | 83 | py |
DEAT | DEAT-main/models/resnext.py | '''ResNeXt in PyTorch.
See the paper "Aggregated Residual Transformations for Deep Neural Networks" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''Grouped convolution block.

    ResNeXt bottleneck: 1x1 -> grouped 3x3 (``cardinality`` groups of
    ``bottleneck_width`` channels) -> 1x1 expand x2, plus a projection
    shortcut when the shape changes.
    '''
    expansion = 2

    def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*group_width)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*group_width)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNeXt(nn.Module):
    """ResNeXt-29-style network for 32x32 inputs: three grouped-conv stages
    (the bottleneck width doubles per stage), 8x8 average pool, linear head."""
    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # self.layer4 = self._make_layer(num_blocks[3], 2)
        self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes)

    def _make_layer(self, num_blocks, stride):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Double bottleneck_width after each stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # out = self.layer4(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNeXt29_2x64d():
    """ResNeXt-29, cardinality 2, bottleneck width 64."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=2, bottleneck_width=64)


def ResNeXt29_4x64d():
    """ResNeXt-29, cardinality 4, bottleneck width 64."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=4, bottleneck_width=64)


def ResNeXt29_8x64d():
    """ResNeXt-29, cardinality 8, bottleneck width 64."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=8, bottleneck_width=64)


def ResNeXt29_32x4d():
    """ResNeXt-29, cardinality 32, bottleneck width 4."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=32, bottleneck_width=4)
def test_resnext():
    """Smoke test for ResNeXt29_2x64d on a random CIFAR-sized input."""
    model = ResNeXt29_2x64d()
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits.size())
# test_resnext()
| 3,478 | 35.239583 | 129 | py |
DEAT | DEAT-main/models/senet.py | '''SENet in PyTorch.
SENet is the winner of ImageNet-2017. The paper is not released yet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Residual basic block with a squeeze-and-excitation (SE) gate."""

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

        # SE layers: 1x1 convs play the role of the FC squeeze/excite pair.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)  # Use nn.Conv2d instead of nn.Linear
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool down to a 1x1 channel descriptor.
        # NOTE: assumes square feature maps (H == W).
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # Fix: F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
        w = torch.sigmoid(self.fc2(w))
        # Excitation: channel-wise rescaling via broadcasting.
        out = out * w
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    """Pre-activation residual block with a squeeze-and-excitation gate."""

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # The projection shortcut is only created when the shape changes;
        # forward() detects its presence with hasattr().
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )

        # SE layers
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # Projection (if any) taps the pre-activated input; otherwise identity.
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze: global average pool (assumes square feature maps).
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # Fix: F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
        w = torch.sigmoid(self.fc2(w))
        # Excitation
        out = out * w
        out += shortcut
        return out
class SENet(nn.Module):
    """SENet backbone: four residual stages followed by a linear classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stage widths double while spatial resolution halves (except stage 1).
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def SENet18():
    """SENet-18: pre-activation SE blocks in a (2, 2, 2, 2) layout."""
    return SENet(PreActBlock, [2, 2, 2, 2])


def test():
    """Smoke test: one random CIFAR-sized batch through SENet-18."""
    model = SENet18()
    y = model(torch.randn(1, 3, 32, 32))
    print(y.size())
# test()
| 4,027 | 32.016393 | 102 | py |
DEAT | DEAT-main/models/shufflenet.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    """Channel shuffle across `groups` convolution groups."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        # [N,C,H,W] -> [N,g,C/g,H,W] -> swap the two group axes -> flatten back.
        n, c, h, w = x.size()
        g = self.groups
        return x.view(n, g, c // g, h, w).permute(0, 2, 1, 3, 4).reshape(n, c, h, w)
class Bottleneck(nn.Module):
    """ShuffleNet unit: grouped 1x1 -> channel shuffle -> depthwise 3x3 -> grouped 1x1.

    For stride=2 the average-pooled input is concatenated to the branch output;
    for stride=1 they are summed, so callers must keep out_planes == in_planes.
    """

    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride
        # Bugfix: use integer division -- `out_planes / 4` yields a float in
        # Python 3, which nn.Conv2d rejects as a channel/group count.
        mid_planes = out_planes // 4
        # The very first stage (24 input channels) uses an ungrouped 1x1 conv.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # groups == channels makes this a depthwise convolution.
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # Downsampling blocks concatenate the shortcut; others add it.
        out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out + res)
        return out
class ShuffleNet(nn.Module):
    """ShuffleNet for CIFAR-10, built from a configuration dict with keys
    'out_planes', 'num_blocks' and 'groups'."""

    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']

        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        """Build one stage; the first block downsamples and concatenates its input."""
        stage = []
        for i in range(num_blocks):
            if i == 0:
                # Downsampling block: its shortcut is concatenated, so the
                # branch only has to produce out_planes - in_planes channels.
                stage.append(Bottleneck(self.in_planes, out_planes - self.in_planes,
                                        stride=2, groups=groups))
            else:
                stage.append(Bottleneck(self.in_planes, out_planes,
                                        stride=1, groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*stage)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def ShuffleNetG2():
    """ShuffleNet with 2 convolution groups."""
    return ShuffleNet({
        'out_planes': [200, 400, 800],
        'num_blocks': [4, 8, 4],
        'groups': 2,
    })


def ShuffleNetG3():
    """ShuffleNet with 3 convolution groups."""
    return ShuffleNet({
        'out_planes': [240, 480, 960],
        'num_blocks': [4, 8, 4],
        'groups': 3,
    })


def test():
    """Smoke test: one random CIFAR-sized batch through ShuffleNetG2."""
    net = ShuffleNetG2()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y)
# test()
| 3,542 | 31.209091 | 126 | py |
DEAT | DEAT-main/models/lenet.py | '''LeNet in PyTorch.'''
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """LeNet-5 style CNN for 3x32x32 inputs, 10 output classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Two 5x5 convs + two 2x2 pools leave a 16x5x5 feature map.
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2)
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)
| 699 | 28.166667 | 43 | py |
DEAT | DEAT-main/models/mobilenet.py | '''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    """Depthwise-separable convolution: depthwise 3x3 then pointwise 1x1."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # groups=in_planes makes conv1 a per-channel (depthwise) convolution.
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
                               padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(out)))
class MobileNet(nn.Module):
    """MobileNet v1 for CIFAR-sized inputs."""

    # Each entry is an output width; a (width, 2) tuple marks a stride-2 layer.
    cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        """Translate `cfg` into a sequence of depthwise-separable blocks."""
        blocks = []
        for entry in self.cfg:
            if isinstance(entry, int):
                out_planes, stride = entry, 1
            else:
                out_planes, stride = entry
            blocks.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        return self.linear(out.view(out.size(0), -1))
def test():
    """Smoke test: one random CIFAR-sized batch through MobileNet."""
    model = MobileNet()
    logits = model(torch.randn(1, 3, 32, 32))
    print(logits.size())
# test()
| 2,025 | 31.677419 | 123 | py |
DEAT | DEAT-main/models/dpn.py | '''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DPN bottleneck combining a residual path and a densely-growing path."""

    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth

        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # Grouped 3x3 conv (32 groups, ResNeXt-style).
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
                               padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)

        # Only the first block of each stage projects its input.
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_planes+dense_depth),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        shortcut = self.shortcut(x)
        d = self.out_planes
        # Residual path: first d channels are summed.
        # Dense path: remaining channels of both tensors are concatenated.
        out = torch.cat([shortcut[:, :d] + out[:, :d], shortcut[:, d:], out[:, d:]], 1)
        return F.relu(out)
class DPN(nn.Module):
    """Dual Path Network assembled from a configuration dict."""

    def __init__(self, cfg):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.last_planes = 64
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
        # The dense path grows by dense_depth per block, hence the extra term.
        self.linear = nn.Linear(out_planes[3] + (num_blocks[3]+1) * dense_depth[3], 10)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        """Build one stage; only its first block downsamples/projects."""
        layers = []
        for i, s in enumerate([stride] + [1] * (num_blocks - 1)):
            layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, s, i == 0))
            self.last_planes = out_planes + (i + 2) * dense_depth
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def DPN26():
    """Small DPN variant (2 blocks per stage)."""
    return DPN({
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (2, 2, 2, 2),
        'dense_depth': (16, 32, 24, 128),
    })


def DPN92():
    """DPN-92 configuration."""
    return DPN({
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (3, 4, 20, 3),
        'dense_depth': (16, 32, 24, 128),
    })


def test():
    """Smoke test: one random CIFAR-sized batch through DPN-92."""
    net = DPN92()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y)
# test()
| 3,562 | 34.989899 | 116 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/pnm_optim/pnm.py |
import math
import torch
from torch.optim.optimizer import Optimizer, required
class PNM(Optimizer):
    r"""Implements Positive-Negative Momentum (PNM).
    It has be proposed in
    `Positive-Negative Momentum: Manipulating Stochastic Gradient Noise to Improve
    Generalization`__.
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        betas (Tuple[float, float], optional): inertia coefficients used for computing
            pn momentum(default: (0.9, 1.))
        weight_decay (float, optional): weight decay (default: 0)
        decoupled (bool, optional): decoupled weight decay or L2 regularization (default: True)
    """

    def __init__(self, params, lr=required, betas=(0.9, 1.), weight_decay=0, decoupled=True):
        # Validate hyper-parameters before handing them to the base Optimizer.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0. <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay, decoupled=decoupled)
        super(PNM, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PNM, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints saved before 'decoupled' existed fall back to True.
            group.setdefault('decoupled', True)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            beta1, beta2 = group['betas']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                # Perform decoupled weight decay or L2 Regularization
                if group['decoupled']:
                    # Decoupled (AdamW-style): shrink the weights directly.
                    p.mul_(1 - group['lr'] * group['weight_decay'])
                else:
                    # Classic L2: fold the penalty into the gradient in place.
                    d_p.add_(p.data, alpha=group['weight_decay'])

                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                state['step'] += 1
                param_state = self.state[p]
                # Two momentum buffers are kept and their roles alternate each
                # step: the buffer updated this step acts as the "positive"
                # momentum, the stale one as the "negative" momentum.
                if state['step'] == 1:
                    pos_momentum = param_state['pos_momentum'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    neg_momentum = param_state['neg_momentum'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                elif state['step'] % 2 == 1:
                    pos_momentum = param_state['pos_momentum']
                    neg_momentum = param_state['neg_momentum']
                else:
                    # Even step: swap which buffer is updated.
                    neg_momentum = param_state['pos_momentum']
                    pos_momentum = param_state['neg_momentum']

                # In-place EMA update of the active (positive) buffer; the
                # stale buffer last saw a gradient two steps ago.
                pos_momentum.mul_(beta1**2).add_(d_p, alpha=1-beta1**2)
                # Normalizer that keeps the combined noise magnitude constant.
                noise_norm = math.sqrt((1+beta2) ** 2 + beta2 ** 2)
                # Update direction: (1+beta2)*pos - beta2*neg, rescaled.
                delta_p = pos_momentum.mul(1+beta2).add(neg_momentum, alpha=-beta2).mul(1/noise_norm)
                p.add_(delta_p, alpha=-group['lr'])
        return loss
| 3,616 | 39.188889 | 121 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/pnm_optim/adapnm.py |
import math
import torch
from torch.optim.optimizer import Optimizer, required
class AdaPNM(Optimizer):
    r"""Implements Adaptive Positive-Negative Momentum.
    It has be proposed in
    `Positive-Negative Momentum: Manipulating Stochastic Gradient Noise to Improve
    Generalization`__.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
            running averages of gradient, its square, and pn momentum (default: (0.9,0.999, 1.))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay (default: 0.)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: True)
        decoupled (bool, optional): decoupled weight decay or L2 regularization (default: True)
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999, 1.), eps=1e-8,
                 weight_decay=0., amsgrad=True, decoupled=True):
        # Validate hyper-parameters before handing them to the base Optimizer.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0. <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad, decoupled=decoupled)
        super(AdaPNM, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdaPNM, self).__setstate__(state)
        for group in self.param_groups:
            # Restore defaults for options missing from older checkpoints.
            group.setdefault('amsgrad', True)
            group.setdefault('decoupled', True)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('This optimizer does not support sparse gradients.')
                # Perform decoupled weight decay or L2 Regularization
                if group['decoupled']:
                    # Decoupled (AdamW-style): shrink the weights directly.
                    p.mul_(1 - group['lr'] * group['weight_decay'])
                else:
                    # Classic L2: fold the penalty into the gradient in place.
                    grad.add_(p.data, alpha=group['weight_decay'])

                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of gradient values
                    state['neg_exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2, beta3 = group['betas']

                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                exp_avg_sq = state['exp_avg_sq']
                # Two first-moment buffers alternate roles by step parity:
                # the one updated this step is the "positive" momentum, the
                # stale one (last updated two steps ago) is the "negative".
                if state['step'] % 2 == 1:
                    exp_avg, neg_exp_avg = state['exp_avg'], state['neg_exp_avg']
                else:
                    exp_avg, neg_exp_avg = state['neg_exp_avg'], state['exp_avg']

                # In-place EMA update of the active first-moment buffer.
                exp_avg.mul_(beta1**2).add_(grad, alpha=1 - beta1**2)
                # Normalizer keeping the combined noise magnitude constant.
                noise_norm = math.sqrt((1+beta3) ** 2 + beta3 ** 2)
                # Second moment is shared by both parities (Adam-style).
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # Combined direction: (1+beta3)*pos - beta3*neg, rescaled.
                pnmomentum = exp_avg.mul(1+beta3).add(neg_exp_avg,alpha=-beta3).mul(1/noise_norm)
                # Adam-style update with the PN momentum as numerator.
                p.addcdiv_(pnmomentum, denom, value=-step_size)
        return loss
| 5,725 | 45.177419 | 106 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/resnet.py | '''ResNet in PyTorch.
The source code is adopted from:
https://github.com/kuangliu/pytorch-cifar
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two 3x3 convs with an identity (or 1x1-projected) residual connection."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Project the input when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (4x channel expansion)."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        # Project the input when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + residual)
class ResNet(nn.Module):
    """ResNet backbone for CIFAR-sized inputs (He et al., 2015)."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stage widths double while spatial resolution halves (except stage 1).
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack blocks for one stage; only the first block may downsample."""
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        return self.linear(out.view(out.size(0), -1))
def ResNet18(num_classes=10):
    """ResNet-18 (BasicBlock, 2-2-2-2)."""
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)


def ResNet34(num_classes=10):
    """ResNet-34 (BasicBlock, 3-4-6-3)."""
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)


def ResNet50(num_classes=10):
    """ResNet-50 (Bottleneck, 3-4-6-3)."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)


def ResNet101(num_classes=10):
    """ResNet-101 (Bottleneck, 3-4-23-3)."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)


def ResNet152(num_classes=10):
    """ResNet-152 (Bottleneck, 3-8-36-3)."""
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
| 4,138 | 34.076271 | 102 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/vgg.py | '''VGG11/13/16/19 in Pytorch.
The source code is adopted from:
https://github.com/kuangliu/pytorch-cifar
'''
import torch
import torch.nn as nn
# Layer layouts per VGG variant: integers are conv output widths,
# 'M' marks a 2x2 max-pooling layer.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


class VGG(nn.Module):
    """VGG for CIFAR-10; the variant is selected by name from `cfg`."""

    def __init__(self, vgg_name, num_classes=10):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, num_classes)

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))

    def _make_layers(self, layout):
        """Translate a layout list into a conv/BN/ReLU + pooling stack."""
        layers = []
        in_channels = 3
        for spec in layout:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
| 1,432 | 31.568182 | 117 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/densenet.py | '''DenseNet in PyTorch.
The source code is adopted from:
https://github.com/kuangliu/pytorch-cifar
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DenseNet-B unit: BN-ReLU-1x1 then BN-ReLU-3x3, concatenated with input."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        # 1x1 bottleneck widens to 4*growth_rate before the 3x3 conv.
        self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4*growth_rate)
        self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        # Dense connectivity: new features are prepended to the input channels.
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """Compression between dense blocks: BN-ReLU-1x1 conv, then 2x2 avg-pool."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        reduced = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(reduced, 2)
class DenseNet(nn.Module):
    """DenseNet-BC for CIFAR: four dense blocks, the first three followed by
    compressing transition layers."""

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        # Dense blocks 1-3 each end with a transition that compresses channels
        # by `reduction` and halves the spatial resolution.
        for idx, nblock in enumerate(nblocks[:3], start=1):
            setattr(self, 'dense%d' % idx, self._make_dense_layers(block, num_planes, nblock))
            num_planes += nblock*growth_rate
            out_planes = int(math.floor(num_planes*reduction))
            setattr(self, 'trans%d' % idx, Transition(num_planes, out_planes))
            num_planes = out_planes

        # Final dense block has no transition.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack `nblock` units; each adds growth_rate channels to its input."""
        stage = []
        for _ in range(nblock):
            stage.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*stage)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        return self.linear(out.view(out.size(0), -1))
def DenseNet121(num_classes=10):
    """DenseNet-121 (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32, num_classes=num_classes)


def DenseNet169(num_classes=10):
    """DenseNet-169 (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32, num_classes=num_classes)


def DenseNet201(num_classes=10):
    """DenseNet-201 (growth rate 32)."""
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32, num_classes=num_classes)


def DenseNet161(num_classes=10):
    """DenseNet-161 (growth rate 48)."""
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48, num_classes=num_classes)


def densenet_cifar(num_classes=10):
    """Compact DenseNet for CIFAR experiments (growth rate 12)."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12, num_classes=num_classes)
| 3,707 | 33.981132 | 96 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/googlenet.py | """google net in pytorch
The source code is adopted from:
https://github.com/weiaicunzai/pytorch-cifar100/
[1] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
Going Deeper with Convolutions
https://arxiv.org/abs/1409.4842v1
"""
import torch
import torch.nn as nn
class Inception(nn.Module):
    """GoogLeNet inception module: four parallel branches whose outputs are
    concatenated along the channel axis.

    Output channels = n1x1 + n3x3 + n5x5 + pool_proj.
    """

    def __init__(self, input_channels, n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, pool_proj):
        super().__init__()

        #1x1conv branch
        self.b1 = nn.Sequential(
            nn.Conv2d(input_channels, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(inplace=True)
        )

        #1x1conv -> 3x3conv branch
        self.b2 = nn.Sequential(
            nn.Conv2d(input_channels, n3x3_reduce, kernel_size=1),
            nn.BatchNorm2d(n3x3_reduce),
            nn.ReLU(inplace=True),
            nn.Conv2d(n3x3_reduce, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(inplace=True)
        )

        #1x1conv -> 5x5conv branch
        #we use 2 3x3 conv filters stacked instead
        #of 1 5x5 filters to obtain the same receptive
        #field with fewer parameters
        self.b3 = nn.Sequential(
            nn.Conv2d(input_channels, n5x5_reduce, kernel_size=1),
            nn.BatchNorm2d(n5x5_reduce),
            nn.ReLU(inplace=True),
            nn.Conv2d(n5x5_reduce, n5x5, kernel_size=3, padding=1),
            # Bugfix: was nn.BatchNorm2d(n5x5, n5x5), which passed n5x5 as the
            # `eps` argument and effectively disabled normalization here.
            nn.BatchNorm2d(n5x5),
            nn.ReLU(inplace=True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(inplace=True)
        )

        #3x3pooling -> 1x1conv
        #same conv
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(input_channels, pool_proj, kernel_size=1),
            nn.BatchNorm2d(pool_proj),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        # Concatenate the four branch outputs along the channel dimension.
        return torch.cat([self.b1(x), self.b2(x), self.b3(x), self.b4(x)], dim=1)
class GoogleNet(nn.Module):
    """GoogLeNet adapted for small (CIFAR-style) inputs."""

    def __init__(self, num_class=100):
        super().__init__()
        # A single 3x3 conv stem replaces the original 7x7/stride-2 stem,
        # since CIFAR images are already small.
        self.prelayer = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )

        # Stage naming (a3, b3, ...) follows the paper even though only one
        # conv layer precedes the inception stack here.
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        # Stride-2 max-pooling halves the grid between inception stages.
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        # Global average pooling + dropout replaces the FC stack of the paper.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout2d(p=0.4)
        self.linear = nn.Linear(1024, num_class)

    def forward(self, x):
        feats = self.prelayer(x)
        for layer in (self.a3, self.b3, self.maxpool,
                      self.a4, self.b4, self.c4, self.d4, self.e4, self.maxpool,
                      self.a5, self.b5):
            feats = layer(feats)
        feats = self.dropout(self.avgpool(feats))
        return self.linear(feats.view(feats.size()[0], -1))
def googlenet(num_class=100):
    """Build a GoogleNet classifier.

    Generalized to forward the class count instead of always using the
    hard-coded default; ``googlenet()`` with no arguments behaves exactly
    as before (100 classes).

    Args:
        num_class: number of output classes (default 100).

    Returns:
        A freshly initialized ``GoogleNet`` instance.
    """
    return GoogleNet(num_class=num_class)
| 4,443 | 32.164179 | 94 | py |
beta-tcvae | beta-tcvae-master/disentanglement_metrics.py | import math
import os
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.autograd import Variable
import lib.utils as utils
from metric_helpers.loader import load_model_and_dataset
from metric_helpers.mi_metric import compute_metric_shapes, compute_metric_faces
def estimate_entropies(qz_samples, qz_params, q_dist, n_samples=10000, weights=None):
    """Computes the term:
        E_{p(x)} E_{q(z|x)} [-log q(z)]
    and
        E_{p(x)} E_{q(z_j|x)} [-log q(z_j)]
    where q(z) = 1/N sum_n=1^N q(z|x_n).
    Assumes samples are from q(z|x) for *all* x in the dataset.
    Assumes that q(z|x) is factorial ie. q(z|x) = prod_j q(z_j|x).

    Computes numerically stable NLL:
        - log q(z) = log N - logsumexp_n=1^N log q(z|x_n)

    Inputs:
    -------
        qz_samples (K, N) Variable
        qz_params  (N, K, nparams) Variable
        weights (N) Variable
    """
    # Only take a sample subset of the samples.  With no weights, subsample
    # uniformly at random; otherwise resample proportionally to `weights`.
    # NOTE(review): requires CUDA — indices and accumulators are created
    # directly on the GPU.
    if weights is None:
        qz_samples = qz_samples.index_select(1, Variable(torch.randperm(qz_samples.size(1))[:n_samples].cuda()))
    else:
        sample_inds = torch.multinomial(weights, n_samples, replacement=True)
        qz_samples = qz_samples.index_select(1, sample_inds)

    K, S = qz_samples.size()
    N, _, nparams = qz_params.size()
    assert(nparams == q_dist.nparams)
    assert(K == qz_params.size(1))

    # `weights` becomes the log mixture weight of each datapoint in q(z):
    # uniform -log N, or log-normalized importance weights.
    if weights is None:
        weights = -math.log(N)
    else:
        weights = torch.log(weights.view(N, 1, 1) / weights.sum())

    entropies = torch.zeros(K).cuda()

    pbar = tqdm(total=S)
    k = 0
    # Process samples in chunks of 10 to bound the (N, K, chunk) density
    # tensor held in memory at once.
    while k < S:
        batch_size = min(10, S - k)
        logqz_i = q_dist.log_density(
            qz_samples.view(1, K, S).expand(N, K, S)[:, :, k:k + batch_size],
            qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)[:, :, k:k + batch_size])
        k += batch_size

        # computes - log q(z_i) summed over minibatch
        entropies += - utils.logsumexp(logqz_i + weights, dim=0, keepdim=False).data.sum(1)
        pbar.update(batch_size)
    pbar.close()

    # Monte-Carlo average over the S retained samples.
    entropies /= S

    return entropies
def mutual_info_metric_shapes(vae, shapes_dataset):
    """Compute the MIG-style disentanglement metric on the dSprites dataset.

    Encodes every image, then estimates the marginal entropy H(z_j) and the
    conditional entropies H(z_j | factor) for each ground-truth factor
    (scale, orientation, pos-x, pos-y) by averaging entropy estimates over
    each factor's values.  Requires CUDA.

    Returns:
        (metric, marginal_entropies, cond_entropies)
    """
    dataset_loader = DataLoader(shapes_dataset, batch_size=1000, num_workers=1, shuffle=False)

    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()

    print('Computing q(z|x) distributions.')
    # One row of posterior parameters per datapoint.
    qz_params = torch.Tensor(N, K, nparams)

    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size

    # Reshape to the dSprites factor grid: (shape, scale, orientation, posX, posY).
    qz_params = Variable(qz_params.view(3, 6, 40, 32, 32, K, nparams).cuda())
    qz_samples = vae.q_dist.sample(params=qz_params)

    print('Estimating marginal entropies.')
    # marginal entropies
    marginal_entropies = estimate_entropies(
        qz_samples.view(N, K).transpose(0, 1),
        qz_params.view(N, K, nparams),
        vae.q_dist)

    marginal_entropies = marginal_entropies.cpu()
    # One row of conditional entropies per ground-truth factor below.
    cond_entropies = torch.zeros(4, K)

    print('Estimating conditional entropies for scale.')
    for i in range(6):
        # Fix scale = i and estimate entropy over the remaining factors.
        qz_samples_scale = qz_samples[:, i, :, :, :, :].contiguous()
        qz_params_scale = qz_params[:, i, :, :, :, :].contiguous()

        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 6, K).transpose(0, 1),
            qz_params_scale.view(N // 6, K, nparams),
            vae.q_dist)

        cond_entropies[0] += cond_entropies_i.cpu() / 6

    print('Estimating conditional entropies for orientation.')
    for i in range(40):
        qz_samples_scale = qz_samples[:, :, i, :, :, :].contiguous()
        qz_params_scale = qz_params[:, :, i, :, :, :].contiguous()

        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 40, K).transpose(0, 1),
            qz_params_scale.view(N // 40, K, nparams),
            vae.q_dist)

        cond_entropies[1] += cond_entropies_i.cpu() / 40

    print('Estimating conditional entropies for pos x.')
    for i in range(32):
        qz_samples_scale = qz_samples[:, :, :, i, :, :].contiguous()
        qz_params_scale = qz_params[:, :, :, i, :, :].contiguous()

        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 32, K).transpose(0, 1),
            qz_params_scale.view(N // 32, K, nparams),
            vae.q_dist)

        cond_entropies[2] += cond_entropies_i.cpu() / 32

    print('Estimating conditional entropies for pox y.')
    for i in range(32):
        qz_samples_scale = qz_samples[:, :, :, :, i, :].contiguous()
        qz_params_scale = qz_params[:, :, :, :, i, :].contiguous()

        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 32, K).transpose(0, 1),
            qz_params_scale.view(N // 32, K, nparams),
            vae.q_dist)

        cond_entropies[3] += cond_entropies_i.cpu() / 32

    metric = compute_metric_shapes(marginal_entropies, cond_entropies)
    return metric, marginal_entropies, cond_entropies
def mutual_info_metric_faces(vae, shapes_dataset):
    """Compute the MIG-style disentanglement metric on the 3D faces dataset.

    Same procedure as ``mutual_info_metric_shapes`` but with the faces
    factor grid (identity, azimuth, elevation, lighting): marginal entropy
    per latent, plus conditional entropies for azimuth, elevation and
    lighting.  Requires CUDA.

    Returns:
        (metric, marginal_entropies, cond_entropies)
    """
    dataset_loader = DataLoader(shapes_dataset, batch_size=1000, num_workers=1, shuffle=False)

    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()

    print('Computing q(z|x) distributions.')
    qz_params = torch.Tensor(N, K, nparams)

    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size

    # Factor grid: (identity, azimuth, elevation, lighting).
    qz_params = Variable(qz_params.view(50, 21, 11, 11, K, nparams).cuda())
    qz_samples = vae.q_dist.sample(params=qz_params)

    print('Estimating marginal entropies.')
    # marginal entropies
    marginal_entropies = estimate_entropies(
        qz_samples.view(N, K).transpose(0, 1),
        qz_params.view(N, K, nparams),
        vae.q_dist)

    marginal_entropies = marginal_entropies.cpu()
    cond_entropies = torch.zeros(3, K)

    print('Estimating conditional entropies for azimuth.')
    for i in range(21):
        qz_samples_pose_az = qz_samples[:, i, :, :, :].contiguous()
        qz_params_pose_az = qz_params[:, i, :, :, :].contiguous()

        cond_entropies_i = estimate_entropies(
            qz_samples_pose_az.view(N // 21, K).transpose(0, 1),
            qz_params_pose_az.view(N // 21, K, nparams),
            vae.q_dist)

        cond_entropies[0] += cond_entropies_i.cpu() / 21

    print('Estimating conditional entropies for elevation.')
    for i in range(11):
        qz_samples_pose_el = qz_samples[:, :, i, :, :].contiguous()
        qz_params_pose_el = qz_params[:, :, i, :, :].contiguous()

        cond_entropies_i = estimate_entropies(
            qz_samples_pose_el.view(N // 11, K).transpose(0, 1),
            qz_params_pose_el.view(N // 11, K, nparams),
            vae.q_dist)

        cond_entropies[1] += cond_entropies_i.cpu() / 11

    print('Estimating conditional entropies for lighting.')
    for i in range(11):
        qz_samples_lighting = qz_samples[:, :, :, i, :].contiguous()
        qz_params_lighting = qz_params[:, :, :, i, :].contiguous()

        cond_entropies_i = estimate_entropies(
            qz_samples_lighting.view(N // 11, K).transpose(0, 1),
            qz_params_lighting.view(N // 11, K, nparams),
            vae.q_dist)

        cond_entropies[2] += cond_entropies_i.cpu() / 11

    metric = compute_metric_faces(marginal_entropies, cond_entropies)
    return metric, marginal_entropies, cond_entropies
if __name__ == '__main__':
    # CLI entry point: load a checkpoint, compute the disentanglement metric
    # for its dataset, and save the results next to --save.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpt', required=True)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--save', type=str, default='.')
    args = parser.parse_args()

    if args.gpu != 0:
        torch.cuda.set_device(args.gpu)
    vae, dataset, cpargs = load_model_and_dataset(args.checkpt)
    # Dispatch to mutual_info_metric_shapes / mutual_info_metric_faces based
    # on the dataset name stored in the checkpoint args.
    metric, marginal_entropies, cond_entropies = eval('mutual_info_metric_' + cpargs.dataset)(vae, dataset)
    torch.save({
        'metric': metric,
        'marginal_entropies': marginal_entropies,
        'cond_entropies': cond_entropies,
    }, os.path.join(args.save, 'disentanglement_metric.pth'))
    print('MIG: {:.2f}'.format(metric))
| 8,678 | 34.863636 | 112 | py |
beta-tcvae | beta-tcvae-master/vae_quant.py | import os
import time
import math
from numbers import Number
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import visdom
from torch.autograd import Variable
from torch.utils.data import DataLoader
import lib.dist as dist
import lib.utils as utils
import lib.datasets as dset
from lib.flows import FactorialNormalizingFlow
from elbo_decomposition import elbo_decomposition
from plot_latent_vs_true import plot_vs_gt_shapes, plot_vs_gt_faces # noqa: F401
class MLPEncoder(nn.Module):
    """Fully-connected encoder: a flattened 64x64 image -> ``output_dim`` values
    (the concatenated parameters of q(z|x))."""

    def __init__(self, output_dim):
        super(MLPEncoder, self).__init__()
        self.output_dim = output_dim

        self.fc1 = nn.Linear(4096, 1200)
        self.fc2 = nn.Linear(1200, 1200)
        self.fc3 = nn.Linear(1200, output_dim)
        # NOTE(review): conv_z is never referenced in forward(); it is kept so
        # the parameter layout and existing checkpoints remain compatible.
        self.conv_z = nn.Conv2d(64, output_dim, 4, 1, 0)

        # shared non-linearity
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        flat = x.view(-1, 64 * 64)
        hidden = self.act(self.fc2(self.act(self.fc1(flat))))
        out = self.fc3(hidden)
        return out.view(x.size(0), self.output_dim)
class MLPDecoder(nn.Module):
    """Fully-connected decoder: latent code -> 64x64 image logits."""

    def __init__(self, input_dim):
        super(MLPDecoder, self).__init__()
        # Three tanh hidden layers followed by a linear output layer; the
        # module sequence inside `net` matches the original layout exactly.
        widths = [input_dim, 1200, 1200, 1200]
        layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.Tanh())
        layers.append(nn.Linear(1200, 4096))
        self.net = nn.Sequential(*layers)

    def forward(self, z):
        logits = self.net(z.view(z.size(0), -1))
        # Reshape flat logits back to single-channel image form.
        return logits.view(z.size(0), 1, 64, 64)
class ConvEncoder(nn.Module):
    """Convolutional encoder: 1x64x64 image -> ``output_dim`` values
    (the concatenated parameters of q(z|x))."""

    def __init__(self, output_dim):
        super(ConvEncoder, self).__init__()
        self.output_dim = output_dim

        # Five conv+batchnorm stages, each halving the spatial grid
        # (64 -> 32 -> 16 -> 8 -> 4 -> 1), then a 1x1 conv head.
        self.conv1 = nn.Conv2d(1, 32, 4, 2, 1)          # 32 x 32
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 32, 4, 2, 1)         # 16 x 16
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, 4, 2, 1)         # 8 x 8
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 64, 4, 2, 1)         # 4 x 4
        self.bn4 = nn.BatchNorm2d(64)
        self.conv5 = nn.Conv2d(64, 512, 4)
        self.bn5 = nn.BatchNorm2d(512)
        self.conv_z = nn.Conv2d(512, output_dim, 1)

        # shared non-linearity
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        h = x.view(-1, 1, 64, 64)
        stages = ((self.conv1, self.bn1), (self.conv2, self.bn2),
                  (self.conv3, self.bn3), (self.conv4, self.bn4),
                  (self.conv5, self.bn5))
        for conv, bn in stages:
            h = self.act(bn(conv(h)))
        return self.conv_z(h).view(x.size(0), self.output_dim)
class ConvDecoder(nn.Module):
    """Transposed-convolutional decoder: latent code -> 1x64x64 image logits."""

    def __init__(self, input_dim):
        super(ConvDecoder, self).__init__()
        # Mirror of ConvEncoder: five deconv+batchnorm stages growing the
        # grid (1 -> 4 -> 8 -> 16 -> 32), then a final deconv to 64x64.
        self.conv1 = nn.ConvTranspose2d(input_dim, 512, 1, 1, 0)  # 1 x 1
        self.bn1 = nn.BatchNorm2d(512)
        self.conv2 = nn.ConvTranspose2d(512, 64, 4, 1, 0)         # 4 x 4
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.ConvTranspose2d(64, 64, 4, 2, 1)          # 8 x 8
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = nn.ConvTranspose2d(64, 32, 4, 2, 1)          # 16 x 16
        self.bn4 = nn.BatchNorm2d(32)
        self.conv5 = nn.ConvTranspose2d(32, 32, 4, 2, 1)          # 32 x 32
        self.bn5 = nn.BatchNorm2d(32)
        self.conv_final = nn.ConvTranspose2d(32, 1, 4, 2, 1)

        # shared non-linearity
        self.act = nn.ReLU(inplace=True)

    def forward(self, z):
        h = z.view(z.size(0), z.size(1), 1, 1)
        stages = ((self.conv1, self.bn1), (self.conv2, self.bn2),
                  (self.conv3, self.bn3), (self.conv4, self.bn4),
                  (self.conv5, self.bn5))
        for conv, bn in stages:
            h = self.act(bn(conv(h)))
        return self.conv_final(h)
class VAE(nn.Module):
    """VAE with the beta-TC-VAE decomposed ELBO objective.

    Supports the plain beta-VAE penalty, the TC-VAE decomposition
    (index-code MI / total correlation / dimension-wise KL), and two
    estimators for log q(z): minibatch weighted sampling (default) and
    minibatch stratified sampling (``mss=True``).
    """

    # NOTE(review): the dist.Normal() defaults are evaluated once at class
    # definition time, so all VAEs built with defaults share those
    # distribution objects — confirm the dist classes are stateless.
    def __init__(self, z_dim, use_cuda=False, prior_dist=dist.Normal(), q_dist=dist.Normal(),
                 include_mutinfo=True, tcvae=False, conv=False, mss=False):
        super(VAE, self).__init__()

        self.use_cuda = use_cuda
        self.z_dim = z_dim
        self.include_mutinfo = include_mutinfo
        self.tcvae = tcvae
        # lamb / beta are annealed externally (see anneal_kl).
        self.lamb = 0
        self.beta = 1
        self.mss = mss
        self.x_dist = dist.Bernoulli()

        # Model-specific
        # distribution family of p(z)
        self.prior_dist = prior_dist
        self.q_dist = q_dist
        # hyperparameters for prior p(z)
        self.register_buffer('prior_params', torch.zeros(self.z_dim, 2))

        # create the encoder and decoder networks
        if conv:
            self.encoder = ConvEncoder(z_dim * self.q_dist.nparams)
            self.decoder = ConvDecoder(z_dim)
        else:
            self.encoder = MLPEncoder(z_dim * self.q_dist.nparams)
            self.decoder = MLPDecoder(z_dim)

        if use_cuda:
            # calling cuda() here will put all the parameters of
            # the encoder and decoder networks into gpu memory
            self.cuda()

    # return prior parameters wrapped in a suitable Variable
    def _get_prior_params(self, batch_size=1):
        expanded_size = (batch_size,) + self.prior_params.size()
        prior_params = Variable(self.prior_params.expand(expanded_size))
        return prior_params

    # samples from the model p(x|z)p(z)
    def model_sample(self, batch_size=1):
        # sample from prior (value will be sampled by guide when computing the ELBO)
        prior_params = self._get_prior_params(batch_size)
        zs = self.prior_dist.sample(params=prior_params)
        # decode the latent code z
        x_params = self.decoder.forward(zs)
        return x_params

    # define the guide (i.e. variational distribution) q(z|x)
    def encode(self, x):
        x = x.view(x.size(0), 1, 64, 64)
        # use the encoder to get the parameters used to define q(z|x)
        z_params = self.encoder.forward(x).view(x.size(0), self.z_dim, self.q_dist.nparams)
        # sample the latent code z
        zs = self.q_dist.sample(params=z_params)
        return zs, z_params

    def decode(self, z):
        # Decoder output parameterizes a Bernoulli over pixels.
        x_params = self.decoder.forward(z).view(z.size(0), 1, 64, 64)
        xs = self.x_dist.sample(params=x_params)
        return xs, x_params

    # define a helper function for reconstructing images
    def reconstruct_img(self, x):
        zs, z_params = self.encode(x)
        xs, x_params = self.decode(zs)
        return xs, x_params, zs, z_params

    def _log_importance_weight_matrix(self, batch_size, dataset_size):
        # Log importance weights for minibatch stratified sampling: each row
        # treats one sample's own datapoint with weight 1/N and spreads the
        # remaining mass over the other minibatch members.
        N = dataset_size
        M = batch_size - 1
        strat_weight = (N - M) / (N * M)
        W = torch.Tensor(batch_size, batch_size).fill_(1 / M)
        W.view(-1)[::M+1] = 1 / N
        W.view(-1)[1::M+1] = strat_weight
        W[M-1, 0] = strat_weight
        return W.log()

    def elbo(self, x, dataset_size):
        """Return (modified_elbo, elbo) per sample; the first is the training
        objective (with beta / lambda / TC terms), the second the plain ELBO
        detached for monitoring."""
        # log p(x|z) + log p(z) - log q(z|x)
        batch_size = x.size(0)
        x = x.view(batch_size, 1, 64, 64)
        prior_params = self._get_prior_params(batch_size)
        x_recon, x_params, zs, z_params = self.reconstruct_img(x)
        logpx = self.x_dist.log_density(x, params=x_params).view(batch_size, -1).sum(1)
        logpz = self.prior_dist.log_density(zs, params=prior_params).view(batch_size, -1).sum(1)
        logqz_condx = self.q_dist.log_density(zs, params=z_params).view(batch_size, -1).sum(1)

        elbo = logpx + logpz - logqz_condx

        # Fast path: objective reduces to the plain ELBO, skip the expensive
        # pairwise density evaluation.
        if self.beta == 1 and self.include_mutinfo and self.lamb == 0:
            return elbo, elbo.detach()

        # compute log q(z) ~= log 1/(NM) sum_m=1^M q(z|x_m) = - log(MN) + logsumexp_m(q(z|x_m))
        # _logqz[i, j, k] = log q(z_k of sample i | x_j): every sample's code
        # evaluated under every other sample's posterior.
        _logqz = self.q_dist.log_density(
            zs.view(batch_size, 1, self.z_dim),
            z_params.view(1, batch_size, self.z_dim, self.q_dist.nparams)
        )

        if not self.mss:
            # minibatch weighted sampling
            logqz_prodmarginals = (logsumexp(_logqz, dim=1, keepdim=False) - math.log(batch_size * dataset_size)).sum(1)
            logqz = (logsumexp(_logqz.sum(2), dim=1, keepdim=False) - math.log(batch_size * dataset_size))
        else:
            # minibatch stratified sampling
            logiw_matrix = Variable(self._log_importance_weight_matrix(batch_size, dataset_size).type_as(_logqz.data))
            logqz = logsumexp(logiw_matrix + _logqz.sum(2), dim=1, keepdim=False)
            logqz_prodmarginals = logsumexp(
                logiw_matrix.view(batch_size, batch_size, 1) + _logqz, dim=1, keepdim=False).sum(1)

        if not self.tcvae:
            # beta-VAE style penalties on the KL (optionally without the
            # mutual-information term).
            if self.include_mutinfo:
                modified_elbo = logpx - self.beta * (
                    (logqz_condx - logpz) -
                    self.lamb * (logqz_prodmarginals - logpz)
                )
            else:
                modified_elbo = logpx - self.beta * (
                    (logqz - logqz_prodmarginals) +
                    (1 - self.lamb) * (logqz_prodmarginals - logpz)
                )
        else:
            # TC-VAE: penalize only the total-correlation term with beta.
            if self.include_mutinfo:
                modified_elbo = logpx - \
                    (logqz_condx - logqz) - \
                    self.beta * (logqz - logqz_prodmarginals) - \
                    (1 - self.lamb) * (logqz_prodmarginals - logpz)
            else:
                modified_elbo = logpx - \
                    self.beta * (logqz - logqz_prodmarginals) - \
                    (1 - self.lamb) * (logqz_prodmarginals - logpz)

        return modified_elbo, elbo.detach()
def logsumexp(value, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(value))).

    Equivalent to value.exp().sum(dim, keepdim).log(), but subtracts the
    maximum before exponentiating so exp() cannot overflow.
    """
    if dim is None:
        # Reduce over all elements at once.
        m = torch.max(value)
        sum_exp = torch.sum(torch.exp(value - m))
        if isinstance(sum_exp, Number):
            return m + math.log(sum_exp)
        return m + torch.log(sum_exp)

    m, _ = torch.max(value, dim=dim, keepdim=True)
    shifted = value - m
    if keepdim is False:
        m = m.squeeze(dim)
    return m + torch.log(torch.sum(torch.exp(shifted), dim=dim, keepdim=keepdim))
# for loading and batching datasets
def setup_data_loaders(args, use_cuda=False):
if args.dataset == 'shapes':
train_set = dset.Shapes()
elif args.dataset == 'faces':
train_set = dset.Faces()
else:
raise ValueError('Unknown dataset ' + str(args.dataset))
kwargs = {'num_workers': 4, 'pin_memory': use_cuda}
train_loader = DataLoader(dataset=train_set,
batch_size=args.batch_size, shuffle=True, **kwargs)
return train_loader
# Visdom window handles, reused across logging steps so each plot updates
# in place instead of opening a new window every call.
win_samples = None
win_test_reco = None
win_latent_walk = None
win_train_elbo = None
def display_samples(model, x, vis):
    """Push three diagnostic image panels to visdom: prior samples,
    reconstructions of the first 50 inputs, and latent traversals.

    Mutates the module-level window handles so panels update in place.
    """
    global win_samples, win_test_reco, win_latent_walk

    # plot random samples
    sample_mu = model.model_sample(batch_size=100).sigmoid()
    sample_mu = sample_mu
    images = list(sample_mu.view(-1, 1, 64, 64).data.cpu())
    win_samples = vis.images(images, 10, 2, opts={'caption': 'samples'}, win=win_samples)

    # plot the reconstructed distribution for the first 50 test images
    test_imgs = x[:50, :]
    _, reco_imgs, zs, _ = model.reconstruct_img(test_imgs)
    reco_imgs = reco_imgs.sigmoid()
    # Interleave each input with its reconstruction.
    test_reco_imgs = torch.cat([
        test_imgs.view(1, -1, 64, 64), reco_imgs.view(1, -1, 64, 64)], 0).transpose(0, 1)
    win_test_reco = vis.images(
        list(test_reco_imgs.contiguous().view(-1, 1, 64, 64).data.cpu()), 10, 2,
        opts={'caption': 'test reconstruction image'}, win=win_test_reco)

    # plot latent walks (change one variable while all others stay the same)
    zs = zs[0:3]
    batch_size, z_dim = zs.size()
    xs = []
    # Sweep each latent over 7 evenly spaced values in [-2, 2].
    delta = torch.autograd.Variable(torch.linspace(-2, 2, 7), volatile=True).type_as(zs)
    for i in range(z_dim):
        vec = Variable(torch.zeros(z_dim)).view(1, z_dim).expand(7, z_dim).contiguous().type_as(zs)
        vec[:, i] = 1
        vec = vec * delta[:, None]
        zs_delta = zs.clone().view(batch_size, 1, z_dim)
        # Zero out dimension i so the sweep value replaces it entirely.
        zs_delta[:, :, i] = 0
        zs_walk = zs_delta + vec[None]
        xs_walk = model.decoder.forward(zs_walk.view(-1, z_dim)).sigmoid()
        xs.append(xs_walk)

    xs = list(torch.cat(xs, 0).data.cpu())
    win_latent_walk = vis.images(xs, 7, 2, opts={'caption': 'latent walk'}, win=win_latent_walk)
def plot_elbo(train_elbo, vis):
    """Plot the running ELBO history in visdom, reusing one window."""
    global win_train_elbo
    win_train_elbo = vis.line(torch.Tensor(train_elbo), opts={'markers': True}, win=win_train_elbo)
def anneal_kl(args, vae, iteration):
    """Set the annealed KL coefficients on the model for this iteration.

    lamb linearly decays 0.95 -> 0 and beta linearly warms 0 -> args.beta
    over a dataset-dependent warm-up window; without the corresponding
    --lambda-anneal / --beta-anneal flags the constants 0 / args.beta are
    used directly.
    """
    if args.dataset == 'shapes':
        warmup_iter = 7000
    elif args.dataset == 'faces':
        warmup_iter = 2500

    # 1 --> 0
    vae.lamb = max(0, 0.95 - 1 / warmup_iter * iteration) if args.lambda_anneal else 0
    # 0 --> 1
    vae.beta = min(args.beta, args.beta / warmup_iter * iteration) if args.beta_anneal else args.beta
def main():
    """Train the (TC-)VAE from command-line arguments and return the model.

    Fixes relative to the original:
      * ``x.cuda(async=True)`` is a SyntaxError on Python 3.7+ (``async`` is
        a reserved word); replaced with the torch>=0.4 spelling
        ``non_blocking=True``.
      * ``elbo.mean().data[0]`` raises IndexError on 0-dim tensors in
        torch>=0.4; replaced with ``.item()``.
    Requires CUDA.
    """
    # parse command line arguments
    parser = argparse.ArgumentParser(description="parse args")
    parser.add_argument('-d', '--dataset', default='shapes', type=str, help='dataset name',
                        choices=['shapes', 'faces'])
    parser.add_argument('-dist', default='normal', type=str, choices=['normal', 'laplace', 'flow'])
    parser.add_argument('-n', '--num-epochs', default=50, type=int, help='number of training epochs')
    parser.add_argument('-b', '--batch-size', default=2048, type=int, help='batch size')
    parser.add_argument('-l', '--learning-rate', default=1e-3, type=float, help='learning rate')
    parser.add_argument('-z', '--latent-dim', default=10, type=int, help='size of latent dimension')
    parser.add_argument('--beta', default=1, type=float, help='ELBO penalty term')
    parser.add_argument('--tcvae', action='store_true')
    parser.add_argument('--exclude-mutinfo', action='store_true')
    parser.add_argument('--beta-anneal', action='store_true')
    parser.add_argument('--lambda-anneal', action='store_true')
    parser.add_argument('--mss', action='store_true', help='use the improved minibatch estimator')
    parser.add_argument('--conv', action='store_true')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--visdom', action='store_true', help='whether plotting in visdom is desired')
    parser.add_argument('--save', default='test1')
    parser.add_argument('--log_freq', default=200, type=int, help='num iterations per log')
    args = parser.parse_args()

    torch.cuda.set_device(args.gpu)

    # data loader
    train_loader = setup_data_loaders(args, use_cuda=True)

    # setup the VAE: choose the prior / posterior distribution families
    if args.dist == 'normal':
        prior_dist = dist.Normal()
        q_dist = dist.Normal()
    elif args.dist == 'laplace':
        prior_dist = dist.Laplace()
        q_dist = dist.Laplace()
    elif args.dist == 'flow':
        prior_dist = FactorialNormalizingFlow(dim=args.latent_dim, nsteps=32)
        q_dist = dist.Normal()

    vae = VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist,
              include_mutinfo=not args.exclude_mutinfo, tcvae=args.tcvae, conv=args.conv, mss=args.mss)

    # setup the optimizer
    optimizer = optim.Adam(vae.parameters(), lr=args.learning_rate)

    # setup visdom for visualization
    if args.visdom:
        vis = visdom.Visdom(env=args.save, port=4500)

    train_elbo = []

    # training loop
    dataset_size = len(train_loader.dataset)
    num_iterations = len(train_loader) * args.num_epochs
    iteration = 0
    # initialize loss accumulator
    elbo_running_mean = utils.RunningAverageMeter()
    while iteration < num_iterations:
        for i, x in enumerate(train_loader):
            iteration += 1
            batch_time = time.time()
            vae.train()
            anneal_kl(args, vae, iteration)
            optimizer.zero_grad()
            # transfer to GPU
            # FIX: 'async' is a reserved keyword in Python 3.7+; torch>=0.4
            # renamed the argument to non_blocking (same semantics).
            x = x.cuda(non_blocking=True)
            # wrap the mini-batch in a PyTorch Variable
            x = Variable(x)
            # do ELBO gradient and accumulate loss
            obj, elbo = vae.elbo(x, dataset_size)
            if utils.isnan(obj).any():
                raise ValueError('NaN spotted in objective.')
            # maximize the ELBO == minimize its negation
            obj.mean().mul(-1).backward()
            # FIX: .data[0] no longer works on 0-dim tensors (torch>=0.4);
            # .item() extracts the Python scalar.
            elbo_running_mean.update(elbo.mean().item())
            optimizer.step()

            # report training diagnostics
            if iteration % args.log_freq == 0:
                train_elbo.append(elbo_running_mean.avg)
                print('[iteration %03d] time: %.2f \tbeta %.2f \tlambda %.2f training ELBO: %.4f (%.4f)' % (
                    iteration, time.time() - batch_time, vae.beta, vae.lamb,
                    elbo_running_mean.val, elbo_running_mean.avg))

                vae.eval()

                # plot training and test ELBOs
                if args.visdom:
                    display_samples(vae, x, vis)
                    plot_elbo(train_elbo, vis)

                utils.save_checkpoint({
                    'state_dict': vae.state_dict(),
                    'args': args}, args.save, 0)
                eval('plot_vs_gt_' + args.dataset)(vae, train_loader.dataset,
                                                   os.path.join(args.save, 'gt_vs_latent_{:05d}.png'.format(iteration)))

    # Report statistics after training
    vae.eval()
    utils.save_checkpoint({
        'state_dict': vae.state_dict(),
        'args': args}, args.save, 0)
    dataset_loader = DataLoader(train_loader.dataset, batch_size=1000, num_workers=1, shuffle=False)
    logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy = \
        elbo_decomposition(vae, dataset_loader)
    torch.save({
        'logpx': logpx,
        'dependence': dependence,
        'information': information,
        'dimwise_kl': dimwise_kl,
        'analytical_cond_kl': analytical_cond_kl,
        'marginal_entropies': marginal_entropies,
        'joint_entropy': joint_entropy
    }, os.path.join(args.save, 'elbo_decomposition.pth'))
    eval('plot_vs_gt_' + args.dataset)(vae, dataset_loader.dataset, os.path.join(args.save, 'gt_vs_latent.png'))
    return vae
if __name__ == '__main__':
    # Keep a module-level handle on the trained model for interactive use.
    model = main()
| 18,265 | 36.975052 | 120 | py |
beta-tcvae | beta-tcvae-master/plot_latent_vs_true.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import brewer2mpl
# Qualitative 3-color palette used to distinguish the three dSprites shapes.
bmap = brewer2mpl.get_map('Set1', 'qualitative', 3)
colors = bmap.mpl_colors
plt.style.use('ggplot')

# A latent z_j counts as "active" when Var_x(E[z_j|x]) exceeds this threshold.
VAR_THRESHOLD = 1e-2
def plot_vs_gt_shapes(vae, shapes_dataset, save, z_inds=None):
    """Plot each active latent's posterior mean against the dSprites
    ground-truth factors (position heat map, scale curves, rotation curves)
    and save the figure to ``save``.

    z_inds optionally restricts which latent dimensions are plotted;
    by default the automatically detected active units are used.
    Requires CUDA.
    """
    dataset_loader = DataLoader(shapes_dataset, batch_size=1000, num_workers=1, shuffle=False)

    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()

    # print('Computing q(z|x) distributions.')
    qz_params = torch.Tensor(N, K, nparams)
    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size

    # Factor grid: (shape, scale, orientation, posX, posY).
    qz_params = qz_params.view(3, 6, 40, 32, 32, K, nparams)

    # z_j is inactive if Var_x(E[z_j|x]) < eps.
    qz_means = qz_params[:, :, :, :, :, :, 0]
    var = torch.std(qz_means.contiguous().view(N, K), dim=0).pow(2)
    active_units = torch.arange(0, K)[var > VAR_THRESHOLD].long()
    print('Active units: ' + ','.join(map(str, active_units.tolist())))
    n_active = len(active_units)
    print('Number of active units: {}/{}'.format(n_active, vae.z_dim))

    if z_inds is None:
        z_inds = active_units

    # subplots where subplot[i, j] is gt_i vs. z_j
    # Average the posterior means over all factors except the one plotted.
    mean_scale = qz_means.mean(2).mean(2).mean(2)     # (shape, scale, latent)
    mean_rotation = qz_means.mean(1).mean(2).mean(2)  # (shape, rotation, latent)
    mean_pos = qz_means.mean(0).mean(0).mean(0)       # (pos_x, pos_y, latent)

    fig = plt.figure(figsize=(3, len(z_inds)))  # default is (8,6)
    gs = gridspec.GridSpec(len(z_inds), 3)
    gs.update(wspace=0, hspace=0)  # set the spacing between axes.

    # Column 1: position heat maps (shared color scale across latents).
    vmin_pos = torch.min(mean_pos)
    vmax_pos = torch.max(mean_pos)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[i * 3])
        ax.imshow(mean_pos[:, :, j].numpy(), cmap=plt.get_cmap('coolwarm'), vmin=vmin_pos, vmax=vmax_pos)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_ylabel(r'$z_' + str(j) + r'$')
        if i == len(z_inds) - 1:
            ax.set_xlabel(r'pos')

    # Column 2: scale response, one curve per shape.
    vmin_scale = torch.min(mean_scale)
    vmax_scale = torch.max(mean_scale)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[1 + i * 3])
        ax.plot(mean_scale[0, :, j].numpy(), color=colors[2])
        ax.plot(mean_scale[1, :, j].numpy(), color=colors[0])
        ax.plot(mean_scale[2, :, j].numpy(), color=colors[1])
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == len(z_inds) - 1:
            ax.set_xlabel(r'scale')

    # Column 3: rotation response, one curve per shape.
    vmin_rotation = torch.min(mean_rotation)
    vmax_rotation = torch.max(mean_rotation)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[2 + i * 3])
        ax.plot(mean_rotation[0, :, j].numpy(), color=colors[2])
        ax.plot(mean_rotation[1, :, j].numpy(), color=colors[0])
        ax.plot(mean_rotation[2, :, j].numpy(), color=colors[1])
        ax.set_ylim([vmin_rotation, vmax_rotation])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == len(z_inds) - 1:
            ax.set_xlabel(r'rotation')

    fig.text(0.5, 0.03, 'Ground Truth', ha='center')
    fig.text(0.01, 0.5, 'Learned Latent Variables ', va='center', rotation='vertical')
    plt.savefig(save)
    plt.close()
def plot_vs_gt_faces(vae, faces_dataset, save, z_inds=None):
    """Plot each active latent's posterior mean against the 3D-faces
    ground-truth factors (azimuth, elevation, lighting) and save the figure
    to ``save``.

    z_inds optionally restricts which latent dimensions are plotted;
    by default the automatically detected active units are used.
    Requires CUDA.
    """
    dataset_loader = DataLoader(faces_dataset, batch_size=1000, num_workers=1, shuffle=False)

    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()

    # print('Computing q(z|x) distributions.')
    qz_params = torch.Tensor(N, K, nparams)
    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size

    # Factor grid: (identity, azimuth, elevation, lighting).
    qz_params = qz_params.view(50, 21, 11, 11, K, nparams)

    # z_j is inactive if Var_x(E[z_j|x]) < eps.
    qz_means = qz_params[:, :, :, :, :, 0]
    var = torch.std(qz_means.contiguous().view(N, K), dim=0).pow(2)
    active_units = torch.arange(0, K)[var > VAR_THRESHOLD].long()
    print('Active units: ' + ','.join(map(str, active_units.tolist())))
    n_active = len(active_units)
    print('Number of active units: {}/{}'.format(n_active, vae.z_dim))

    if z_inds is None:
        z_inds = active_units

    # subplots where subplot[i, j] is gt_i vs. z_j
    # Average posterior means over all factors except the one plotted.
    mean_pose_az = qz_means.mean(3).mean(2).mean(0)   # (pose_az, latent)
    mean_pose_el = qz_means.mean(3).mean(1).mean(0)   # (pose_el, latent)
    mean_light_az = qz_means.mean(2).mean(1).mean(0)  # (light_az, latent)

    fig = plt.figure(figsize=(len(z_inds), 3))  # default is (8,6)
    gs = gridspec.GridSpec(3, len(z_inds))
    gs.update(wspace=0, hspace=0)  # set the spacing between axes.

    # Row 1: azimuth response per latent.
    vmin_scale = torch.min(mean_pose_az)
    vmax_scale = torch.max(mean_pose_az)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[i])
        ax.plot(mean_pose_az[:, j].numpy())
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == 0:
            ax.set_ylabel(r'azimuth')

    # Row 2: elevation response per latent.
    vmin_scale = torch.min(mean_pose_el)
    vmax_scale = torch.max(mean_pose_el)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[len(z_inds) + i])
        ax.plot(mean_pose_el[:, j].numpy())
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == 0:
            ax.set_ylabel(r'elevation')

    # Row 3: lighting response per latent.
    vmin_scale = torch.min(mean_light_az)
    vmax_scale = torch.max(mean_light_az)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[2 * len(z_inds) + i])
        ax.plot(mean_light_az[:, j].numpy())
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == 0:
            ax.set_ylabel(r'lighting')

    plt.suptitle('GT Factors vs. Latent Variables')
    plt.savefig(save)
    plt.close()
if __name__ == '__main__':
    # CLI entry point: load a checkpoint and plot latents vs. ground truth
    # (optionally also printing the ELBO decomposition).
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-checkpt', required=True)
    parser.add_argument('-zs', type=str, default=None)
    parser.add_argument('-gpu', type=int, default=0)
    parser.add_argument('-save', type=str, default='latent_vs_gt.pdf')
    parser.add_argument('-elbo_decomp', action='store_true')
    args = parser.parse_args()

    # Imports deferred to the entry point to avoid circular imports when this
    # module is imported for its plotting functions only.
    from elbo_decomposition import elbo_decomposition
    import lib.dist as dist
    import lib.flows as flows
    from vae_quant import VAE, setup_data_loaders

    def load_model_and_dataset(checkpt_filename):
        """Rebuild the VAE and its training DataLoader from a checkpoint file.

        Returns (vae, train_loader, checkpoint_args).
        """
        print('Loading model and dataset.')
        checkpt = torch.load(checkpt_filename, map_location=lambda storage, loc: storage)
        args = checkpt['args']
        state_dict = checkpt['state_dict']

        # model: reconstruct the distribution families saved in the args
        # (older checkpoints without args.dist default to normal).
        if not hasattr(args, 'dist') or args.dist == 'normal':
            prior_dist = dist.Normal()
            q_dist = dist.Normal()
        elif args.dist == 'laplace':
            prior_dist = dist.Laplace()
            q_dist = dist.Laplace()
        elif args.dist == 'flow':
            prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=4)
            q_dist = dist.Normal()
        vae = VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.load_state_dict(state_dict, strict=False)

        # dataset loader
        loader = setup_data_loaders(args)
        return vae, loader, args

    # Parse comma-separated latent indices, e.g. "-zs 0,2,5".
    z_inds = list(map(int, args.zs.split(','))) if args.zs is not None else None
    torch.cuda.set_device(args.gpu)
    vae, dataset_loader, cpargs = load_model_and_dataset(args.checkpt)
    if args.elbo_decomp:
        elbo_decomposition(vae, dataset_loader)
    # Dispatch to plot_vs_gt_shapes / plot_vs_gt_faces by dataset name.
    eval('plot_vs_gt_' + cpargs.dataset)(vae, dataset_loader.dataset, args.save, z_inds)
| 8,973 | 36.864979 | 109 | py |
beta-tcvae | beta-tcvae-master/elbo_decomposition.py | import os
import math
from numbers import Number
from tqdm import tqdm
import torch
from torch.autograd import Variable
import lib.dist as dist
import lib.flows as flows
def estimate_entropies(qz_samples, qz_params, q_dist):
    """Computes the term:
        E_{p(x)} E_{q(z|x)} [-log q(z)]
    and
        E_{p(x)} E_{q(z_j|x)} [-log q(z_j)]
    where q(z) = 1/N sum_n=1^N q(z|x_n).

    Assumes samples are from q(z|x) for *all* x in the dataset.
    Assumes that q(z|x) is factorial ie. q(z|x) = prod_j q(z_j|x).

    Computes numerically stable NLL:
        - log q(z) = log N - logsumexp_n=1^N log q(z|x_n)

    Requires CUDA; at most 10000 samples are used for the estimate.

    Inputs:
    -------
        qz_samples (K, S) Variable
        qz_params  (N, K, nparams) Variable

    Returns:
    --------
        marginal_entropies (K,) tensor, joint_entropy (1,) tensor
    """
    # Only take a sample subset of the samples
    qz_samples = qz_samples.index_select(1, Variable(torch.randperm(qz_samples.size(1))[:10000].cuda()))
    K, S = qz_samples.size()
    N, _, nparams = qz_params.size()
    assert(nparams == q_dist.nparams)
    assert(K == qz_params.size(1))
    marginal_entropies = torch.zeros(K).cuda()
    joint_entropy = torch.zeros(1).cuda()
    pbar = tqdm(total=S)
    k = 0
    # Process samples in chunks of 10 to bound the N x K x chunk memory of
    # the expanded density evaluation below.
    while k < S:
        batch_size = min(10, S - k)
        # log q(z_j^{(s)} | x_n) for every (n, j, s) in the chunk.
        logqz_i = q_dist.log_density(
            qz_samples.view(1, K, S).expand(N, K, S)[:, :, k:k + batch_size],
            qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)[:, :, k:k + batch_size])
        k += batch_size
        # computes - log q(z_i) summed over minibatch
        marginal_entropies += (math.log(N) - logsumexp(logqz_i, dim=0, keepdim=False).data).sum(1)
        # computes - log q(z) summed over minibatch (factorial q => sum over j)
        logqz = logqz_i.sum(1)  # (N, S)
        joint_entropy += (math.log(N) - logsumexp(logqz, dim=0, keepdim=False).data).sum(0)
        pbar.update(batch_size)
    pbar.close()
    # Average the accumulated sums over the S samples.
    marginal_entropies /= S
    joint_entropy /= S
    return marginal_entropies, joint_entropy
def logsumexp(value, dim=None, keepdim=False):
    """Numerically stable equivalent of ``value.exp().sum(dim, keepdim).log()``.

    Shifts by the maximum before exponentiating so large magnitudes do not
    overflow. When ``dim`` is ``None`` the reduction is over all elements.
    """
    if dim is not None:
        vmax, _ = torch.max(value, dim=dim, keepdim=True)
        shifted = value - vmax
        if keepdim is False:
            vmax = vmax.squeeze(dim)
        return vmax + torch.log(
            torch.sum(torch.exp(shifted), dim=dim, keepdim=keepdim))
    # Full reduction to a scalar.
    vmax = torch.max(value)
    sum_exp = torch.sum(torch.exp(value - vmax))
    if isinstance(sum_exp, Number):
        # Older torch versions may return a Python number here.
        return vmax + math.log(sum_exp)
    return vmax + torch.log(sum_exp)
def analytical_NLL(qz_params, q_dist, prior_dist, qz_samples=None):
    """Analytically evaluate per-dimension negative log-likelihoods.

    Computes
        1/N sum_n E_{q(z|x_n)} [ - log q(z|x_n) ]   and
        1/N sum_n E_{q(z_j|x_n)} [ - log p(z_j) ]
    averaged over the data dimension.

    Inputs:
    -------
        qz_params (N, K, nparams) Variable

    Returns:
    --------
        (nlogqz_condx, nlogpz), each of shape (K,).

    ``qz_samples`` is accepted for interface compatibility but unused.
    """
    # E_{q(z|x)}[-log q(z|x)], averaged over the N data points.
    nlogqz_condx = q_dist.NLL(qz_params).mean(0)
    # The prior's parameters are all zeros, broadcast to the posterior shape.
    zeros = torch.zeros(1).type_as(qz_params.data).expand(qz_params.size())
    prior_params = Variable(zeros, volatile=True)
    nlogpz = prior_dist.NLL(prior_params, qz_params).mean(0)
    return nlogqz_condx, nlogpz
def elbo_decomposition(vae, dataset_loader):
    """Decompose the ELBO of ``vae`` over the whole dataset.

    Splits E[KL(q(z|x)||p(z))] into dependence (total correlation),
    information, and dimension-wise KL terms, and estimates the
    reconstruction term. Requires CUDA.

    Returns:
        (logpx, dependence, information, dimwise_kl, analytical_cond_kl,
         marginal_entropies, joint_entropy)
    """
    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    S = 1                            # number of latent variable samples
    nparams = vae.q_dist.nparams
    print('Computing q(z|x) distributions.')
    # compute the marginal q(z_j|x_n) distributions
    qz_params = torch.Tensor(N, K, nparams)
    n = 0
    logpx = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, -1, 64, 64).cuda(), volatile=True)
        z_params = vae.encoder.forward(xs).view(batch_size, K, nparams)
        qz_params[n:n + batch_size] = z_params.data
        n += batch_size
        # estimate reconstruction term
        for _ in range(S):
            z = vae.q_dist.sample(params=z_params)
            x_params = vae.decoder.forward(z)
            logpx += vae.x_dist.log_density(xs, params=x_params).view(batch_size, -1).data.sum()
    # Reconstruction term
    logpx = logpx / (N * S)
    qz_params = Variable(qz_params.cuda(), volatile=True)
    print('Sampling from q(z).')
    # sample S times from each marginal q(z_j|x_n)
    qz_params_expanded = qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)
    qz_samples = vae.q_dist.sample(params=qz_params_expanded)
    # Flatten to (K, N*S) as expected by estimate_entropies.
    qz_samples = qz_samples.transpose(0, 1).contiguous().view(K, N * S)
    print('Estimating entropies.')
    marginal_entropies, joint_entropy = estimate_entropies(qz_samples, qz_params, vae.q_dist)
    # Prefer the analytical NLL when the distribution provides one;
    # otherwise fall back to a sample-based estimate.
    if hasattr(vae.q_dist, 'NLL'):
        nlogqz_condx = vae.q_dist.NLL(qz_params).mean(0)
    else:
        nlogqz_condx = - vae.q_dist.log_density(qz_samples,
            qz_params_expanded.transpose(0, 1).contiguous().view(K, N * S)).mean(1)
    if hasattr(vae.prior_dist, 'NLL'):
        pz_params = vae._get_prior_params(N * K).contiguous().view(N, K, -1)
        nlogpz = vae.prior_dist.NLL(pz_params, qz_params).mean(0)
    else:
        nlogpz = - vae.prior_dist.log_density(qz_samples.transpose(0, 1)).mean(0)
    # nlogqz_condx, nlogpz = analytical_NLL(qz_params, vae.q_dist, vae.prior_dist)
    nlogqz_condx = nlogqz_condx.data
    nlogpz = nlogpz.data
    # Independence term
    # KL(q(z)||prod_j q(z_j)) = log q(z) - sum_j log q(z_j)
    dependence = (- joint_entropy + marginal_entropies.sum())[0]
    # Information term
    # KL(q(z|x)||q(z)) = log q(z|x) - log q(z)
    information = (- nlogqz_condx.sum() + joint_entropy)[0]
    # Dimension-wise KL term
    # sum_j KL(q(z_j)||p(z_j)) = sum_j (log q(z_j) - log p(z_j))
    dimwise_kl = (- marginal_entropies + nlogpz).sum()
    # Compute sum of terms analytically
    # KL(q(z|x)||p(z)) = log q(z|x) - log p(z)
    analytical_cond_kl = (- nlogqz_condx + nlogpz).sum()
    print('Dependence: {}'.format(dependence))
    print('Information: {}'.format(information))
    print('Dimension-wise KL: {}'.format(dimwise_kl))
    print('Analytical E_p(x)[ KL(q(z|x)||p(z)) ]: {}'.format(analytical_cond_kl))
    print('Estimated ELBO: {}'.format(logpx - analytical_cond_kl))
    return logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # Path to a training checkpoint containing 'args' and 'state_dict'.
    parser.add_argument('-checkpt', required=True)
    # Directory in which to save elbo_decomposition.pth.
    parser.add_argument('-save', type=str, default='.')
    parser.add_argument('-gpu', type=int, default=0)
    args = parser.parse_args()
    def load_model_and_dataset(checkpt_filename):
        """Rebuild the VAE and its data loader from a saved checkpoint."""
        checkpt = torch.load(checkpt_filename)
        args = checkpt['args']
        state_dict = checkpt['state_dict']
        # backwards compatibility
        if not hasattr(args, 'conv'):
            args.conv = False
        from vae_quant import VAE, setup_data_loaders
        # model
        if args.dist == 'normal':
            prior_dist = dist.Normal()
            q_dist = dist.Normal()
        elif args.dist == 'laplace':
            prior_dist = dist.Laplace()
            q_dist = dist.Laplace()
        elif args.dist == 'flow':
            prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=32)
            q_dist = dist.Normal()
        vae = VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        # strict=False tolerates missing/extra keys from older model versions.
        vae.load_state_dict(state_dict, strict=False)
        vae.eval()
        # dataset loader
        loader = setup_data_loaders(args, use_cuda=True)
        return vae, loader
    torch.cuda.set_device(args.gpu)
    vae, dataset_loader = load_model_and_dataset(args.checkpt)
    logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy = \
        elbo_decomposition(vae, dataset_loader)
    # Persist all decomposition terms for later analysis/plotting.
    torch.save({
        'logpx': logpx,
        'dependence': dependence,
        'information': information,
        'dimwise_kl': dimwise_kl,
        'analytical_cond_kl': analytical_cond_kl,
        'marginal_entropies': marginal_entropies,
        'joint_entropy': joint_entropy
    }, os.path.join(args.save, 'elbo_decomposition.pth'))
| 8,303 | 34.33617 | 109 | py |
beta-tcvae | beta-tcvae-master/metric_helpers/mi_metric.py | import torch
# Name of the metric function (defined in this module) used for scoring.
metric_name = 'MIG'


def MIG(mi_normed):
    """Mutual Information Gap: mean gap between the two largest normalized
    mutual informations of each ground-truth factor."""
    return torch.mean(mi_normed[:, 0] - mi_normed[:, 1])


def _compute_metric(marginal_entropies, cond_entropies, factor_entropies):
    """Shared implementation for the per-dataset metric functions.

    Args:
        marginal_entropies: (K,) tensor of latent entropies H(z_j).
        cond_entropies: (num_factors, K) tensor of H(z_j | v_i).
        factor_entropies: per-factor cardinalities; log(cardinality) is the
            normalizer H(v_i) for each row.

    Returns:
        Scalar tensor: the metric named by ``metric_name``.
    """
    # I(z_j; v_i) = H(z_j) - H(z_j | v_i)
    mutual_infos = marginal_entropies[None] - cond_entropies
    # Sort each factor's MIs descending; clamp small negative estimates to 0.
    mutual_infos = torch.sort(mutual_infos, dim=1, descending=True)[0].clamp(min=0)
    # Normalize each row by H(v_i) so values lie in [0, 1].
    mi_normed = mutual_infos / torch.Tensor(factor_entropies).log()[:, None]
    # Dispatch by name via module globals instead of eval().
    metric_fn = globals()[metric_name]
    return metric_fn(mi_normed)


def compute_metric_shapes(marginal_entropies, cond_entropies):
    """Disentanglement metric for the dSprites ('shapes') dataset."""
    # Ground-truth factor cardinalities for this dataset.
    return _compute_metric(marginal_entropies, cond_entropies, [6, 40, 32, 32])


def compute_metric_faces(marginal_entropies, cond_entropies):
    """Disentanglement metric for the Basel faces dataset."""
    # Ground-truth factor cardinalities for this dataset.
    return _compute_metric(marginal_entropies, cond_entropies, [21, 11, 11])
| 882 | 31.703704 | 83 | py |
beta-tcvae | beta-tcvae-master/metric_helpers/loader.py | import torch
import lib.dist as dist
import lib.flows as flows
import vae_quant
def load_model_and_dataset(checkpt_filename):
    """Rebuild a VAE (or InfoGAN-wrapped VAE) and its dataset from a checkpoint.

    Returns:
        (vae, dataset, args) where ``args`` are the training-time arguments
        stored in the checkpoint.
    """
    print('Loading model and dataset.')
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpt = torch.load(checkpt_filename, map_location=lambda storage, loc: storage)
    args = checkpt['args']
    state_dict = checkpt['state_dict']
    # backwards compatibility
    if not hasattr(args, 'conv'):
        args.conv = False
    # Older checkpoints have no 'dist' attribute; default to Normal.
    if not hasattr(args, 'dist') or args.dist == 'normal':
        prior_dist = dist.Normal()
        q_dist = dist.Normal()
    elif args.dist == 'laplace':
        prior_dist = dist.Laplace()
        q_dist = dist.Laplace()
    elif args.dist == 'flow':
        prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=32)
        q_dist = dist.Normal()
    # model
    if hasattr(args, 'ncon'):
        # InfoGAN
        # NOTE(review): 'infogan' is not imported anywhere in this module's
        # visible imports — taking this branch raises NameError. Confirm the
        # intended module and add the import.
        model = infogan.Model(
            args.latent_dim, n_con=args.ncon, n_cat=args.ncat, cat_dim=args.cat_dim, use_cuda=True, conv=args.conv)
        model.load_state_dict(state_dict, strict=False)
        # Wrap the InfoGAN encoder/decoder in a VAE shell for downstream code.
        vae = vae_quant.VAE(
            z_dim=args.ncon, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.encoder = model.encoder
        vae.decoder = model.decoder
    else:
        vae = vae_quant.VAE(
            z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.load_state_dict(state_dict, strict=False)
    # dataset loader
    loader = vae_quant.setup_data_loaders(args)
    return vae, loader.dataset, args
| 1,550 | 33.466667 | 115 | py |
beta-tcvae | beta-tcvae-master/lib/functions.py | import torch
from torch.autograd import Function
class STHeaviside(Function):
    """Heaviside step with a straight-through gradient estimator.

    Forward maps ``x >= 0`` to 1 and ``x < 0`` to 0; backward passes the
    incoming gradient through unchanged, as if the step were the identity.
    """

    @staticmethod
    def forward(ctx, x):
        # The comparison yields a mask; cast it back to x's dtype/device.
        return (x >= 0).type_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: identity gradient.
        return grad_output
| 290 | 17.1875 | 44 | py |
beta-tcvae | beta-tcvae-master/lib/utils.py | from numbers import Number
import math
import torch
import os
def save_checkpoint(state, save, epoch):
    """Serialize ``state`` to ``<save>/checkpt-<epoch>.pth``.

    The target directory is created if it does not already exist.
    """
    if not os.path.exists(save):
        os.makedirs(save)
    torch.save(state, os.path.join(save, 'checkpt-%04d.pth' % epoch))
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: most recent value; sum/count back the running mean in avg.
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Fold in ``val``, weighted as ``n`` observations."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Exponential moving average of a stream of values."""

    def __init__(self, momentum=0.97):
        # momentum: fraction of the previous average kept on each update.
        self.momentum = momentum
        self.reset()

    def reset(self):
        self.val = None
        self.avg = 0

    def update(self, val):
        """Blend ``val`` into the running average (first call seeds it)."""
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.momentum * self.avg + (1 - self.momentum) * val
        self.val = val
def isnan(tensor):
    """Elementwise NaN mask: NaN is the only value that compares unequal
    to itself."""
    return (tensor != tensor)
def logsumexp(value, dim=None, keepdim=False):
    """Stable ``log(sum(exp(value)))`` along ``dim`` (or over all elements).

    Subtracts the maximum before exponentiating to avoid overflow.
    """
    if dim is None:
        # Global reduction over every element.
        m = torch.max(value)
        total = torch.sum(torch.exp(value - m))
        # Older torch versions may return a Python number for 0-dim results.
        if isinstance(total, Number):
            return m + math.log(total)
        return m + torch.log(total)
    m, _ = torch.max(value, dim=dim, keepdim=True)
    centered = value - m
    if keepdim is False:
        m = m.squeeze(dim)
    return m + torch.log(
        torch.sum(torch.exp(centered), dim=dim, keepdim=keepdim))
| 1,828 | 23.716216 | 75 | py |
beta-tcvae | beta-tcvae-master/lib/datasets.py | import numpy as np
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
class Shapes(object):
    """dSprites binary-image dataset, served as float tensors of shape
    (1, 64, 64).

    If ``dataset_zip`` is not supplied, the npz archive is loaded from the
    default on-disk location.
    """

    def __init__(self, dataset_zip=None):
        loc = 'data/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'
        self.dataset_zip = (
            np.load(loc, encoding='latin1') if dataset_zip is None
            else dataset_zip
        )
        self.imgs = torch.from_numpy(self.dataset_zip['imgs']).float()

    def __len__(self):
        return self.imgs.size(0)

    def __getitem__(self, index):
        # Add a singleton channel dimension.
        return self.imgs[index].view(1, 64, 64)
class Dataset(object):
    """Generic 64x64 single-channel image dataset stored in a torch file.

    Pixel values are rescaled from [0, 255] to [0, 1] on load.
    """

    def __init__(self, loc):
        raw = torch.load(loc)
        self.dataset = raw.float().div(255).view(-1, 1, 64, 64)

    def __len__(self):
        return self.dataset.size(0)

    @property
    def ndim(self):
        # Channel dimension of the stored images.
        return self.dataset.size(1)

    def __getitem__(self, index):
        return self.dataset[index]
class Faces(Dataset):
    """Basel face renders dataset, loaded from a fixed on-disk location."""
    # Default path of the serialized face renders.
    LOC = 'data/basel_face_renders.pth'
    def __init__(self):
        # Delegate all loading/normalization to Dataset with the fixed path.
        return super(Faces, self).__init__(self.LOC)
| 1,102 | 22.978261 | 75 | py |
beta-tcvae | beta-tcvae-master/lib/dist.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from lib.functions import STHeaviside
eps = 1e-8
class Normal(nn.Module):
    """Samples from a Normal distribution using the reparameterization trick.
    """
    def __init__(self, mu=0, sigma=1):
        super(Normal, self).__init__()
        # log(2*pi) constant used in the Gaussian log-density.
        self.normalization = Variable(torch.Tensor([np.log(2 * np.pi)]))
        self.mu = Variable(torch.Tensor([mu]))
        # Stored in log-space for an unconstrained parameterization.
        self.logsigma = Variable(torch.Tensor([math.log(sigma)]))
    def _check_inputs(self, size, mu_logsigma):
        # Resolve (mu, logsigma): either from an explicit parameter tensor
        # whose last dimension stacks [mu, logsigma], or from this module's
        # own parameters expanded to `size`.
        if size is None and mu_logsigma is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and mu_logsigma is not None:
            mu = mu_logsigma.select(-1, 0).expand(size)
            logsigma = mu_logsigma.select(-1, 1).expand(size)
            return mu, logsigma
        elif size is not None:
            mu = self.mu.expand(size)
            logsigma = self.logsigma.expand(size)
            return mu, logsigma
        elif mu_logsigma is not None:
            mu = mu_logsigma.select(-1, 0)
            logsigma = mu_logsigma.select(-1, 1)
            return mu, logsigma
        else:
            raise ValueError(
                'Given invalid inputs: size={}, mu_logsigma={})'.format(
                    size, mu_logsigma))
    def sample(self, size=None, params=None):
        # Reparameterization: z = eps * sigma + mu, eps ~ N(0, 1).
        mu, logsigma = self._check_inputs(size, params)
        std_z = Variable(torch.randn(mu.size()).type_as(mu.data))
        sample = std_z * torch.exp(logsigma) + mu
        return sample
    def log_density(self, sample, params=None):
        """Elementwise log N(sample; mu, sigma^2)."""
        if params is not None:
            mu, logsigma = self._check_inputs(None, params)
        else:
            mu, logsigma = self._check_inputs(sample.size(), None)
            mu = mu.type_as(sample)
            logsigma = logsigma.type_as(sample)
        c = self.normalization.type_as(sample.data)
        inv_sigma = torch.exp(-logsigma)
        tmp = (sample - mu) * inv_sigma
        # -0.5 * ((x-mu)^2/sigma^2 + log sigma^2 + log 2*pi)
        return -0.5 * (tmp * tmp + 2 * logsigma + c)
    def NLL(self, params, sample_params=None):
        """Analytically computes
            E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]
        If mu_2, and sigma_2^2 are not provided, defaults to entropy.
        """
        mu, logsigma = self._check_inputs(None, params)
        if sample_params is not None:
            sample_mu, sample_logsigma = self._check_inputs(None, sample_params)
        else:
            sample_mu, sample_logsigma = mu, logsigma
        c = self.normalization.type_as(sample_mu.data)
        nll = logsigma.mul(-2).exp() * (sample_mu - mu).pow(2) \
            + torch.exp(sample_logsigma.mul(2) - logsigma.mul(2)) + 2 * logsigma + c
        return nll.mul(0.5)
    def kld(self, params):
        """Computes KL(q||p) where q is the given distribution and p
        is the standard Normal distribution.
        """
        mu, logsigma = self._check_inputs(None, params)
        # see Appendix B from VAE paper:
        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        # https://arxiv.org/abs/1312.6114
        # 0.5 * sum(1 + log(sigma^2) - mean^2 - sigma^2)
        kld = logsigma.mul(2).add(1) - mu.pow(2) - logsigma.exp().pow(2)
        kld.mul_(-0.5)
        return kld
    def get_params(self):
        # Concatenated [mu, logsigma] of this module's own parameters.
        return torch.cat([self.mu, self.logsigma])
    @property
    def nparams(self):
        # Two parameters per dimension: mu and logsigma.
        return 2
    @property
    def ndim(self):
        return 1
    @property
    def is_reparameterizable(self):
        return True
    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.mu.data[0], self.logsigma.exp().data[0])
        return tmpstr
class Laplace(nn.Module):
    """Samples from a Laplace distribution using the reparameterization trick.
    """
    def __init__(self, mu=0, scale=1):
        super(Laplace, self).__init__()
        # -log(2) constant used in the Laplace log-density.
        self.normalization = Variable(torch.Tensor([-math.log(2)]))
        self.mu = Variable(torch.Tensor([mu]))
        # Stored in log-space for an unconstrained parameterization.
        self.logscale = Variable(torch.Tensor([math.log(scale)]))
    def _check_inputs(self, size, mu_logscale):
        # Resolve (mu, logscale): either from an explicit parameter tensor
        # whose last dimension stacks [mu, logscale], or from this module's
        # own parameters expanded to `size`.
        if size is None and mu_logscale is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and mu_logscale is not None:
            mu = mu_logscale.select(-1, 0).expand(size)
            logscale = mu_logscale.select(-1, 1).expand(size)
            return mu, logscale
        elif size is not None:
            mu = self.mu.expand(size)
            logscale = self.logscale.expand(size)
            return mu, logscale
        elif mu_logscale is not None:
            mu = mu_logscale.select(-1, 0)
            logscale = mu_logscale.select(-1, 1)
            return mu, logscale
        else:
            raise ValueError(
                'Given invalid inputs: size={}, mu_logscale={})'.format(
                    size, mu_logscale))
    def sample(self, size=None, params=None):
        # Inverse-CDF reparameterization from a Unif(-0.5, 0.5) sample.
        mu, logscale = self._check_inputs(size, params)
        scale = torch.exp(logscale)
        # Unif(-0.5, 0.5)
        u = Variable(torch.rand(mu.size()).type_as(mu.data)) - 0.5
        sample = mu - scale * torch.sign(u) * torch.log(1 - 2 * torch.abs(u) + eps)
        return sample
    def log_density(self, sample, params=None):
        """Elementwise log Laplace(sample; mu, scale)."""
        if params is not None:
            mu, logscale = self._check_inputs(None, params)
        else:
            mu, logscale = self._check_inputs(sample.size(), None)
            mu = mu.type_as(sample)
            logscale = logscale.type_as(sample)
        c = self.normalization.type_as(sample.data)
        inv_scale = torch.exp(-logscale)
        ins_exp = - torch.abs(sample - mu) * inv_scale
        # -|x - mu|/scale - log(2) - log(scale)
        return ins_exp + c - logscale
    def get_params(self):
        # Concatenated [mu, logscale] of this module's own parameters.
        return torch.cat([self.mu, self.logscale])
    @property
    def nparams(self):
        # Two parameters per dimension: mu and logscale.
        return 2
    @property
    def ndim(self):
        return 1
    @property
    def is_reparameterizable(self):
        return True
    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.mu.data[0], self.logscale.exp().data[0])
        return tmpstr
class Bernoulli(nn.Module):
    """Samples from a Bernoulli distribution where the probability is given
    by the sigmoid of the given parameter.
    """
    def __init__(self, p=0.5, stgradient=False):
        super(Bernoulli, self).__init__()
        # Store the logit of p; eps guards against log(0).
        p = torch.Tensor([p])
        self.p = Variable(torch.log(p / (1 - p) + eps))
        # stgradient: keep the straight-through gradient attached to samples.
        self.stgradient = stgradient
    def _check_inputs(self, size, ps):
        # Resolve the pre-sigmoid logits from an explicit tensor or from this
        # module's own parameter expanded to `size`.
        if size is None and ps is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and ps is not None:
            if ps.ndimension() > len(size):
                # Drop a trailing singleton parameter dimension first.
                return ps.squeeze(-1).expand(size)
            else:
                return ps.expand(size)
        elif size is not None:
            return self.p.expand(size)
        elif ps is not None:
            return ps
        else:
            raise ValueError(
                'Given invalid inputs: size={}, ps={})'.format(size, ps))
    def _sample_logistic(self, size):
        # Sample from the standard logistic distribution via inverse CDF.
        u = Variable(torch.rand(size))
        l = torch.log(u + eps) - torch.log(1 - u + eps)
        return l
    def sample(self, size=None, params=None):
        # Logistic reparameterization + hard threshold (Heaviside);
        # gradients flow through only when stgradient is set.
        presigm_ps = self._check_inputs(size, params)
        logp = F.logsigmoid(presigm_ps)
        logq = F.logsigmoid(-presigm_ps)
        l = self._sample_logistic(logp.size()).type_as(presigm_ps)
        z = logp - logq + l
        b = STHeaviside.apply(z)
        return b if self.stgradient else b.detach()
    def log_density(self, sample, params=None):
        """Elementwise Bernoulli log-likelihood of a binary `sample`."""
        presigm_ps = self._check_inputs(sample.size(), params).type_as(sample)
        # Squash p away from exactly 0/1 to keep the logs finite.
        p = (F.sigmoid(presigm_ps) + eps) * (1 - 2 * eps)
        logp = sample * torch.log(p + eps) + (1 - sample) * torch.log(1 - p + eps)
        return logp
    def get_params(self):
        # The stored logit parameter.
        return self.p
    @property
    def nparams(self):
        return 1
    @property
    def ndim(self):
        return 1
    @property
    def is_reparameterizable(self):
        return self.stgradient
    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f})'.format(
            torch.sigmoid(self.p.data)[0])
        return tmpstr
| 8,542 | 32.501961 | 84 | py |
beta-tcvae | beta-tcvae-master/lib/flows.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from lib.dist import Normal
eps = 1e-8
class FactorialNormalizingFlow(nn.Module):
    """Normalizing flow applied independently to each of `dim` scalar
    coordinates: each of `nsteps` steps maps x -> x + u * tanh(w * x + b)
    elementwise, with a Normal base density.
    """
    def __init__(self, dim, nsteps):
        super(FactorialNormalizingFlow, self).__init__()
        self.dim = dim
        self.nsteps = nsteps
        # Base distribution evaluated after inverting the flow.
        self.x_dist = Normal()
        # Per-step, per-dimension flow parameters (u, w, b).
        self.scale = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.weight = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.bias = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.reset_parameters()
    def reset_parameters(self):
        # Small random init keeps each step close to the identity.
        self.scale.data.normal_(0, 0.02)
        self.weight.data.normal_(0, 0.02)
        self.bias.data.normal_(0, 0.02)
    def sample(self, batch_size):
        # Sampling from this flow is not supported.
        raise NotImplementedError
    def log_density(self, y, params=None):
        """Log-density of `y` (N, dim); `params` is accepted but unused."""
        assert(y.size(1) == self.dim)
        x = y
        # Accumulates elementwise log |d x_out / d x_in| across steps.
        logdetgrad = Variable(torch.zeros(y.size()).type_as(y.data))
        for i in range(self.nsteps):
            u = self.scale[i][None]
            w = self.weight[i][None]
            b = self.bias[i][None]
            act = F.tanh(x * w + b)
            x = x + u * act
            # d/dx (x + u*tanh(w*x+b)) = 1 + u * (1 - tanh^2) * w
            logdetgrad = logdetgrad + torch.log(torch.abs(1 + u * (1 - act.pow(2)) * w) + eps)
        # Change of variables: log p(y) = log p(x) + log |det J|.
        logpx = self.x_dist.log_density(x)
        logpy = logpx + logdetgrad
        return logpy
| 1,405 | 30.244444 | 94 | py |
gunpowder | gunpowder-master/gunpowder/__init__.py | from __future__ import absolute_import
from .nodes import *
from .array import Array, ArrayKey, ArrayKeys
from .array_spec import ArraySpec
from .batch import Batch
from .batch_request import BatchRequest
from .build import build
from .coordinate import Coordinate
from .graph import Graph, Node, Edge, GraphKey, GraphKeys
from .graph_spec import GraphSpec
from .pipeline import *
from .producer_pool import ProducerPool
from .provider_spec import ProviderSpec
from .roi import Roi
from .version_info import _version as version
import gunpowder.contrib
import gunpowder.tensorflow
import gunpowder.torch
import gunpowder.jax
import gunpowder.zoo
| 648 | 27.217391 | 57 | py |
gunpowder | gunpowder-master/gunpowder/torch/nodes/predict.py | from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import torch
from gunpowder.nodes.generic_predict import GenericPredict
import logging
from typing import Dict, Union
logger = logging.getLogger(__name__)
class Predict(GenericPredict):
    """Torch implementation of :class:`gunpowder.nodes.Predict`.

    Args:

        model (subclass of ``torch.nn.Module``):

            The model to use for prediction.

        inputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of input tensors (argument names of the
            ``forward`` method) in the model to array keys.

        outputs (``dict``, ``string`` or ``int`` -> :class:`ArrayKey`):

            Dictionary from the names of tensors in the network to array
            keys. If the key is a string, the tensor will be retrieved
            by checking the model for an attribute with the key as its name.
            If the key is an integer, it is interpreted as a tuple index of
            the outputs of the network.
            New arrays will be generated by this node for each entry (if
            requested downstream).

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            Used to set the specs of generated arrays (``outputs``). This is
            useful to set the ``voxel_size``, for example, if they differ from
            the voxel size of the input arrays. Only fields that are not
            ``None`` in the given :class:`ArraySpec` will be used.

        checkpoint: (``string``, optional):

            An optional path to the saved parameters for your torch module.
            These will be loaded and used for prediction if provided.

        device (``string``, optional):

            Which device to use for prediction (``"cpu"`` or ``"cuda"``).
            Default is ``"cuda"``, which falls back to CPU if CUDA is not
            available.

        spawn_subprocess (bool, optional): Whether to run ``predict`` in a
            separate process. Default is false.
    """

    def __init__(
        self,
        model,
        inputs: Dict[str, ArrayKey],
        outputs: Dict[Union[str, int], ArrayKey],
        array_specs: Dict[ArrayKey, ArraySpec] = None,
        checkpoint: str = None,
        device="cuda",
        spawn_subprocess=False,
    ):
        self.array_specs = array_specs if array_specs is not None else {}

        if model.training:
            logger.warning(
                "Model is in training mode during prediction. "
                "Consider using model.eval()"
            )

        super(Predict, self).__init__(
            inputs, outputs, array_specs, spawn_subprocess=spawn_subprocess
        )

        self.device_string = device
        self.device = None  # to be set in start()
        self.model = model
        self.checkpoint = checkpoint

        # Populated by forward hooks for string-named outputs.
        self.intermediate_layers = {}
        self.register_hooks()

    def start(self):
        """Move the model to the target device and optionally restore weights."""
        self.use_cuda = torch.cuda.is_available() and self.device_string == "cuda"
        logger.info(f"Predicting on {'gpu' if self.use_cuda else 'cpu'}")
        self.device = torch.device("cuda" if self.use_cuda else "cpu")

        try:
            self.model = self.model.to(self.device)
        except RuntimeError as e:
            raise RuntimeError(
                "Failed to move model to device. If you are using a child process "
                "to run your model, maybe you already initialized CUDA by sending "
                "your model to device in the main process."
            ) from e

        if self.checkpoint is not None:
            checkpoint = torch.load(self.checkpoint, map_location=self.device)
            if "model_state_dict" in checkpoint:
                # Checkpoint as produced by the gunpowder Train node.
                self.model.load_state_dict(checkpoint["model_state_dict"])
            else:
                # Treat the file itself as a raw state dict. (Fixes the
                # previous no-argument call to load_state_dict(), which
                # always raised a TypeError.)
                self.model.load_state_dict(checkpoint)

    def predict(self, batch, request):
        """Run one forward pass on ``batch`` and attach requested outputs."""
        inputs = self.get_inputs(batch)
        with torch.no_grad():
            out = self.model.forward(**inputs)
        outputs = self.get_outputs(out, request)
        self.update_batch(batch, request, outputs)

    def get_inputs(self, batch):
        # Map model argument names to device tensors built from batch arrays.
        model_inputs = {
            key: torch.as_tensor(batch[value].data, device=self.device)
            for key, value in self.inputs.items()
        }
        return model_inputs

    def register_hooks(self):
        # For outputs addressed by attribute name, capture the layer's output
        # via a forward hook at prediction time.
        for key in self.outputs:
            if isinstance(key, str):
                layer = getattr(self.model, key)
                layer.register_forward_hook(self.create_hook(key))

    def create_hook(self, key):
        # Returns a hook that stores the layer's output under `key`.
        def save_layer(module, input, output):
            self.intermediate_layers[key] = output

        return save_layer

    def get_outputs(self, module_out, request):
        """Collect requested tensors from the forward result or the hooks."""
        outputs = {}
        if isinstance(module_out, tuple):
            module_outs = module_out
        else:
            module_outs = (module_out,)
        for key, value in self.outputs.items():
            if value in request:
                if isinstance(key, str):
                    outputs[value] = self.intermediate_layers[key]
                elif isinstance(key, int):
                    outputs[value] = module_outs[key]
        return outputs

    def update_batch(self, batch, request, requested_outputs):
        # Wrap each output tensor in an Array with the requested ROI.
        for array_key, tensor in requested_outputs.items():
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(tensor.cpu().detach().numpy(), spec)

    def stop(self):
        pass
| 5,622 | 34.815287 | 83 | py |
gunpowder | gunpowder-master/gunpowder/torch/nodes/train.py | import logging
import numpy as np
from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import torch, tensorboardX, NoSuchModule
from gunpowder.nodes.generic_train import GenericTrain
from typing import Dict, Union, Optional
logger = logging.getLogger(__name__)
class Train(GenericTrain):
"""Torch implementation of :class:`gunpowder.nodes.GenericTrain`.
Args:
model (subclass of ``torch.nn.Module``):
The model to train.
loss:
The torch loss to use.
optimizer:
The torch optimizer to use.
inputs (``dict``, ``string`` -> :class:`ArrayKey`):
Dictionary from the names of input tensors (argument names of the
``forward`` method) in the model to array keys.
loss_inputs (``dict``, ``string`` or ``int`` -> :class:`ArrayKey`):
Dictionary with the names of input variables to the loss function as
keys, and ArrayKeys containing the desired data as values. Keys can
be either strings or integers. If the key is an integer, it will
be treated as a positional argument to the loss function, a
string will be used as a named argument
outputs (``dict``, ``string`` or ``int`` -> :class:`ArrayKey`):
Dictionary from the names of tensors in the network to array
keys. If the key is a string, the tensor will be retrieved
by checking the model for an attribute with they key as its name.
If the key is an integer, it is interpreted as a tuple index of
the outputs of the network.
New arrays will be generated by this node for each entry (if
requested downstream).
array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):
Used to set the specs of generated arrays (at the moment only
``output``). This is useful to set the ``voxel_size``, for example,
if they differ from the voxel size of the input arrays. Only fields
that are not ``None`` in the given :class:`ArraySpec` will be used.
checkpoint_basename (``string``, optional):
The basename used for checkpoint files. Defaults to ``model``.
save_every (``int``, optional):
After how many iterations to create a checkpoint to store the
learnt weights.
log_dir (``string``, optional):
Directory for saving tensorboard summaries.
log_every (``int``, optional):
After how many iterations to write out tensorboard summaries.
spawn_subprocess (``bool``, optional):
Whether to run the ``train_step`` in a separate process. Default is false.
"""
def __init__(
self,
model,
loss,
optimizer,
inputs: Dict[str, ArrayKey],
outputs: Dict[Union[int, str], ArrayKey],
loss_inputs: Dict[Union[int, str], ArrayKey],
gradients: Dict[Union[int, str], ArrayKey] = {},
array_specs: Optional[Dict[ArrayKey, ArraySpec]] = None,
checkpoint_basename: str = "model",
save_every: int = 2000,
log_dir: str = None,
log_every: int = 1,
spawn_subprocess: bool = False,
):
if not model.training:
logger.warning(
"Model is in evaluation mode during training. "
"Consider using model.train()"
)
# not yet implemented
gradients = gradients
inputs.update(
{k: v for k, v in loss_inputs.items() if v not in outputs.values()}
)
super(Train, self).__init__(
inputs, outputs, gradients, array_specs, spawn_subprocess=spawn_subprocess
)
self.model = model
self.loss = loss
self.optimizer = optimizer
self.loss_inputs = loss_inputs
self.checkpoint_basename = checkpoint_basename
self.save_every = save_every
self.iteration = 0
if not isinstance(tensorboardX, NoSuchModule) and log_dir is not None:
self.summary_writer = tensorboardX.SummaryWriter(log_dir)
self.log_every = log_every
else:
self.summary_writer = None
if log_dir is not None:
logger.warning("log_dir given, but tensorboardX is not installed")
self.intermediate_layers = {}
self.register_hooks()
def register_hooks(self):
for key in self.outputs:
if isinstance(key, str):
layer = getattr(self.model, key)
layer.register_forward_hook(self.create_hook(key))
def create_hook(self, key):
def save_layer(module, input, output):
self.intermediate_layers[key] = output
return save_layer
def retain_gradients(self, request, outputs):
for array_name, array_key in self.gradients.items():
if array_key not in request:
continue
if isinstance(array_name, int):
tensor = outputs[array_name]
elif isinstance(array_name, str):
tensor = getattr(self.model, array_name)
else:
raise RuntimeError(
"only ints and strings are supported as gradients keys"
)
tensor.retain_grad()
def start(self):
self.use_cuda = torch.cuda.is_available()
self.device = torch.device("cuda" if self.use_cuda else "cpu")
try:
self.model = self.model.to(self.device)
except RuntimeError as e:
raise RuntimeError(
"Failed to move model to device. If you are using a child process "
"to run your model, maybe you already initialized CUDA by sending "
"your model to device in the main process."
) from e
if isinstance(self.loss, torch.nn.Module):
self.loss = self.loss.to(self.device)
checkpoint, self.iteration = self._get_latest_checkpoint(
self.checkpoint_basename
)
if checkpoint is not None:
logger.info("Resuming training from iteration %d", self.iteration)
logger.info("Loading %s", checkpoint)
checkpoint = torch.load(checkpoint, map_location=self.device)
self.model.load_state_dict(checkpoint["model_state_dict"])
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
else:
logger.info("Starting training from scratch")
logger.info("Using device %s", self.device)
def train_step(self, batch, request):
inputs = self.__collect_provided_inputs(batch)
requested_outputs = self.__collect_requested_outputs(request)
# keys are argument names of model forward pass
device_inputs = {
k: torch.as_tensor(v, device=self.device) for k, v in inputs.items()
}
# get outputs. Keys are tuple indices or model attr names as in self.outputs
self.optimizer.zero_grad()
model_outputs = self.model(**device_inputs)
if isinstance(model_outputs, tuple):
outputs = {i: model_outputs[i] for i in range(len(model_outputs))}
elif isinstance(model_outputs, torch.Tensor):
outputs = {0: model_outputs}
else:
raise RuntimeError(
"Torch train node only supports return types of tuple",
f"and torch.Tensor from model.forward(). not {type(model_outputs)}",
)
outputs.update(self.intermediate_layers)
# Some inputs to the loss should come from the batch, not the model
provided_loss_inputs = self.__collect_provided_loss_inputs(batch)
device_loss_inputs = {
k: torch.as_tensor(v, device=self.device)
for k, v in provided_loss_inputs.items()
}
# Some inputs to the loss function should come from the outputs of the model
# Update device loss inputs with tensors from outputs if available
flipped_outputs = {v: outputs[k] for k, v in self.outputs.items()}
device_loss_inputs = {
k: flipped_outputs.get(v, device_loss_inputs.get(k))
for k, v in self.loss_inputs.items()
}
device_loss_args = []
for i in range(len(device_loss_inputs)):
if i in device_loss_inputs:
device_loss_args.append(device_loss_inputs.pop(i))
else:
break
device_loss_kwargs = {}
for k, v in list(device_loss_inputs.items()):
if isinstance(k, str):
device_loss_kwargs[k] = device_loss_inputs.pop(k)
assert (
len(device_loss_inputs) == 0
), f"Not all loss inputs could be interpreted. Failed keys: {device_loss_inputs.keys()}"
self.retain_gradients(request, outputs)
logger.debug("model outputs: %s", {k: v.shape for k, v in outputs.items()})
logger.debug(
"loss inputs: %s %s",
[v.shape for v in device_loss_args],
{k: v.shape for k, v in device_loss_kwargs.items()},
)
loss = self.loss(*device_loss_args, **device_loss_kwargs)
loss.backward()
self.optimizer.step()
# add requested model outputs to batch
for array_key, array_name in requested_outputs.items():
spec = self.spec[array_key].copy()
spec.roi = request[array_key].roi
batch.arrays[array_key] = Array(
outputs[array_name].cpu().detach().numpy(), spec
)
for array_name, array_key in self.gradients.items():
if array_key not in request:
continue
if isinstance(array_name, int):
tensor = outputs[array_name]
elif isinstance(array_name, str):
tensor = getattr(self.model, array_name)
else:
raise RuntimeError(
"only ints and strings are supported as gradients keys"
)
spec = self.spec[array_key].copy()
spec.roi = request[array_key].roi
batch.arrays[array_key] = Array(tensor.grad.cpu().detach().numpy(), spec)
for array_key, array_name in requested_outputs.items():
spec = self.spec[array_key].copy()
spec.roi = request[array_key].roi
batch.arrays[array_key] = Array(
outputs[array_name].cpu().detach().numpy(), spec
)
batch.loss = loss.cpu().detach().numpy()
self.iteration += 1
batch.iteration = self.iteration
if batch.iteration % self.save_every == 0:
checkpoint_name = self._checkpoint_name(
self.checkpoint_basename, batch.iteration
)
logger.info("Creating checkpoint %s", checkpoint_name)
torch.save(
{
"model_state_dict": self.model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
},
checkpoint_name,
)
if self.summary_writer and batch.iteration % self.log_every == 0:
self.summary_writer.add_scalar("loss", batch.loss, batch.iteration)
def __collect_requested_outputs(self, request):
array_outputs = {}
for output_name, array_key in self.outputs.items():
if array_key in request:
array_outputs[array_key] = output_name
return array_outputs
def __collect_provided_inputs(self, batch):
return self.__collect_provided_arrays(
{k: v for k, v in self.inputs.items() if k not in self.loss_inputs}, batch
)
def __collect_provided_loss_inputs(self, batch):
return self.__collect_provided_arrays(
self.loss_inputs, batch, expect_missing_arrays=True
)
def __collect_provided_arrays(self, reference, batch, expect_missing_arrays=False):
arrays = {}
for array_name, array_key in reference.items():
if isinstance(array_key, ArrayKey):
msg = f"batch does not contain {array_key}, array {array_name} will not be set"
if array_key in batch.arrays:
arrays[array_name] = batch.arrays[array_key].data
elif not expect_missing_arrays:
logger.warn(msg)
else:
logger.debug(msg)
elif isinstance(array_key, np.ndarray):
arrays[array_name] = array_key
elif isinstance(array_key, str):
arrays[array_name] = getattr(batch, array_key)
else:
raise Exception(
"Unknown network array key {}, can't be given to "
"network".format(array_key)
)
return arrays
| 13,024 | 36.002841 | 96 | py |
gunpowder | gunpowder-master/gunpowder/jax/generic_jax_model.py | class GenericJaxModel:
"""An interface for models to follow in order to train or predict. A model
implementing this interface will need to contain not only the forward
model but also loss and update fn. Some examples can be found in
https://github.com/funkelab/funlib.learn.jax
Args:
is_training (``bool``):
Indicating whether the model will be used for training
or inferencing.
"""
    def __init__(self, is_training):
        # The base class keeps no state; subclasses may use ``is_training``
        # to switch between training- and inference-time behavior.
        pass
def initialize(self, rng_key, inputs):
"""Initialize parameters for training.
Args:
rng_key (jax.random.PRNGKey):
Seed for parameter initialization
inputs (``dict``, ``string`` -> jnp.ndarray):
Dictionary of inputs, provided to initialize parameters
with the correct dimensions.
Return:
params (Any):
Function should return an object encapsulating different
parameters of the model.
"""
raise RuntimeError("Unimplemented")
def forward(self, params, inputs):
"""Run the forward model.
Args:
params (Any):
Model parameters.
inputs (``dict``, ``string`` -> jnp.ndarray):
Dictionary of inputs.
Return:
outputs (``dict``, ``string`` -> jnp.ndarray):
Dictionary of outputs.
"""
raise RuntimeError("Unimplemented")
def train_step(self, params, inputs, pmapped):
"""Run one iteration of training on the model.
Args:
params (Any):
Model parameters.
inputs (``dict``, ``string`` -> jnp.ndarray):
Dictionary of inputs.
pmapped (``bool``):
Whether the function is run with `jax.pmap` or not.
If pmapped across devices, the function should take care to
synchronize gradients during the train step.
The `axis_name` is set to the ``string`` "num_devices"
Return:
Tuple(new_params, outputs, loss)
new_params (Any):
Updated model parameters.
outputs (``dict``, ``string`` -> jnp.ndarray):
Dictionary of outputs.
loss (Union[``float``, (``dict``, ``string`` -> ``float``)]):
Loss value of this iteration. Value can either be a single
``float`` or a dictionary of multiple losses.
"""
raise RuntimeError("Unimplemented")
| 2,633 | 25.34 | 78 | py |
gunpowder | gunpowder-master/gunpowder/jax/__init__.py | from .generic_jax_model import GenericJaxModel
from .nodes import *
| 68 | 22 | 46 | py |
gunpowder | gunpowder-master/gunpowder/jax/nodes/predict.py | from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import jax
from gunpowder.nodes.generic_predict import GenericPredict
from gunpowder.jax import GenericJaxModel
import pickle
import logging
from typing import Dict, Union
logger = logging.getLogger(__name__)
class Predict(GenericPredict):
    """JAX implementation of :class:`gunpowder.nodes.Predict`.
    Args:
        model (subclass of ``gunpowder.jax.GenericJaxModel``):
            The model to use for prediction.
        inputs (``dict``, ``string`` -> :class:`ArrayKey`):
            Dictionary from the names of input tensors in the network to
            array keys.
        outputs (``dict``, ``string`` -> :class:`ArrayKey`):
            Dictionary from the names of output tensors in the network to array
            keys. New arrays will be generated by this node for each entry (if
            requested downstream).
        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):
            Used to set the specs of generated arrays (``outputs``). This is
            useful to set the ``voxel_size``, for example, if they differ from
            the voxel size of the input arrays. Only fields that are not
            ``None`` in the given :class:`ArraySpec` will be used.
        checkpoint: (``string``, optional):
            An optional path to the saved parameters for your jax module.
            These will be loaded and used for prediction if provided.
        spawn_subprocess (bool, optional): Whether to run ``predict`` in a
            separate process. Default is false.
    """
    def __init__(
        self,
        model: GenericJaxModel,
        inputs: Dict[str, ArrayKey],
        outputs: Dict[Union[str, int], ArrayKey],
        array_specs: Dict[ArrayKey, ArraySpec] = None,
        checkpoint: str = None,
        spawn_subprocess=False,
    ):
        self.array_specs = array_specs if array_specs is not None else {}
        super(Predict, self).__init__(
            inputs, outputs, array_specs, spawn_subprocess=spawn_subprocess
        )
        self.model = model
        self.checkpoint = checkpoint
        # parameters are restored in start() (from the checkpoint) or lazily
        # initialized on the first predict() call
        self.model_params = None
    def start(self):
        # called once in the worker before the first predict(); restores
        # saved parameters if a checkpoint path was given
        if self.checkpoint is not None:
            with open(self.checkpoint, "rb") as f:
                self.model_params = pickle.load(f)
    def predict(self, batch, request):
        # run one jitted forward pass and write requested outputs into batch
        inputs = self.get_inputs(batch)
        if self.model_params is None:
            # need to init model first
            rng = jax.random.PRNGKey(request.random_seed)
            self.model_params = self.model.initialize(rng, inputs)
        out = jax.jit(self.model.forward)(self.model_params, inputs)
        outputs = self.get_outputs(out, request)
        self.update_batch(batch, request, outputs)
    def get_inputs(self, batch):
        # move the batch arrays named in self.inputs onto the default device
        model_inputs = {
            key: jax.device_put(batch[value].data) for key, value in self.inputs.items()
        }
        return model_inputs
    def get_outputs(self, module_out, request):
        # keep only the model outputs whose array keys were requested
        outputs = {}
        for key, value in self.outputs.items():
            if value in request:
                outputs[value] = module_out[key]
        return outputs
    def update_batch(self, batch, request, requested_outputs):
        # wrap each output tensor in an Array carrying the requested ROI
        for array_key, tensor in requested_outputs.items():
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(tensor, spec)
    def stop(self):
        # nothing to release
        pass
| 3,564 | 32.317757 | 88 | py |
gunpowder | gunpowder-master/gunpowder/jax/nodes/train.py | import logging
import numpy as np
from gunpowder.ext import jax
from gunpowder.ext import jnp
import pickle
import os
from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import tensorboardX, NoSuchModule
from gunpowder.nodes.generic_train import GenericTrain
from gunpowder.jax import GenericJaxModel
from typing import Dict, Union, Optional
logger = logging.getLogger(__name__)
class Train(GenericTrain):
    """JAX implementation of :class:`gunpowder.nodes.GenericTrain`.
    Args:
        model (subclass of ``gunpowder.jax.GenericJaxModel``):
            The model to train. This model encapsulates the forward model,
            loss, and optimizer.
        inputs (``dict``, ``string`` -> Union[np.ndarray, ArrayKey]):
            Dictionary from the names of input tensors expected by the
            ``train_step`` method to array keys or ndarray.
        outputs (``dict``, ``string`` -> :class:`ArrayKey`):
            Dictionary from the names of tensors in the network to array
            keys. If the key is a string, the tensor will be retrieved
            by checking the model for an attribute with they key as its name.
            If the key is an integer, it is interpreted as a tuple index of
            the outputs of the network.
            New arrays will be generated by this node for each entry (if
            requested downstream).
        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):
            Used to set the specs of generated arrays (at the moment only
            ``output``). This is useful to set the ``voxel_size``, for example,
            if they differ from the voxel size of the input arrays. Only fields
            that are not ``None`` in the given :class:`ArraySpec` will be used.
        checkpoint_basename (``string``, optional):
            The basename used for checkpoint files. Defaults to ``model``.
        save_every (``int``, optional):
            After how many iterations to create a checkpoint to store the
            learnt weights.
        keep_n_checkpoints (``int``, optional):
            Number of checkpoints to keep. Node will attempt to delete older
            checkpoints. Default is `None` (no deletion).
        log_dir (``string``, optional):
            Directory for saving tensorboard summaries.
        log_every (``int``, optional):
            After how many iterations to write out tensorboard summaries.
        spawn_subprocess (``bool``, optional):
            Whether to run the ``train_step`` in a separate process. Default is
            false.
        n_devices (``int``, optional):
            Number of GPU devices to train on concurrently using `jax.pmap`. If
            `None`, the number of available GPUs will be automatically detected
            and used.
        validate_fn (function -> Union[``float``, (``dict``, ``string`` -> ``float``)] , optional):
            Function to run validation on, which should has the form of
            def validate_fn(model, params)
            where `model` is the same provided `GenericJaxModel` model and
            `params` is the parameter of this model, and returns either a
            ``float`` (one loss) or a dictionary of losses to record in
            tensorboard.
        validate_every (``int``, optional):
            After how many iterations to run `validate_fn`.
    """
    def __init__(
        self,
        model: GenericJaxModel,
        inputs: Dict[str, Union[np.ndarray, ArrayKey]],
        outputs: Dict[Union[int, str], ArrayKey],
        gradients: Optional[Dict[Union[int, str], ArrayKey]] = None,
        array_specs: Optional[Dict[ArrayKey, ArraySpec]] = None,
        checkpoint_basename: str = "model",
        save_every: int = 2000,
        keep_n_checkpoints: Optional[int] = None,
        log_dir: str = None,
        log_every: int = 1,
        spawn_subprocess: bool = False,
        n_devices: Optional[int] = None,
        validate_fn=None,
        validate_every=None,
    ):
        # gradients are not yet implemented for the JAX backend; the mapping
        # is accepted and forwarded for interface compatibility only.
        # (Default changed from a mutable ``{}`` to ``None`` to avoid the
        # shared-mutable-default pitfall; behavior is unchanged.)
        if gradients is None:
            gradients = {}
        super(Train, self).__init__(
            inputs, outputs, gradients, array_specs, spawn_subprocess=spawn_subprocess
        )
        self.model = model
        self.checkpoint_basename = checkpoint_basename
        self.save_every = save_every
        if n_devices is None:
            n_devices = jax.local_device_count()  # autodetect available GPUs
        self.n_devices = n_devices
        self.local_devices = jax.local_devices()
        self.keep_n_checkpoints = keep_n_checkpoints
        self.iteration = 0
        # always define log_every so the attribute exists even when no
        # summary writer is configured
        self.log_every = log_every
        if not isinstance(tensorboardX, NoSuchModule) and log_dir is not None:
            self.summary_writer = tensorboardX.SummaryWriter(log_dir)
        else:
            self.summary_writer = None
            if log_dir is not None:
                logger.warning("log_dir given, but tensorboardX is not installed")
        self.intermediate_layers = {}
        self.validate_fn = validate_fn
        self.validate_every = validate_every
    def replicate_params(self, params):
        """Stack each parameter leaf ``n_devices`` times for use with pmap."""
        return jax.tree_map(lambda x: jnp.array([x] * self.n_devices), params)
    def start(self):
        """Resume from the latest checkpoint, or start from scratch (params
        are then initialized lazily on the first train_step)."""
        checkpoint, self.iteration = self._get_latest_checkpoint(
            self.checkpoint_basename
        )
        if checkpoint is not None:
            logger.info("Resuming training from iteration %d", self.iteration)
            with open(checkpoint, "rb") as f:
                self.model_params = pickle.load(f)
            if self.n_devices > 1:
                self.model_params = self.replicate_params(self.model_params)
        else:
            logger.info("Starting training from scratch")
            self.model_params = None
    def split_inputs(self, inputs):
        """Reshape each input's batch dimension into per-device chunks."""
        for k, arr in inputs.items():
            assert arr.shape[0] % self.n_devices == 0, (
                f"Batch size should be evenly divisible by the number of "
                f"devices. Input array shape is {arr.shape} but n_device is"
                f" {self.n_devices}"
            )
            inputs[k] = arr.reshape(
                self.n_devices, arr.shape[0] // self.n_devices, *arr.shape[1:]
            )
            inputs[k] = [x for x in inputs[k]]  # make a sequence for put_sharded
        return inputs
    def unstack_device_outputs(self, outputs):
        """Merge the per-device leading axis back into the batch dimension."""
        for k, arr in outputs.items():
            outputs[k] = arr.reshape(arr.shape[0] * arr.shape[1], *arr.shape[2:])
        return outputs
    def train_step(self, batch, request):
        """Run one (possibly pmapped) training iteration and fill ``batch``.

        Fixes relative to the previous revision: the validation branch no
        longer crashes when ``validate_fn`` is configured without a
        ``log_dir`` (``summary_writer is None``) or without
        ``validate_every``.
        """
        inputs = self.__collect_provided_inputs(batch)
        if self.n_devices > 1:
            inputs = self.split_inputs(inputs)
        # put to device for max performance
        if self.n_devices > 1:
            for k, v in inputs.items():
                inputs[k] = jax.device_put_sharded(v, jax.local_devices())
        else:
            for k, v in inputs.items():
                inputs[k] = jax.device_put(v)
        # initialize model if necessary
        if self.model_params is None:
            # Use the random seed of first request to initialize model's weight
            rng = jax.random.PRNGKey(request.random_seed)
            if self.n_devices > 1:
                rng = jnp.broadcast_to(rng, (self.n_devices,) + rng.shape)
                self.model_params = jax.pmap(self.model.initialize)(rng, inputs)
            else:
                self.model_params = self.model.initialize(rng, inputs)
        requested_outputs = self.__collect_requested_outputs(request)
        if self.n_devices > 1:
            self.model_params, outputs, loss = jax.pmap(
                self.model.train_step,
                axis_name="num_devices",
                donate_argnums=(0,),
                static_broadcasted_argnums=(2,),
            )(self.model_params, inputs, True)
            loss = loss.mean()
            outputs = self.unstack_device_outputs(outputs)  # stack by batch
        else:
            self.model_params, outputs, loss = jax.jit(
                self.model.train_step, donate_argnums=(0,), static_argnums=(2,)
            )(self.model_params, inputs, False)
        logger.debug("model outputs: %s", {k: v.shape for k, v in outputs.items()})
        # add requested model outputs to batch
        for array_key, array_name in requested_outputs.items():
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(outputs[array_name], spec)
        if isinstance(loss, dict):
            total = 0.0
            for k, v in loss.items():
                total += v
            batch.loss = total
        else:
            batch.loss = loss
        self.iteration += 1
        batch.iteration = self.iteration
        if batch.iteration % self.save_every == 0:
            checkpoint_name = self._checkpoint_name(
                self.checkpoint_basename, batch.iteration
            )
            logger.info("Creating checkpoint %s", checkpoint_name)
            model_state = self.model_params
            if self.n_devices > 1:
                # get only a single copy of param for saving
                model_state = jax.tree_map(lambda x: x[0], model_state)
            with open(checkpoint_name, "wb") as f:
                pickle.dump(model_state, f)
            if self.keep_n_checkpoints:
                checkpoint_name = self._checkpoint_name(
                    self.checkpoint_basename,
                    batch.iteration - self.keep_n_checkpoints * self.save_every,
                )
                try:
                    os.remove(checkpoint_name)
                    logger.info("Removed checkpoint %s", checkpoint_name)
                except FileNotFoundError:
                    pass
        if self.summary_writer and batch.iteration % self.log_every == 0:
            if isinstance(loss, dict):
                for k, v in loss.items():
                    self.summary_writer.add_scalar(k, v, batch.iteration)
            else:
                self.summary_writer.add_scalar("loss", loss, batch.iteration)
        # run validation
        if (
            self.validate_fn is not None
            and self.validate_every is not None
            and batch.iteration % self.validate_every == 0
        ):
            val_ret = self.validate_fn(self.model, self.model_params)
            # validation can be configured without a log_dir, in which case
            # there is no summary writer to record the results
            if self.summary_writer is not None:
                if isinstance(val_ret, dict):
                    for k, v in val_ret.items():
                        self.summary_writer.add_scalar(k, v, batch.iteration)
                else:
                    self.summary_writer.add_scalar(
                        "validate", val_ret, batch.iteration
                    )
    def __collect_requested_outputs(self, request):
        """Map each requested array key to the model output name providing it."""
        array_outputs = {}
        for output_name, array_key in self.outputs.items():
            if array_key in request:
                array_outputs[array_key] = output_name
        return array_outputs
    def __collect_provided_inputs(self, batch):
        return self.__collect_provided_arrays(self.inputs, batch)
    def __collect_provided_arrays(self, reference, batch):
        """Resolve a ``name -> key`` mapping into concrete numpy arrays."""
        arrays = {}
        for array_name, array_key in reference.items():
            if isinstance(array_key, ArrayKey):
                msg = f"batch does not contain {array_key}, array {array_name} will not be set"
                if array_key in batch.arrays:
                    arrays[array_name] = batch.arrays[array_key].data
                else:
                    # logger.warn is deprecated; use logger.warning
                    logger.warning(msg)
            elif isinstance(array_key, np.ndarray):
                arrays[array_name] = array_key
            else:
                raise Exception(
                    "Unknown network array key {}, can't be given to "
                    "network".format(array_key)
                )
        return arrays
| 11,796 | 36.690096 | 99 | py |
gunpowder | gunpowder-master/gunpowder/ext/__init__.py | from __future__ import print_function
import logging
import traceback
import sys
logger = logging.getLogger(__name__)
class NoSuchModule(object):
    """Placeholder for an optional dependency that failed to import.

    Must be constructed inside the ``except ImportError`` handler: the
    original exception is captured from ``sys.exc_info()`` and re-raised on
    any attribute access, so the failure only surfaces when the missing
    module is actually used.
    """

    def __init__(self, name):
        self.__name = name
        exc_type, exc_value, exc_tb = sys.exc_info()
        self.__traceback_str = traceback.format_tb(exc_tb)
        self.__exception = exc_type(exc_value)

    def __getattr__(self, item):
        raise self.__exception
try:
import dvision
except ImportError as e:
dvision = NoSuchModule("dvision")
try:
import h5py
except ImportError as e:
h5py = NoSuchModule("h5py")
try:
import pyklb
except ImportError as e:
pyklb = NoSuchModule("pyklb")
try:
import tensorflow
except ImportError as e:
tensorflow = NoSuchModule("tensorflow")
try:
import torch
except ImportError as e:
torch = NoSuchModule("torch")
try:
import tensorboardX
except ImportError as e:
tensorboardX = NoSuchModule("tensorboardX")
try:
import malis
except ImportError as e:
malis = NoSuchModule("malis")
try:
import augment
except ImportError as e:
augment = NoSuchModule("augment")
try:
import zarr
from .zarr_file import ZarrFile
except ImportError as e:
zarr = NoSuchModule("zarr")
ZarrFile = None
try:
import daisy
except ImportError as e:
daisy = NoSuchModule("daisy")
try:
import jax
except ImportError as e:
jax = NoSuchModule("jax")
try:
import jax.numpy as jnp
except ImportError as e:
jnp = NoSuchModule("jnp")
try:
import haiku
except ImportError as e:
haiku = NoSuchModule("haiku")
try:
import optax
except ImportError as e:
optax = NoSuchModule("optax")
| 1,676 | 17.228261 | 69 | py |
gunpowder | gunpowder-master/tests/cases/jax_train.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
ArraySpec,
Roi,
Coordinate,
ArrayKeys,
ArrayKey,
Array,
Batch,
Scan,
PreCache,
build,
)
from gunpowder.ext import jax, haiku, optax, NoSuchModule
from gunpowder.jax import Train, Predict, GenericJaxModel
from unittest import skipIf, expectedFailure
import numpy as np
import logging
# Force JAX onto the CPU backend for these tests to avoid GPU/driver
# compatibility issues on the test machines.
if not isinstance(jax, NoSuchModule):
    jax.config.update("jax_platform_name", "cpu")
class ExampleJaxTrain2DSource(BatchProvider):
    """Provides a fixed 17x17 float32 array under key ``A``.

    The value at position (i, j) is ``i + j``, built as the sum of a column
    vector and its transpose.
    """

    def __init__(self):
        pass

    def setup(self):
        array_spec = ArraySpec(
            roi=Roi((0, 0), (17, 17)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, array_spec)

    def provide(self, request):
        batch = Batch()
        array_spec = self.spec[ArrayKeys.A]
        column = np.arange(17, dtype=np.float32).reshape(17, 1)
        grid = column + column.T
        batch.arrays[ArrayKeys.A] = Array(grid, array_spec).crop(
            request[ArrayKeys.A].roi
        )
        return batch
class ExampleJaxTrainSource(BatchProvider):
    """Provides constant 2x2 float32 arrays ``A``/``B`` ([[0,1],[2,3]]) and a
    nonspatial scalar array ``C`` ([1])."""

    def setup(self):
        spatial_spec = ArraySpec(
            roi=Roi((0, 0), (2, 2)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, spatial_spec)
        self.provides(ArrayKeys.B, spatial_spec)
        self.provides(ArrayKeys.C, ArraySpec(nonspatial=True))

    def provide(self, request):
        batch = Batch()
        for key in (ArrayKeys.A, ArrayKeys.B):
            spec = self.spec[key]
            spec.roi = request[key].roi
            # fresh array per key: [[0, 1], [2, 3]] as float32
            data = np.arange(4, dtype=np.float32).reshape(2, 2)
            batch.arrays[key] = Array(data, spec)
        batch.arrays[ArrayKeys.C] = Array(
            np.array([1], dtype=np.float32), self.spec[ArrayKeys.C]
        )
        return batch
@skipIf(isinstance(jax, NoSuchModule), "Jax is not installed")
class TestJaxTrain(ProviderTest):
    """Trains a one-weight linear model with the jax Train node and checks
    that the loss decreases monotonically, including across a checkpoint
    resume."""
    def test_output(self):
        logging.getLogger("gunpowder.jax.nodes.train").setLevel(logging.INFO)
        checkpoint_basename = self.path_to("model")
        ArrayKey("A")
        ArrayKey("B")
        ArrayKey("C")
        ArrayKey("C_PREDICTED")
        ArrayKey("C_GRADIENT")
        class ExampleModel(GenericJaxModel):
            # minimal GenericJaxModel: a single haiku Linear layer trained
            # with SGD to predict c from the elementwise product a*b
            def __init__(self, is_training):
                super().__init__(is_training)
                def _linear(x):
                    return haiku.Linear(1, False)(x)
                self.linear = haiku.without_apply_rng(haiku.transform(_linear))
                self.opt = optax.sgd(learning_rate=1e-7, momentum=0.999)
            def initialize(self, rng_key, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                weight = self.linear.init(rng_key, a * b)
                opt_state = self.opt.init(weight)
                return (weight, opt_state)
            def forward(self, params, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                return {"c": self.linear.apply(params[0], a * b)}
            def _loss_fn(self, weight, a, b, c):
                c_pred = self.linear.apply(weight, a * b)
                loss = optax.l2_loss(predictions=c_pred, targets=c) * 2
                loss_mean = loss.mean()
                return loss_mean, (c_pred, loss, loss_mean)
            def _apply_optimizer(self, params, grads):
                updates, new_opt_state = self.opt.update(grads, params[1])
                new_weight = optax.apply_updates(params[0], updates)
                return new_weight, new_opt_state
            def train_step(self, params, inputs, pmapped=False):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                c = inputs["c"].reshape(-1)
                grads, (c_pred, loss, loss_mean) = jax.grad(
                    self._loss_fn, has_aux=True
                )(params[0], a, b, c)
                new_weight, new_opt_state = self._apply_optimizer(params, grads)
                new_params = (new_weight, new_opt_state)
                outputs = {
                    "c_pred": c_pred,
                    "grad": loss,
                }
                return new_params, outputs, loss_mean
        model = ExampleModel(is_training=False)
        source = ExampleJaxTrainSource()
        train = Train(
            model=model,
            inputs={"a": ArrayKeys.A, "b": ArrayKeys.B, "c": ArrayKeys.C},
            outputs={"c_pred": ArrayKeys.C_PREDICTED, "grad": ArrayKeys.C_GRADIENT},
            array_specs={
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            },
            checkpoint_basename=checkpoint_basename,
            save_every=100,
            spawn_subprocess=True,
            n_devices=1,
        )
        pipeline = source + train
        request = BatchRequest(
            {
                ArrayKeys.A: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.B: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.C: ArraySpec(nonspatial=True),
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            }
        )
        # train for a couple of iterations
        with build(pipeline):
            batch = pipeline.request_batch(request)
            for i in range(200 - 1):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
        # resume training (save_every=100, so a checkpoint exists to load)
        with build(pipeline):
            for i in range(100):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
@skipIf(isinstance(jax, NoSuchModule), "Jax is not installed")
class TestJaxPredict(ProviderTest):
    """Runs the jax Predict node with fixed weights and checks the outputs
    are deterministic and match the hand-computed values."""
    def test_output(self):
        logging.getLogger("gunpowder.jax.nodes.predict").setLevel(logging.INFO)
        a = ArrayKey("A")
        b = ArrayKey("B")
        c = ArrayKey("C")
        c_pred = ArrayKey("C_PREDICTED")
        d_pred = ArrayKey("D_PREDICTED")
        class ExampleModel(GenericJaxModel):
            # linear model with weights pinned to 1, so c = sum(a*b)
            def __init__(self, is_training):
                super().__init__(is_training)
                def _linear(x):
                    return haiku.Linear(1, False)(x)
                self.linear = haiku.without_apply_rng(haiku.transform(_linear))
            def initialize(self, rng_key, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                weight = self.linear.init(rng_key, a * b)
                weight["linear"]["w"] = (
                    weight["linear"]["w"].at[:].set(np.array([[1], [1], [1], [1]]))
                )
                return weight
            def forward(self, params, inputs):
                a = inputs["a"].reshape(-1)
                b = inputs["b"].reshape(-1)
                c_pred = self.linear.apply(params, a * b)
                d_pred = c_pred * 2
                return {"c": c_pred, "d": d_pred}
        model = ExampleModel(is_training=False)
        source = ExampleJaxTrainSource()
        predict = Predict(
            model=model,
            inputs={"a": a, "b": b},
            outputs={"c": c_pred, "d": d_pred},
            array_specs={
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            },
            spawn_subprocess=True,
        )
        pipeline = source + predict
        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (2, 2))),
                b: ArraySpec(roi=Roi((0, 0), (2, 2))),
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            }
        )
        # predictions should be identical across repeated requests, since a*b
        # with a = b = [[0, 1], [2, 3]] gives sum 0 + 1 + 4 + 9 = 14
        with build(pipeline):
            batch1 = pipeline.request_batch(request)
            batch2 = pipeline.request_batch(request)
            assert np.isclose(batch1[c_pred].data, batch2[c_pred].data)
            assert np.isclose(batch1[c_pred].data, 1 + 4 + 9)
            assert np.isclose(batch2[d_pred].data, 2 * (1 + 4 + 9))
| 8,810 | 31.274725 | 84 | py |
gunpowder | gunpowder-master/tests/cases/torch_train.py | from .provider_test import ProviderTest
from gunpowder import (
BatchProvider,
BatchRequest,
ArraySpec,
Roi,
Coordinate,
ArrayKeys,
ArrayKey,
Array,
Batch,
Scan,
PreCache,
build,
)
from gunpowder.ext import torch, NoSuchModule
from gunpowder.torch import Train, Predict
from unittest import skipIf, expectedFailure
import numpy as np
import logging
class ExampleTorchTrain2DSource(BatchProvider):
    """Provides a fixed 17x17 float32 array under key ``A``.

    The value at position (i, j) is ``i + j``, built as the sum of a column
    vector and its transpose.
    """

    def __init__(self):
        pass

    def setup(self):
        array_spec = ArraySpec(
            roi=Roi((0, 0), (17, 17)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, array_spec)

    def provide(self, request):
        batch = Batch()
        array_spec = self.spec[ArrayKeys.A]
        column = np.arange(17, dtype=np.float32).reshape(17, 1)
        grid = column + column.T
        batch.arrays[ArrayKeys.A] = Array(grid, array_spec).crop(
            request[ArrayKeys.A].roi
        )
        return batch
class ExampleTorchTrainSource(BatchProvider):
    """Provides constant 2x2 float32 arrays ``A``/``B`` ([[0,1],[2,3]]) and a
    nonspatial scalar array ``C`` ([1])."""

    def setup(self):
        spatial_spec = ArraySpec(
            roi=Roi((0, 0), (2, 2)),
            dtype=np.float32,
            interpolatable=True,
            voxel_size=(1, 1),
        )
        self.provides(ArrayKeys.A, spatial_spec)
        self.provides(ArrayKeys.B, spatial_spec)
        self.provides(ArrayKeys.C, ArraySpec(nonspatial=True))

    def provide(self, request):
        batch = Batch()
        for key in (ArrayKeys.A, ArrayKeys.B):
            spec = self.spec[key]
            spec.roi = request[key].roi
            # fresh array per key: [[0, 1], [2, 3]] as float32
            data = np.arange(4, dtype=np.float32).reshape(2, 2)
            batch.arrays[key] = Array(data, spec)
        batch.arrays[ArrayKeys.C] = Array(
            np.array([1], dtype=np.float32), self.spec[ArrayKeys.C]
        )
        return batch
@skipIf(isinstance(torch, NoSuchModule), "torch is not installed")
class TestTorchTrain(ProviderTest):
    """Trains a one-layer linear torch model with the Train node and checks
    that the loss decreases monotonically, including across a checkpoint
    resume."""
    def test_output(self):
        logging.getLogger("gunpowder.torch.nodes.train").setLevel(logging.INFO)
        checkpoint_basename = self.path_to("model")
        ArrayKey("A")
        ArrayKey("B")
        ArrayKey("C")
        ArrayKey("C_PREDICTED")
        ArrayKey("C_GRADIENT")
        class ExampleModel(torch.nn.Module):
            # predicts a scalar c from the elementwise product a*b
            def __init__(self):
                super(ExampleModel, self).__init__()
                self.linear = torch.nn.Linear(4, 1, False)
            def forward(self, a, b):
                a = a.reshape(-1)
                b = b.reshape(-1)
                return self.linear(a * b)
        model = ExampleModel()
        loss = torch.nn.MSELoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-7, momentum=0.999)
        source = ExampleTorchTrainSource()
        train = Train(
            model=model,
            optimizer=optimizer,
            loss=loss,
            inputs={"a": ArrayKeys.A, "b": ArrayKeys.B},
            loss_inputs={0: ArrayKeys.C_PREDICTED, 1: ArrayKeys.C},
            outputs={0: ArrayKeys.C_PREDICTED},
            gradients={0: ArrayKeys.C_GRADIENT},
            array_specs={
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            },
            checkpoint_basename=checkpoint_basename,
            save_every=100,
            spawn_subprocess=True,
        )
        pipeline = source + train
        request = BatchRequest(
            {
                ArrayKeys.A: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.B: ArraySpec(roi=Roi((0, 0), (2, 2))),
                ArrayKeys.C: ArraySpec(nonspatial=True),
                ArrayKeys.C_PREDICTED: ArraySpec(nonspatial=True),
                ArrayKeys.C_GRADIENT: ArraySpec(nonspatial=True),
            }
        )
        # train for a couple of iterations
        with build(pipeline):
            batch = pipeline.request_batch(request)
            for i in range(200 - 1):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
        # resume training (save_every=100, so a checkpoint exists to load)
        with build(pipeline):
            for i in range(100):
                loss1 = batch.loss
                batch = pipeline.request_batch(request)
                loss2 = batch.loss
                self.assertLess(loss2, loss1)
@skipIf(isinstance(torch, NoSuchModule), "torch is not installed")
class TestTorchPredict(ProviderTest):
    """Runs the torch Predict node with fixed weights and checks that both
    the hooked intermediate output and the forward return value match the
    hand-computed values."""
    def test_output(self):
        logging.getLogger("gunpowder.torch.nodes.predict").setLevel(logging.INFO)
        a = ArrayKey("A")
        b = ArrayKey("B")
        c = ArrayKey("C")
        c_pred = ArrayKey("C_PREDICTED")
        d_pred = ArrayKey("D_PREDICTED")
        class ExampleModel(torch.nn.Module):
            # linear model with weights pinned to 1, so c = sum(a*b)
            def __init__(self):
                super(ExampleModel, self).__init__()
                self.linear = torch.nn.Linear(4, 1, False)
                self.linear.weight.data = torch.Tensor([1, 1, 1, 1])
            def forward(self, a, b):
                a = a.reshape(-1)
                b = b.reshape(-1)
                c_pred = self.linear(a * b)
                d_pred = c_pred * 2
                return d_pred
        model = ExampleModel()
        source = ExampleTorchTrainSource()
        predict = Predict(
            model=model,
            inputs={"a": a, "b": b},
            # "linear" is captured via a forward hook on the submodule; 0 is
            # the positional return value of forward()
            outputs={"linear": c_pred, 0: d_pred},
            array_specs={
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            },
            spawn_subprocess=True,
        )
        pipeline = source + predict
        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (2, 2))),
                b: ArraySpec(roi=Roi((0, 0), (2, 2))),
                c: ArraySpec(nonspatial=True),
                c_pred: ArraySpec(nonspatial=True),
                d_pred: ArraySpec(nonspatial=True),
            }
        )
        # predictions should be identical across repeated requests, since a*b
        # with a = b = [[0, 1], [2, 3]] gives sum 0 + 1 + 4 + 9 = 14
        with build(pipeline):
            batch1 = pipeline.request_batch(request)
            batch2 = pipeline.request_batch(request)
        assert np.isclose(batch1[c_pred].data, batch2[c_pred].data)
        assert np.isclose(batch1[c_pred].data, 1 + 4 + 9)
        assert np.isclose(batch2[d_pred].data, 2 * (1 + 4 + 9))
# Module-level model for the multiprocessing tests below; only defined when
# torch is importable (otherwise the class body itself would fail).
if not isinstance(torch, NoSuchModule):
    class ExampleModel(torch.nn.Module):
        # single 3x3 Conv2d applied to a 2D array: batch and channel
        # dimensions are added before the convolution and removed after
        def __init__(self):
            super(ExampleModel, self).__init__()
            self.linear = torch.nn.Conv2d(1, 1, 3)
        def forward(self, a):
            a = a.unsqueeze(0).unsqueeze(0)
            pred = self.linear(a)
            a = a.squeeze(0).squeeze(0)
            pred = pred.squeeze(0).squeeze(0)
            return pred
@skipIf(isinstance(torch, NoSuchModule), "torch is not installed")
class TestTorchPredictMultiprocessing(ProviderTest):
    def test_scan(self):
        """Predict must work when chunked by Scan with multiple workers."""
        logging.getLogger("gunpowder.torch.nodes.predict").setLevel(logging.INFO)
        a = ArrayKey("A")
        pred = ArrayKey("PRED")
        model = ExampleModel()
        # per-chunk request: 7x7 input shrinks to 5x5 output (3x3 conv, no pad)
        reference_request = BatchRequest()
        reference_request[a] = ArraySpec(roi=Roi((0, 0), (7, 7)))
        reference_request[pred] = ArraySpec(roi=Roi((1, 1), (5, 5)))
        source = ExampleTorchTrain2DSource()
        predict = Predict(
            model=model,
            inputs={"a": a},
            outputs={0: pred},
            array_specs={pred: ArraySpec()},
        )
        pipeline = source + predict + Scan(reference_request, num_workers=2)
        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (17, 17))),
                pred: ArraySpec(roi=Roi((0, 0), (15, 15))),
            }
        )
        # train for a couple of iterations
        with build(pipeline):
            batch = pipeline.request_batch(request)
            assert pred in batch
    def test_precache(self):
        """Predict must work behind a PreCache with multiple workers."""
        logging.getLogger("gunpowder.torch.nodes.predict").setLevel(logging.INFO)
        a = ArrayKey("A")
        pred = ArrayKey("PRED")
        model = ExampleModel()
        reference_request = BatchRequest()
        reference_request[a] = ArraySpec(roi=Roi((0, 0), (7, 7)))
        reference_request[pred] = ArraySpec(roi=Roi((1, 1), (5, 5)))
        source = ExampleTorchTrain2DSource()
        predict = Predict(
            model=model,
            inputs={"a": a},
            outputs={0: pred},
            array_specs={pred: ArraySpec()},
        )
        pipeline = source + predict + PreCache(cache_size=3, num_workers=2)
        request = BatchRequest(
            {
                a: ArraySpec(roi=Roi((0, 0), (17, 17))),
                pred: ArraySpec(roi=Roi((0, 0), (15, 15))),
            }
        )
        # train for a couple of iterations
        with build(pipeline):
            batch = pipeline.request_batch(request)
            assert pred in batch
| 9,228 | 29.259016 | 81 | py |
gunpowder | gunpowder-master/docs/build/conf.py | # -*- coding: utf-8 -*-
#
# gunpowder documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 30 12:59:21 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'jupyter_sphinx'
]
def setup(app):
    """Sphinx extension hook: register the project's custom stylesheet."""
    app.add_css_file('custom.css')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gunpowder'
author = u'Jan Funke, Will Patton, Renate Krause, Julia Buhmann, Rodrigo Ceballos Lentini, William Grisaitis, Chris Barnes, Caroline Malin-Mayor, Larissa Heinrich, Philipp Hanslovsky, Sherry Ding, Andrew Champion, Arlo Sheridan, Constantin Pape'
copyright = u'2020, ' + author
here = os.path.abspath(os.path.dirname(__file__))
version_info = {}
with open(os.path.join(here, '..', '..', 'gunpowder', 'version_info.py')) as fp:
exec(fp.read(), version_info)
gp_version = version_info['_version']
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'{}.{}'.format(gp_version.major(), gp_version.minor())
# The full version, including alpha/beta/rc tags.
release = str(gp_version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme_path = ['_themes']
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'logo_only': True
}
html_logo = 'gunpowder.svg'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'gunpowderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gunpowder.tex', u'gunpowder Documentation',
u'Jan Funke', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gunpowder', u'gunpowder Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'gunpowder', u'gunpowder Documentation',
author, 'gunpowder', 'One line description of project.',
'Miscellaneous'),
]
| 5,592 | 30.24581 | 245 | py |
treelstm.pytorch | treelstm.pytorch-master/main.py | from __future__ import division
from __future__ import print_function
import os
import random
import logging
import torch
import torch.nn as nn
import torch.optim as optim
# IMPORT CONSTANTS
from treelstm import Constants
# NEURAL NETWORK MODULES/LAYERS
from treelstm import SimilarityTreeLSTM
# DATA HANDLING CLASSES
from treelstm import Vocab
# DATASET CLASS FOR SICK DATASET
from treelstm import SICKDataset
# METRICS CLASS FOR EVALUATION
from treelstm import Metrics
# UTILITY FUNCTIONS
from treelstm import utils
# TRAIN AND TEST HELPER FUNCTIONS
from treelstm import Trainer
# CONFIG PARSER
from config import parse_args
# MAIN BLOCK
def main():
    """End-to-end training of SimilarityTreeLSTM on the SICK dataset.

    Parses CLI args, sets up file/console logging and seeds, builds the
    vocabulary and dataset splits (cached as .pth files on first run),
    initializes the embedding matrix from GloVe, then trains and
    checkpoints whenever the test Pearson score improves.
    """
    global args
    args = parse_args()
    # global logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
    # file logger
    fh = logging.FileHandler(os.path.join(args.save, args.expname)+'.log', mode='w')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # console logger
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # argument validation
    args.cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if args.cuda else "cpu")
    if args.sparse and args.wd != 0:
        logger.error('Sparsity and weight decay are incompatible, pick one!')
        exit()
    logger.debug(args)
    # seed everything for reproducibility
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = True
    if not os.path.exists(args.save):
        os.makedirs(args.save)
    train_dir = os.path.join(args.data, 'train/')
    dev_dir = os.path.join(args.data, 'dev/')
    test_dir = os.path.join(args.data, 'test/')
    # write unique words from all token files
    sick_vocab_file = os.path.join(args.data, 'sick.vocab')
    if not os.path.isfile(sick_vocab_file):
        token_files_b = [os.path.join(split, 'b.toks') for split in [train_dir, dev_dir, test_dir]]
        token_files_a = [os.path.join(split, 'a.toks') for split in [train_dir, dev_dir, test_dir]]
        token_files = token_files_a + token_files_b
        sick_vocab_file = os.path.join(args.data, 'sick.vocab')
        utils.build_vocab(token_files, sick_vocab_file)
    # get vocab object from vocab file previously written
    vocab = Vocab(filename=sick_vocab_file,
                  data=[Constants.PAD_WORD, Constants.UNK_WORD,
                        Constants.BOS_WORD, Constants.EOS_WORD])
    logger.debug('==> SICK vocabulary size : %d ' % vocab.size())
    # load SICK dataset splits (parsed once, then cached as .pth files)
    train_file = os.path.join(args.data, 'sick_train.pth')
    if os.path.isfile(train_file):
        train_dataset = torch.load(train_file)
    else:
        train_dataset = SICKDataset(train_dir, vocab, args.num_classes)
        torch.save(train_dataset, train_file)
    logger.debug('==> Size of train data : %d ' % len(train_dataset))
    dev_file = os.path.join(args.data, 'sick_dev.pth')
    if os.path.isfile(dev_file):
        dev_dataset = torch.load(dev_file)
    else:
        dev_dataset = SICKDataset(dev_dir, vocab, args.num_classes)
        torch.save(dev_dataset, dev_file)
    logger.debug('==> Size of dev data : %d ' % len(dev_dataset))
    test_file = os.path.join(args.data, 'sick_test.pth')
    if os.path.isfile(test_file):
        test_dataset = torch.load(test_file)
    else:
        test_dataset = SICKDataset(test_dir, vocab, args.num_classes)
        torch.save(test_dataset, test_file)
    logger.debug('==> Size of test data : %d ' % len(test_dataset))
    # initialize model, criterion/loss_function, optimizer
    model = SimilarityTreeLSTM(
        vocab.size(),
        args.input_dim,
        args.mem_dim,
        args.hidden_dim,
        args.num_classes,
        args.sparse,
        args.freeze_embed)
    criterion = nn.KLDivLoss()
    # for words common to dataset vocab and GLOVE, use GLOVE vectors
    # for other words in dataset vocab, use random normal vectors
    emb_file = os.path.join(args.data, 'sick_embed.pth')
    if os.path.isfile(emb_file):
        emb = torch.load(emb_file)
    else:
        # load glove embeddings and vocab
        glove_vocab, glove_emb = utils.load_word_vectors(
            os.path.join(args.glove, 'glove.840B.300d'))
        logger.debug('==> GLOVE vocabulary size: %d ' % glove_vocab.size())
        emb = torch.zeros(vocab.size(), glove_emb.size(1), dtype=torch.float, device=device)
        emb.normal_(0, 0.05)
        # zero out the embeddings for padding and other special words if they are absent in vocab
        for idx, item in enumerate([Constants.PAD_WORD, Constants.UNK_WORD,
                                    Constants.BOS_WORD, Constants.EOS_WORD]):
            emb[idx].zero_()
        for word in vocab.labelToIdx.keys():
            if glove_vocab.getIndex(word):
                emb[vocab.getIndex(word)] = glove_emb[glove_vocab.getIndex(word)]
        torch.save(emb, emb_file)
    # plug these into embedding matrix inside model
    model.emb.weight.data.copy_(emb)
    model.to(device), criterion.to(device)
    if args.optim == 'adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                      model.parameters()), lr=args.lr, weight_decay=args.wd)
    elif args.optim == 'adagrad':
        optimizer = optim.Adagrad(filter(lambda p: p.requires_grad,
                                         model.parameters()), lr=args.lr, weight_decay=args.wd)
    elif args.optim == 'sgd':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                     model.parameters()), lr=args.lr, weight_decay=args.wd)
    metrics = Metrics(args.num_classes)
    # create trainer object for training and testing
    trainer = Trainer(args, model, criterion, optimizer, device)
    best = -float('inf')
    for epoch in range(args.epochs):
        train_loss = trainer.train(train_dataset)
        train_loss, train_pred = trainer.test(train_dataset)
        dev_loss, dev_pred = trainer.test(dev_dataset)
        test_loss, test_pred = trainer.test(test_dataset)
        train_pearson = metrics.pearson(train_pred, train_dataset.labels)
        train_mse = metrics.mse(train_pred, train_dataset.labels)
        logger.info('==> Epoch {}, Train \tLoss: {}\tPearson: {}\tMSE: {}'.format(
            epoch, train_loss, train_pearson, train_mse))
        dev_pearson = metrics.pearson(dev_pred, dev_dataset.labels)
        dev_mse = metrics.mse(dev_pred, dev_dataset.labels)
        logger.info('==> Epoch {}, Dev \tLoss: {}\tPearson: {}\tMSE: {}'.format(
            epoch, dev_loss, dev_pearson, dev_mse))
        test_pearson = metrics.pearson(test_pred, test_dataset.labels)
        test_mse = metrics.mse(test_pred, test_dataset.labels)
        logger.info('==> Epoch {}, Test \tLoss: {}\tPearson: {}\tMSE: {}'.format(
            epoch, test_loss, test_pearson, test_mse))
        # checkpoint whenever the test Pearson score improves
        if best < test_pearson:
            best = test_pearson
            checkpoint = {
                'model': trainer.model.state_dict(),
                'optim': trainer.optimizer,
                'pearson': test_pearson, 'mse': test_mse,
                'args': args, 'epoch': epoch
            }
            logger.debug('==> New optimum found, checkpointing everything now...')
            torch.save(checkpoint, '%s.pt' % os.path.join(args.save, args.expname))
if __name__ == "__main__":
main()
| 7,610 | 39.484043 | 99 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/utils.py | from __future__ import division
from __future__ import print_function
import os
import math
import torch
from .vocab import Vocab
# loading GLOVE word vectors
# if .pth file is found, will load that
# else will load from .txt file & save
def load_word_vectors(path):
    """Load GloVe-style word vectors rooted at ``path`` (without extension).

    If cached ``<path>.pth`` / ``<path>.vocab`` files exist they are loaded
    directly; otherwise ``<path>.txt`` is parsed and both cache files are
    written for subsequent runs.

    Returns:
        (Vocab, torch.Tensor): vocabulary and a (vocab_size, dim) float matrix.
    """
    if os.path.isfile(path + '.pth') and os.path.isfile(path + '.vocab'):
        print('==> File found, loading to memory')
        vectors = torch.load(path + '.pth')
        vocab = Vocab(filename=path + '.vocab')
        return vocab, vectors
    # saved file not found, read from txt file
    # and create tensors for word vectors
    print('==> File not found, preparing, be patient')
    # count lines inside a context manager so the handle is closed
    # (the original bare generator over open() leaked the file handle)
    with open(path + '.txt', 'r', encoding='utf8', errors='ignore') as f:
        count = sum(1 for line in f)
    # probe the first line for the embedding dimensionality, using the same
    # encoding/error policy as every other read of this file
    with open(path + '.txt', 'r', encoding='utf8', errors='ignore') as f:
        contents = f.readline().rstrip('\n').split(' ')
        dim = len(contents[1:])
    words = [None] * (count)
    vectors = torch.zeros(count, dim, dtype=torch.float, device='cpu')
    with open(path + '.txt', 'r', encoding='utf8', errors='ignore') as f:
        for idx, line in enumerate(f):
            contents = line.rstrip('\n').split(' ')
            words[idx] = contents[0]
            values = list(map(float, contents[1:]))
            vectors[idx] = torch.tensor(values, dtype=torch.float, device='cpu')
    with open(path + '.vocab', 'w', encoding='utf8', errors='ignore') as f:
        for word in words:
            f.write(word + '\n')
    vocab = Vocab(filename=path + '.vocab')
    torch.save(vectors, path + '.pth')
    return vocab, vectors
# write unique words from a set of files to a new file
def build_vocab(filenames, vocabfile):
    """Collect the unique space-separated tokens of all ``filenames`` and
    write them, sorted one per line, to ``vocabfile``."""
    tokens = set()
    for path in filenames:
        with open(path, 'r') as src:
            for line in src:
                tokens.update(line.rstrip('\n').split(' '))
    with open(vocabfile, 'w') as dst:
        dst.writelines(token + '\n' for token in sorted(tokens))
# mapping from scalar to vector
def map_label_to_target(label, num_classes):
    """Convert a scalar score into a sparse (1, num_classes) distribution.

    Classes are 1-based (stored at column class-1). An integer label puts
    all mass on its class; a fractional label splits the mass linearly
    between the two neighboring integer classes.
    """
    target = torch.zeros(1, num_classes, dtype=torch.float, device='cpu')
    lo = int(math.floor(label))
    hi = int(math.ceil(label))
    if lo == hi:
        target[0, lo - 1] = 1
    else:
        target[0, lo - 1] = hi - label
        target[0, hi - 1] = label - lo
    return target
| 2,376 | 32.957143 | 89 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from . import Constants
# module for childsumtreelstm
class ChildSumTreeLSTM(nn.Module):
    """Child-Sum Tree-LSTM cell (Tai et al., 2015), applied recursively.

    ``forward`` traverses the tree bottom-up, computing a (c, h) state per
    node from the node's input embedding and the sum of its children's
    hidden states; each child gets its own forget gate.
    """
    def __init__(self, in_dim, mem_dim):
        super(ChildSumTreeLSTM, self).__init__()
        self.in_dim = in_dim
        self.mem_dim = mem_dim
        # one projection producing input/output/update pre-activations at once
        self.ioux = nn.Linear(self.in_dim, 3 * self.mem_dim)
        self.iouh = nn.Linear(self.mem_dim, 3 * self.mem_dim)
        # forget gate, computed per child
        self.fx = nn.Linear(self.in_dim, self.mem_dim)
        self.fh = nn.Linear(self.mem_dim, self.mem_dim)
    def node_forward(self, inputs, child_c, child_h):
        """Compute (c, h) for one node.

        Args:
            inputs: (1, in_dim) input embedding of the node.
            child_c, child_h: (num_children, mem_dim) children states.
        """
        child_h_sum = torch.sum(child_h, dim=0, keepdim=True)
        iou = self.ioux(inputs) + self.iouh(child_h_sum)
        i, o, u = torch.split(iou, iou.size(1) // 3, dim=1)
        # torch.sigmoid/torch.tanh replace the deprecated F.sigmoid/F.tanh
        i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)
        f = torch.sigmoid(
            self.fh(child_h) +
            self.fx(inputs).repeat(len(child_h), 1)
        )
        fc = torch.mul(f, child_c)
        c = torch.mul(i, u) + torch.sum(fc, dim=0, keepdim=True)
        h = torch.mul(o, torch.tanh(c))
        return c, h
    def forward(self, tree, inputs):
        # post-order traversal: children first, then this node
        for idx in range(tree.num_children):
            self.forward(tree.children[idx], inputs)
        if tree.num_children == 0:
            # leaf: use a zero child state
            child_c = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()
            child_h = inputs[0].detach().new(1, self.mem_dim).fill_(0.).requires_grad_()
        else:
            child_c, child_h = zip(* map(lambda x: x.state, tree.children))
            child_c, child_h = torch.cat(child_c, dim=0), torch.cat(child_h, dim=0)
        tree.state = self.node_forward(inputs[tree.idx], child_c, child_h)
        return tree.state
# module for distance-angle similarity
class Similarity(nn.Module):
    """Distance-angle similarity head.

    Combines the elementwise product and absolute difference of two
    sentence vectors, then outputs class log-probabilities.
    """
    def __init__(self, mem_dim, hidden_dim, num_classes):
        super(Similarity, self).__init__()
        self.mem_dim = mem_dim
        self.hidden_dim = hidden_dim
        self.num_classes = num_classes
        self.wh = nn.Linear(2 * self.mem_dim, self.hidden_dim)
        self.wp = nn.Linear(self.hidden_dim, self.num_classes)
    def forward(self, lvec, rvec):
        mult_dist = torch.mul(lvec, rvec)
        abs_dist = torch.abs(torch.add(lvec, -rvec))
        vec_dist = torch.cat((mult_dist, abs_dist), 1)
        # torch.sigmoid replaces the deprecated F.sigmoid
        out = torch.sigmoid(self.wh(vec_dist))
        out = F.log_softmax(self.wp(out), dim=1)
        return out
# putting the whole model together
class SimilarityTreeLSTM(nn.Module):
    """Full model: shared embedding -> Child-Sum Tree-LSTM over each tree ->
    Similarity head on the two root cell states."""
    def __init__(self, vocab_size, in_dim, mem_dim, hidden_dim, num_classes, sparsity, freeze):
        super(SimilarityTreeLSTM, self).__init__()
        self.emb = nn.Embedding(vocab_size, in_dim, padding_idx=Constants.PAD, sparse=sparsity)
        if freeze:
            # keep pretrained embeddings fixed during training
            self.emb.weight.requires_grad = False
        self.childsumtreelstm = ChildSumTreeLSTM(in_dim, mem_dim)
        self.similarity = Similarity(mem_dim, hidden_dim, num_classes)
    def forward(self, ltree, linputs, rtree, rinputs):
        linputs = self.emb(linputs)
        rinputs = self.emb(rinputs)
        # each call returns the root's (c, h); the cell state feeds the head
        lstate, lhidden = self.childsumtreelstm(ltree, linputs)
        rstate, rhidden = self.childsumtreelstm(rtree, rinputs)
        output = self.similarity(lstate, rstate)
        return output
| 3,286 | 36.352273 | 95 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/dataset.py | import os
from tqdm import tqdm
from copy import deepcopy
import torch
import torch.utils.data as data
from . import Constants
from .tree import Tree
# Dataset class for SICK dataset
class SICKDataset(data.Dataset):
    """SICK sentence-pair dataset: parallel lists of left/right token index
    tensors, dependency trees (parent-pointer format) and similarity labels."""
    def __init__(self, path, vocab, num_classes):
        super(SICKDataset, self).__init__()
        self.vocab = vocab
        self.num_classes = num_classes
        self.lsentences = self.read_sentences(os.path.join(path, 'a.toks'))
        self.rsentences = self.read_sentences(os.path.join(path, 'b.toks'))
        self.ltrees = self.read_trees(os.path.join(path, 'a.parents'))
        self.rtrees = self.read_trees(os.path.join(path, 'b.parents'))
        self.labels = self.read_labels(os.path.join(path, 'sim.txt'))
        self.size = self.labels.size(0)
    def __len__(self):
        return self.size
    def __getitem__(self, index):
        # deepcopy so per-sample mutation (e.g. tree.state written during the
        # forward pass) cannot leak between iterations
        ltree = deepcopy(self.ltrees[index])
        rtree = deepcopy(self.rtrees[index])
        lsent = deepcopy(self.lsentences[index])
        rsent = deepcopy(self.rsentences[index])
        label = deepcopy(self.labels[index])
        return (ltree, lsent, rtree, rsent, label)
    def read_sentences(self, filename):
        """Read one tokenized sentence per line; returns list of index tensors."""
        with open(filename, 'r') as f:
            sentences = [self.read_sentence(line) for line in tqdm(f.readlines())]
        return sentences
    def read_sentence(self, line):
        indices = self.vocab.convertToIdx(line.split(), Constants.UNK_WORD)
        return torch.tensor(indices, dtype=torch.long, device='cpu')
    def read_trees(self, filename):
        """Read one parent-pointer tree per line; returns list of root Trees."""
        with open(filename, 'r') as f:
            trees = [self.read_tree(line) for line in tqdm(f.readlines())]
        return trees
    def read_tree(self, line):
        """Decode a 1-indexed parent-pointer list into a Tree.

        parents[i] is the 1-based index of token i+1's head; 0 marks the
        root and -1 marks tokens outside the tree. For each node not yet
        built, walk up the parent chain creating nodes until reaching an
        already-built ancestor or the root.
        """
        parents = list(map(int, line.split()))
        trees = dict()
        root = None
        for i in range(1, len(parents) + 1):
            if i - 1 not in trees.keys() and parents[i - 1] != -1:
                idx = i
                prev = None
                while True:
                    parent = parents[idx - 1]
                    if parent == -1:
                        break
                    tree = Tree()
                    if prev is not None:
                        tree.add_child(prev)
                    trees[idx - 1] = tree
                    tree.idx = idx - 1
                    if parent - 1 in trees.keys():
                        # attach the finished chain to an existing ancestor
                        trees[parent - 1].add_child(tree)
                        break
                    elif parent == 0:
                        root = tree
                        break
                    else:
                        # keep walking up, remembering the child just built
                        prev = tree
                        idx = parent
        return root
    def read_labels(self, filename):
        """Read one float relatedness score per line into a 1-D float tensor."""
        with open(filename, 'r') as f:
            labels = list(map(lambda x: float(x), f.readlines()))
            labels = torch.tensor(labels, dtype=torch.float, device='cpu')
        return labels
| 2,927 | 32.655172 | 82 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/metrics.py | from copy import deepcopy
import torch
class Metrics():
    """Evaluation metrics over prediction/label tensors."""
    def __init__(self, num_classes):
        self.num_classes = num_classes
    def pearson(self, predictions, labels):
        """Pearson-style correlation (standardized with the unbiased std)."""
        preds = deepcopy(predictions)
        gold = deepcopy(labels)
        preds = (preds - preds.mean()) / preds.std()
        gold = (gold - gold.mean()) / gold.std()
        return (preds * gold).mean()
    def mse(self, predictions, labels):
        """Mean squared error between predictions and labels."""
        diff = deepcopy(predictions) - deepcopy(labels)
        return (diff ** 2).mean()
| 504 | 23.047619 | 43 | py |
treelstm.pytorch | treelstm.pytorch-master/treelstm/trainer.py | from tqdm import tqdm
import torch
from . import utils
class Trainer(object):
    """Wraps one (model, criterion, optimizer) triple and provides per-epoch
    train/test loops over tree-pair datasets (one tree pair at a time)."""
    def __init__(self, args, model, criterion, optimizer, device):
        super(Trainer, self).__init__()
        self.args = args
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.device = device
        self.epoch = 0
    # helper function for training
    def train(self, dataset):
        """Run one shuffled training epoch; returns the mean per-sample loss.

        Gradients accumulate across samples; the optimizer steps every
        ``args.batchsize`` samples.
        """
        self.model.train()
        self.optimizer.zero_grad()
        total_loss = 0.0
        indices = torch.randperm(len(dataset), dtype=torch.long, device='cpu')
        for idx in tqdm(range(len(dataset)), desc='Training epoch ' + str(self.epoch + 1) + ''):
            ltree, linput, rtree, rinput, label = dataset[indices[idx]]
            target = utils.map_label_to_target(label, dataset.num_classes)
            linput, rinput = linput.to(self.device), rinput.to(self.device)
            target = target.to(self.device)
            output = self.model(ltree, linput, rtree, rinput)
            loss = self.criterion(output, target)
            total_loss += loss.item()
            loss.backward()
            if idx % self.args.batchsize == 0 and idx > 0:
                self.optimizer.step()
                self.optimizer.zero_grad()
        self.epoch += 1
        return total_loss / len(dataset)
    # helper function for testing
    def test(self, dataset):
        """Evaluate on ``dataset``; returns (mean loss, predicted scores).

        Each prediction is the expectation over classes:
        dot(1..num_classes, exp(log-probs)).
        """
        self.model.eval()
        with torch.no_grad():
            total_loss = 0.0
            predictions = torch.zeros(len(dataset), dtype=torch.float, device='cpu')
            indices = torch.arange(1, dataset.num_classes + 1, dtype=torch.float, device='cpu')
            for idx in tqdm(range(len(dataset)), desc='Testing epoch ' + str(self.epoch) + ''):
                ltree, linput, rtree, rinput, label = dataset[idx]
                target = utils.map_label_to_target(label, dataset.num_classes)
                linput, rinput = linput.to(self.device), rinput.to(self.device)
                target = target.to(self.device)
                output = self.model(ltree, linput, rtree, rinput)
                loss = self.criterion(output, target)
                total_loss += loss.item()
                output = output.squeeze().to('cpu')
                predictions[idx] = torch.dot(indices, torch.exp(output))
        return total_loss / len(dataset), predictions
| 2,384 | 40.842105 | 96 | py |
FastVae_Gpu | FastVae_Gpu-main/run_mm.py | from dataloader import RecData, UserItemData
from sampler_gpu_mm import SamplerBase, PopularSampler, MidxUniform, MidxUniPop
import torch
import torch.optim
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from vae_models import BaseVAE, VAE_Sampler
import argparse
import numpy as np
from utils import Eval
import utils
import logging
import datetime
import os
import time
import gc
def evaluate(model, train_mat, test_mat, logger, device):
    """Evaluate ranking quality via Eval.evaluate_item on a random subsample
    of at most 5000 users; returns the metrics dict.

    NOTE(review): get_user_embs reads the module-level ``config`` set in
    __main__ — confirm it is initialized before calling this directly.
    """
    logger.info("Start evaluation")
    model.eval()
    with torch.no_grad():
        user_num, item_num = train_mat.shape
        user_emb = get_user_embs(train_mat, model, device)
        item_emb = model._get_item_emb()
        user_emb = user_emb.cpu().data
        item_emb = item_emb.cpu().data
        # score at most 5000 randomly chosen users to keep evaluation cheap
        users = np.random.choice(user_num, min(user_num, 5000), False)
        m = Eval.evaluate_item(train_mat[users, :], test_mat[users, :], user_emb[users, :], item_emb, topk=50)
    return m
def get_user_embs(data_mat, model, device):
    """Encode every user's interaction history into an embedding matrix.

    NOTE(review): relies on the module-level ``config`` for batch size and
    worker count — confirm it is set before calling.
    """
    data = UserItemData(data_mat, train_flag=False)
    dataloader = DataLoader(data, batch_size=config.batch_size_u, num_workers=config.num_workers, pin_memory=False, shuffle=False, collate_fn=utils.custom_collate_)
    user_lst = []
    for e in dataloader:
        user_his = e
        user_emb = model._get_user_emb(user_his.to(device))
        user_lst.append(user_emb)
    # concatenate per-batch embeddings into a single (num_users, dim) tensor
    return torch.cat(user_lst, dim=0)
def train_model(model, train_mat, test_mat, config, logger):
    """Train ``model`` on ``train_mat`` for ``config.epoch`` epochs.

    A fresh negative sampler is built per epoch according to
    ``config.sampler`` (0: none/full softmax, 1: uniform, 2: popularity,
    3: MidxUniform, 4: MidxUniPop). Per-phase wall-clock times are logged.
    """
    optimizer = utils_optim(config.learning_rate, model, config.weight_decay)
    scheduler = StepLR(optimizer, config.step_size, config.gamma)
    device = torch.device(config.device)
    train_data = UserItemData(train_mat)
    train_dataloader = DataLoader(train_data, batch_size=config.batch_size, num_workers=config.num_workers, pin_memory=True, shuffle=True, collate_fn=utils.custom_collate_)
    # per-epoch timing accumulators
    initial_list = []
    training_list = []
    sampling_list = []
    inference_list = []
    cal_loss_list = []
    for epoch in range(config.epoch):
        loss_ , kld_loss = 0.0, 0.0
        logger.info("Epoch %d"%epoch)
        if epoch > 0:
            # rebuild the sampler each epoch (Midx samplers depend on the
            # current item embeddings)
            del sampler
            if config.sampler > 2:
                del item_emb
        infer_total_time = 0.0
        sample_total_time = 0.0
        loss_total_time = 0.0
        t0 = time.time()
        if config.sampler > 0:
            if config.sampler == 1:
                sampler = SamplerBase(train_mat.shape[1] * config.multi, config.sample_num, device)
            elif config.sampler == 2:
                pop_count = np.squeeze(train_mat.sum(axis=0).A)
                pop_count = np.r_[pop_count, np.ones(train_mat.shape[1] * (config.multi -1))]
                sampler = PopularSampler(pop_count, config.sample_num, device)
            elif config.sampler == 3:
                item_emb = model._get_item_emb().detach()
                sampler = MidxUniform(item_emb, config.sample_num, device, config.cluster_num)
            elif config.sampler == 4:
                item_emb = model._get_item_emb().detach()
                pop_count = np.squeeze(train_mat.sum(axis=0).A)
                pop_count = np.r_[pop_count, np.ones(train_mat.shape[1] * (config.multi -1))]
                sampler = MidxUniPop(item_emb, config.sample_num, device, config.cluster_num, pop_count)
        t1 = time.time()
        for batch_idx, data in enumerate(train_dataloader):
            model.train()
            if config.sampler > 0 :
                sampler.train()
            else:
                sampler = None
            pos_id = data
            pos_id = pos_id.to(device)
            optimizer.zero_grad()
            tt0 = time.time()
            mu, logvar, loss, sample_time, loss_time = model(pos_id, sampler)
            tt1 = time.time()
            sample_total_time += sample_time
            infer_total_time += tt1 - tt0
            loss_total_time += loss_time
            kl_divergence = model.kl_loss(mu, logvar, config.anneal, reduction=config.reduction)/config.batch_size
            loss_ += loss.item()
            kld_loss += kl_divergence.item()
            # BUG FIX: previously `loss += kl_divergence.item()` added a
            # detached Python float, so the KL term contributed no gradient;
            # add the tensor so backward() also trains on the KL regularizer.
            loss = loss + kl_divergence
            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
            optimizer.step()
            # break
            # torch.cuda.empty_cache()
        t2= time.time()
        logger.info('--loss : %.2f, kl_dis : %.2f, total : %.2f '% (loss_, kld_loss, loss_ + kld_loss))
        torch.cuda.empty_cache()
        scheduler.step()
        gc.collect()
        initial_list.append(t1 - t0)
        training_list.append(t2 - t1)
        sampling_list.append(sample_total_time)
        inference_list.append(infer_total_time)
        cal_loss_list.append(loss_total_time)
        if (epoch % 10) == 0:
            result = evaluate(model, train_mat, test_mat, logger, device)
            logger.info('***************Eval_Res : NDCG@5,10,50 %.6f, %.6f, %.6f'%(result['item_ndcg'][4], result['item_ndcg'][9], result['item_ndcg'][49]))
            logger.info('***************Eval_Res : RECALL@5,10,50 %.6f, %.6f, %.6f'%(result['item_recall'][4], result['item_recall'][9], result['item_recall'][49]))
    logger.info(' Initial Time : {}'.format(np.mean(initial_list)))
    logger.info(' Sampling Time : {}'.format(np.mean(sampling_list)))
    logger.info(' Inference Time: {}'.format(np.mean(inference_list)))
    logger.info(' Calc Loss Time: {}'.format(np.mean(cal_loss_list)))
    logger.info(' Training Time (One epoch, including the dataIO, sampling, inference and backward time) : {}'.format(np.mean(training_list)))
def utils_optim(learning_rate, model, w):
    """Build the optimizer named by the module-level ``config.optim``
    ('adam' or 'sgd') with the given learning rate and weight decay."""
    name = config.optim
    if name == 'adam':
        return torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=w)
    if name == 'sgd':
        return torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w)
    raise ValueError('Unkown optimizer!')
def main(config, logger=None):
    """Load the dataset, build the (optionally sampled) VAE, train it, and
    return the final evaluation metrics dict."""
    device = torch.device(config.device)
    data = RecData(config.data_dir, config.data)
    train_mat, test_mat = data.get_data(config.ratio)
    user_num, item_num = train_mat.shape
    logging.info('The shape of datasets: %d, %d'%(user_num, item_num))
    assert config.sample_num < item_num
    # sampler == 0 trains with the full softmax; > 0 uses a sampled softmax
    if config.model == 'vae' and config.sampler == 0:
        model = BaseVAE(item_num * config.multi, config.dim)
    elif config.model == 'vae' and config.sampler > 0:
        model = VAE_Sampler(item_num * config.multi, config.dim)
    else:
        raise ValueError('Not supported model name!!!')
    model = model.to(device)
    train_model(model, train_mat, test_mat, config, logger)
    return evaluate(model, train_mat, test_mat, logger, device)
if __name__ == "__main__":
    # CLI entry point: parse hyper-parameters, set up file logging, then train.
    parser = argparse.ArgumentParser(description='Initialize Parameters!')
    parser.add_argument('-data', default='ml10M', type=str, help='path of datafile')
    parser.add_argument('-d', '--dim', default=[200, 32], type=int, nargs='+', help='the dimenson of the latent vector for student model')
    parser.add_argument('-s','--sample_num', default=500, type=int, help='the number of sampled items')
    parser.add_argument('--subspace_num', default=2, type=int, help='the number of splitted sub space')
    parser.add_argument('--cluster_num', default=16, type=int, help='the number of cluster centroids')
    parser.add_argument('-b', '--batch_size', default=256, type=int, help='the batch size for training')
    parser.add_argument('-e','--epoch', default=2, type=int, help='the number of epoches')
    parser.add_argument('-o','--optim', default='adam', type=str, help='the optimizer for training')
    parser.add_argument('-lr', '--learning_rate', default=0.001, type=float, help='the learning rate for training')
    parser.add_argument('--seed', default=20, type=int, help='random seed values')
    parser.add_argument('--ratio', default=0.8, type=float, help='the spilit ratio of dataset for train and test')
    parser.add_argument('--log_path', default='logs_test', type=str, help='the path for log files')
    parser.add_argument('--num_workers', default=16, type=int, help='the number of workers for dataloader')
    parser.add_argument('--data_dir', default='datasets', type=str, help='the dir of datafiles')
    parser.add_argument('--device', default='cuda', type=str, help='device for training, cuda or gpu')
    parser.add_argument('--model', default='vae', type=str, help='model name')
    parser.add_argument('--sampler', default=4, type=int, help='the sampler, 0 : no sampler, 1: uniform, 2: popular, 3: MidxUni, 4: MidxPop')
    parser.add_argument('--fix_seed', default=True, type=bool, help='whether to fix the seed values')
    parser.add_argument('--step_size', default=5, type=int, help='step size for learning rate discount')
    parser.add_argument('--gamma', default=0.95, type=float, help='discout for lr')
    parser.add_argument('--anneal', default=1.0, type=float, help='parameters for kl loss')
    parser.add_argument('--batch_size_u', default=128, type=int, help='batch size user for inference')
    parser.add_argument('--reduction', default=False, type=bool, help='loss if reduction')
    parser.add_argument('-w', '--weight_decay', default=1e-3, type=float, help='weight decay for the optimizer' )
    parser.add_argument('--multi', default=1, type=int, help='the number of extended items')
    config = parser.parse_args()
    # note: os and time are re-imported locally below; harmless duplicates of
    # the module-level imports
    import os
    if not os.path.exists(config.log_path):
        os.makedirs(config.log_path)
    alg = config.model
    sampler = str(config.sampler) + '_' + str(config.multi) + 'x'
    # log file named <data>_<sampler>_<timestamp>
    ISOTIMEFORMAT = '%m%d-%H%M%S'
    timestamp = str(datetime.datetime.now().strftime(ISOTIMEFORMAT))
    loglogs = '_'.join((config.data, sampler, timestamp))
    log_file_name = os.path.join(config.log_path, loglogs)
    logger = utils.get_logger(log_file_name)
    # os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
    logger.info(config)
    if config.fix_seed:
        utils.setup_seed(config.seed)
    import time
    t0 = time.time()
    m = main(config, logger)
    t1 = time.time()
    logger.info('Eval_Res : NDCG@5,10,50 %.6f, %.6f, %.6f'%(m['item_ndcg'][4], m['item_ndcg'][9], m['item_ndcg'][49]))
    logger.info('Eval_Res : RECALL@5,10,50 %.6f, %.6f, %.6f'%(m['item_recall'][4], m['item_recall'][9], m['item_recall'][49]))
    logger.info("Finish")
    svmat_name = log_file_name + '.mat'
logger.info('Total Running Time: {}'.format(t1-t0) )
# ---- dataset row metadata: 10,590 | 45.862832 | 172 | py ----
# ==== repo: FastVae_Gpu — file: FastVae_Gpu-main/vae_models.py ====
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
class BaseVAE(nn.Module):
    """Variational autoencoder for implicit-feedback collaborative filtering.

    Users are encoded by pooling the embeddings of their interacted items; the
    decoder scores all ``num_item + 1`` items (index 0 is the padding item)
    through a full-softmax linear layer.

    Args:
        num_item: number of real items; one extra row is reserved for padding.
        dims: two-element list ``[encoder_dim, latent_dim]``.
        active: activation name, one of ``'relu'`` / ``'tanh'`` / ``'sigmoid'``.
        dropout: dropout probability applied to the pooled user representation.
    """
    def __init__(self, num_item, dims, active='relu', dropout=0.5):
        """
        dims is a list for latent dims
        """
        super(BaseVAE, self).__init__()
        self.num_item = num_item
        self.dims = dims
        assert len(dims) == 2, 'Not supported dims'
        self.encode_layer_0 = nn.Embedding(self.num_item + 1, dims[0], padding_idx=0)
        # The encoder emits mean and log-variance in one tensor, hence `* 2`.
        self.encode_layer_1 = nn.Linear(dims[0], dims[1] * 2)
        self.decode_layer_0 = nn.Linear(dims[1], dims[0])
        # self._Item_Embeddings = nn.Embedding(self.num_item + 1, dims[0], padding_idx=0)
        self._Item_Embeddings = nn.Linear(dims[0], self.num_item + 1)
        self.dropout = nn.Dropout(dropout)
        # BUG FIX: the original used `==` (a no-op comparison) for the tanh and
        # sigmoid branches, so `self.act` was never assigned for those options.
        if active == 'relu':
            self.act = F.relu
        elif active == 'tanh':
            self.act = F.tanh
        elif active == 'sigmoid':
            self.act = F.sigmoid
        else:
            raise ValueError('Not supported active function')
    def encode(self, item_id):
        """Pool the (padded) item-id matrix into (mu, logvar) of shape (B, dims[1])."""
        # item_id is padded
        count_nonzero = item_id.count_nonzero(dim=1).unsqueeze(-1) # batch_user * 1
        user_embs = self.encode_layer_0(item_id) # batch_user * dims
        # Sum-pool and normalize by sqrt(#items) per user.
        user_embs = torch.sum(user_embs, dim=1) / count_nonzero.pow(0.5)
        user_embs = self.dropout(user_embs)
        h = self.act(user_embs)
        h = self.encode_layer_1(h)
        mu, logvar = h[:, :self.dims[1]], h[:, self.dims[1]:]
        return mu, logvar
    def decode(self, user_emb_encode, items):
        """Score every item (`items` is ignored here: the full softmax covers all)."""
        user_emb = self.decode_layer_0(user_emb_encode)
        # item_embs = self._Item_Embeddings(items)
        # item_embs = F.normalize(item_embs)
        return self._Item_Embeddings(user_emb)
    def reparameterize(self, mu, logvar):
        """Reparameterization trick; returns the mean directly at eval time."""
        if self.training:
            std = torch.exp(0.5 * logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            return mu
    def forward(self, pos_items, sampler=None):
        """Full-softmax forward pass.

        Returns:
            (mu, logvar, loss, sample_time, loss_time); sample_time is 0.0 since
            no negative sampling happens in the base model.
        """
        mu, logvar = self.encode(pos_items)
        z = self.reparameterize(mu, logvar)
        items = torch.arange(self.num_item + 1, device=z.device)
        self.pos_items = pos_items
        part_rats = self.decode(z, items)
        t00 = time.time()
        loss = self.loss_function(part_rats)
        t11 = time.time()
        return mu, logvar, loss, 0.0, t11-t00
    def kl_loss(self, mu, log_var, anneal=1.0, reduction=False):
        """Annealed KL divergence between N(mu, exp(log_var)) and N(0, I)."""
        if reduction is True:
            return -anneal * 0.5 * torch.mean(torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim = 1), dim = 0)
        else:
            return -anneal * 0.5 * torch.sum(torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim = 1), dim = 0)
    def loss_function(self, part_rats, prob_neg=None, pos_rats=None, prob_pos=None, reduction=False):
        """Multinomial log-likelihood over the positives stored in `self.pos_items`.

        Padded positions (item id 0) are masked out via `idx_mtx`.
        """
        # max_v, _ = torch.max(part_rats, dim=-1)
        # chuli = part_rats - max_v.unsqueeze(-1)
        # logits = chuli - torch.log(torch.sum(torch.exp(chuli), dim=-1)).unsqueeze(-1)
        logits = F.log_softmax(part_rats, dim=-1)
        idx_mtx = (self.pos_items > 0).double()
        if reduction is True:
            return -torch.sum(torch.gather(logits, 1, self.pos_items) * idx_mtx, dim=-1).mean()
        else:
            return -torch.sum(torch.gather(logits, 1, self.pos_items)* idx_mtx, dim=-1).sum()
    def _get_user_emb(self, user_his):
        """Decoded (deterministic) user vector from an interaction history."""
        user_emb, _ = self.encode(user_his)
        return self.decode_layer_0(user_emb)
    def _get_item_emb(self):
        # Rows of the softmax layer act as item vectors; row 0 (padding) is dropped.
        return self._Item_Embeddings.weight[1:]
class VAE_Sampler(BaseVAE):
    """VAE trained with a sampled-softmax objective instead of the full softmax.

    A ``sampler`` callable supplies negative items together with their log
    proposal probabilities, which are used as importance weights in the loss.
    """
    def __init__(self, num_item, dims, active='relu', dropout=0.5):
        super(VAE_Sampler, self).__init__(num_item, dims, active=active, dropout=dropout)
        # Unlike BaseVAE, items are scored through an embedding table so that only
        # the sampled rows are touched; index 0 is the padding item.
        self._Item_Embeddings = nn.Embedding(self.num_item + 1, dims[0], padding_idx=0)
    def decode(self, user_emb_encode, items):
        """Dot products between the decoded user vector and the given item ids."""
        user_emb = self.decode_layer_0(user_emb_encode)
        item_embs = self._Item_Embeddings(items)
        return (user_emb.unsqueeze(1) * item_embs).sum(-1)
    def forward(self, pos_items, sampler):
        """Encode the user, draw negatives, and compute the sampled-softmax loss.

        Returns:
            (mu, logvar, loss, sampling_time, loss_time).
        """
        mu, logvar = self.encode(pos_items)
        z = self.reparameterize(mu, logvar)
        user_emb = self.decode_layer_0(z)
        # Negatives are drawn without gradient tracking; timings are returned for
        # profiling the sampler vs. the loss computation.
        with torch.no_grad():
            t0 = time.time()
            pos_prob, neg_items, neg_prob = sampler(user_emb, pos_items)
            t1 = time.time()
        pos_items_emb = self._Item_Embeddings(pos_items)
        neg_items_emb = self._Item_Embeddings(neg_items)
        pos_rat = (user_emb.unsqueeze(1) * pos_items_emb).sum(-1)
        neg_rat = (user_emb.unsqueeze(1) * neg_items_emb).sum(-1)
        t00 = time.time()
        loss = self.loss_function(neg_rat, neg_prob, pos_rat, pos_prob)
        t11 = time.time()
        return mu, logvar, loss, t1 - t0, t11 - t00
    def loss_function(self, part_rats, log_prob_neg=None, pos_rats=None, log_prob_pos=None, reduction=False):
        """Importance-weighted sampled-softmax loss.

        Scores are corrected by subtracting the (detached) log proposal
        probabilities; positions where ``pos_rats`` is 0 are treated as padding
        and masked out.

        BUG FIX: the previous implementation evaluated
        ``log(exp(new_pos) + sum(exp(new_neg)))`` directly, which overflows to
        ``inf`` in float32 once corrected scores reach ~89. ``logaddexp`` /
        ``logsumexp`` compute the same quantity in a numerically stable way.
        """
        idx_mtx = (pos_rats != 0).double()
        new_pos = pos_rats - log_prob_pos.detach()
        new_neg = part_rats - log_prob_neg.detach()
        # final[b, l] = log(exp(new_pos[b, l]) + sum_j exp(new_neg[b, j]))
        final = torch.logaddexp(new_pos, torch.logsumexp(new_neg, dim=-1, keepdim=True))
        if reduction is True:
            return torch.sum((- new_pos + final) * idx_mtx, dim=-1).mean()
        else:
            return torch.sum((- new_pos + final) * idx_mtx, dim=-1).sum()
# ---- dataset row metadata: 6,596 | 38.740964 | 115 | py ----
# ==== repo: FastVae_Gpu — file: FastVae_Gpu-main/dataloader.py ====
import pandas as pd
from torch.utils.data import IterableDataset, Dataset
import torch
from torch.utils.data import Dataset, IterableDataset, DataLoader
import scipy.io as sci
import scipy as sp
import random
import numpy as np
import math
import os
class RecData(object):
    """Loads a user-item rating matrix from disk and splits it into train/test.

    Supported inputs: MATLAB ``.mat`` files with a ``data`` variable, or
    delimited text (``.txt``/``.tsv``/``.csv``) with 1-based ``user item rating``
    rows.
    """
    def __init__(self, dir, file_name):
        # The on-disk file is expected to be named `<file_name>data.mat`.
        file_name = file_name + 'data.mat'
        # file_name = file_name + 'data.txt'
        self.file_name = os.path.join(dir, file_name)
    def get_data(self,ratio):
        """Load the matrix and return (train_mat, test_mat) split per user by `ratio`."""
        mat = self.load_file(filename=self.file_name)
        train_mat, test_mat = self.split_matrix(mat, ratio)
        return train_mat, test_mat
    def load_file(self,filename=''):
        """Read the interaction matrix; returns a scipy sparse matrix."""
        # if file_name.endswith('.mat'):
        # return sci.loadmat(file_name)['data']
        # else:
        # raise ValueError('not supported file type')
        if filename.endswith('.mat'):
            return sci.loadmat(filename)['data']
        elif filename.endswith('.txt') or filename.endswith('.tsv'):
            sep = '\t'
        elif filename.endswith('.csv'):
            sep = ','
        else:
            raise ValueError('not supported file type')
        max_user = -1
        max_item = -1
        row_idx = []
        col_idx = []
        data = []
        for line in open(filename):
            user, item, rating = line.strip().split(sep)
            # Text files use 1-based ids; shift to 0-based.
            user, item, rating = int(user) -1, int(item)-1, float(rating)
            row_idx.append(user)
            col_idx.append(item)
            data.append(rating)
            if user > max_user:
                max_user = user
            if item > max_item:
                max_item = item
        return sp.sparse.csc_matrix((data, (row_idx, col_idx)), (max_user+1, max_item+1))
    def split_matrix(self, mat, ratio=0.8):
        """Split each user's interactions into train (`ratio`) and test (rest).

        The split is random per user (via `random.sample`); call `setup_seed`
        beforehand for reproducibility.
        """
        mat = mat.tocsr() # read row by row, i.e. each row is one user
        m,n = mat.shape
        train_data_indices = []
        train_indptr = [0] * (m+1)
        test_data_indices = []
        test_indptr = [0] * (m+1)
        for i in range(m):
            # All (item, rating) pairs of user i.
            row = [(mat.indices[j], mat.data[j]) for j in range(mat.indptr[i], mat.indptr[i+1])]
            train_idx = random.sample(range(len(row)), round(ratio * len(row)))
            train_binary_idx = np.full(len(row), False)
            train_binary_idx[train_idx] = True
            test_idx = (~train_binary_idx).nonzero()[0]
            for idx in train_idx:
                train_data_indices.append(row[idx])
            train_indptr[i+1] = len(train_data_indices)
            for idx in test_idx:
                test_data_indices.append(row[idx])
            test_indptr[i+1] = len(test_data_indices)
        # Rebuild CSR matrices directly from the accumulated (indices, data) pairs.
        [train_indices, train_data] = zip(*train_data_indices)
        [test_indices, test_data] = zip(*test_data_indices)
        train_mat = sp.sparse.csr_matrix((train_data, train_indices, train_indptr), (m,n))
        test_mat = sp.sparse.csr_matrix((test_data, test_indices, test_indptr), (m,n))
        return train_mat, test_mat
class UserItemData(Dataset):
    """Dataset yielding, per user, the array of item indices the user interacted with.

    Args:
        train_mat: scipy sparse user-item matrix (rows = users).
        train_flag: when True, users are visited in a random permutation;
            otherwise in natural order (useful for deterministic evaluation).

    Note: the original return statement was corrupted by dataset-extraction
    artifacts (trailing ``| 3,526 | ...``); this version restores valid code.
    """
    def __init__(self, train_mat, train_flag=True):
        super(UserItemData, self).__init__()
        self.train = train_mat
        if train_flag is True:
            self.users = np.random.permutation(self.train.shape[0])
        else:
            self.users = np.arange(self.train.shape[0])
    def __len__(self):
        return self.train.shape[0]
    def __getitem__(self, idx):
        # return self.user[idx], self.item[idx]
        # Column indices of the nonzero entries in this user's row.
        pos_idx = self.train[self.users[idx]].nonzero()[1]
        return pos_idx
# ==== repo: FastVae_Gpu — file: FastVae_Gpu-main/utils.py ====
import scipy as sp
import scipy.sparse as ss
import scipy.io as sio
import random
import numpy as np
from typing import List
import logging
import torch
import math
from torch.nn.utils.rnn import pad_sequence
def get_logger(filename, verbosity=1, name=None):
    """Create a logger writing both to ``<filename>.txt`` and to the console.

    Args:
        filename: path prefix of the log file (``.txt`` is appended).
        verbosity: 0 = DEBUG, 1 = INFO (default), 2 = WARNING.
        name: logger name passed to ``logging.getLogger``.
    """
    log_path = filename + '.txt'
    levels = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    fmt = logging.Formatter(
        "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
    )
    logger = logging.getLogger(name)
    logger.setLevel(levels[verbosity])
    # File handler (truncating) first, then a console handler — same format.
    for handler in (logging.FileHandler(log_path, "w"), logging.StreamHandler()):
        handler.setFormatter(fmt)
        logger.addHandler(handler)
    return logger
def setup_seed(seed):
    """Seed every source of randomness in use for reproducible runs.

    Covers Python hashing, the `random` module, NumPy, and PyTorch (CPU and
    all CUDA devices), and forces deterministic cuDNN kernels.
    """
    import os
    import random
    import torch
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels (may be slower, but reproducible).
    torch.backends.cudnn.deterministic = True
def get_max_length(x):
    """Return the largest first-dimension size among the arrays in *x*."""
    return max(item.shape[0] for item in x)
def pad_sequence_int(seq):
    """Right-pad each integer array in *seq* with zeros to the common max length.

    Every id is shifted by +1 so that 0 is free to act as the padding value.

    PERF FIX: the original recomputed the maximum length once per element
    (O(n^2) over the batch); it is now computed a single time.

    Returns:
        list of 1-D integer arrays, all of the same (maximum) length.
    """
    max_len = max(item.shape[0] for item in seq)
    def _pad(arr, target_len):
        return np.concatenate((arr + 1, np.zeros(target_len - len(arr), dtype=np.int32)))
    return [_pad(item, max_len) for item in seq]
def custom_collate_(batch):
    """Collate variable-length id arrays into one zero-padded LongTensor."""
    padded = pad_sequence_int(batch)
    return torch.LongTensor(padded)
class Eval:
    """Static helpers for ranking evaluation of a user x item factor model.

    Computes recall / precision / MAP / NDCG (and AUC / MPR when a full ranking
    is available) against a held-out scipy sparse test matrix.  Ranks are stored
    as sparse matrices whose data entries are 0-based positions of the test
    items in each user's ranked candidate list.
    """
    @staticmethod
    def evaluate_item(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item:np.ndarray, topk:int=50, cutoff:int=50):
        """Rank items for every user with test interactions and compute metrics.

        ``topk < 0`` triggers a full ranking (enables AUC / MPR); otherwise only
        the top-k list is searched.
        """
        train = train.tocsr()
        test = test.tocsr()
        # Keep only users that actually have held-out interactions.
        idx = np.squeeze((test.sum(axis=1) > 0).A)
        train = train[idx, :]
        test = test[idx, :]
        user = user[idx, :]
        N = train.shape[1]
        # Candidate pool size per user: all items minus the training ones.
        # NOTE(review): uses the rating *sum*, which equals the count only for
        # binary (0/1) training data — confirm upstream.
        cand_count = N - train.sum(axis=1)
        if topk <0:
            mat_rank = Eval.predict(train, test, user, item)
        else:
            mat_rank = Eval.topk_search_(train, test, user, item, topk)
        return Eval.compute_item_metric(test, mat_rank, cand_count, cutoff)
    @staticmethod
    def compute_item_metric(test:ss.csr_matrix, mat_rank:ss.csr_matrix, cand_count:np.ndarray, cutoff:int=200):
        """Turn a rank matrix into a dict of metric arrays (indexed by cutoff-1)."""
        rel_count = (test !=0).sum(axis=1)
        # Heuristic: small max rank means `mat_rank` came from a top-k search,
        # in which case AUC/MPR cannot be computed.
        istopk = mat_rank.max() < test.shape[1] * 0.5
        recall, precision, map = Eval.compute_recall_precision(mat_rank, rel_count, cutoff)
        ndcg = Eval.compute_ndcg(test, mat_rank, cutoff)
        if not istopk:
            auc, mpr = Eval.compute_auc(mat_rank, rel_count, cand_count)
            return {'item_recall': recall, 'item_prec': precision, 'item_map': map, 'item_ndcg': ndcg, 'item_mpr':mpr, 'item_auc':auc}
        else:
            return {'item_recall':recall, 'item_prec':precision, 'item_map': map, 'item_ndcg':ndcg}
    @staticmethod
    def compute_ndcg(test, mat_rank, cutoff):
        """NDCG@1..cutoff (plus an overall value in the last slot)."""
        M, _ = test.shape
        mat_rank_ = mat_rank.tocoo()
        user, item, rank = mat_rank_.row, mat_rank_.col, mat_rank_.data
        # Discounted gain of every hit, bucketed by its rank position.
        score = np.squeeze(test[(user, item)].A) / np.log2(rank + 2)
        dcg_score = ss.csr_matrix((score, (user, rank)), shape=test.shape)
        dcg = np.cumsum(dcg_score[:, :cutoff].todense(), axis=1)
        dcg = np.c_[dcg, dcg_score.sum(axis=1)]
        # Ideal DCG per user: ratings sorted descending.
        idcg = np.zeros((M, cutoff+1))
        for i in range(M):
            r = test.data[test.indptr[i]:test.indptr[i+1]]
            idcg_ = np.cumsum(-np.sort(-r) / np.log2(np.array(range(len(r)))+2))
            if cutoff > len(r):
                idcg[i,:] = np.r_[idcg_, np.tile(idcg_[-1], cutoff+1-len(r))]
            else:
                idcg[i,:] = np.r_[idcg_[:cutoff], idcg_[-1]]
        ndcg = dcg / idcg
        ndcg = np.mean(ndcg, axis=0)
        return np.squeeze(ndcg.A)
    @staticmethod
    def compute_recall_precision(mat_rank, user_count, cutoff):
        """Recall@k, precision@k and MAP@k for k = 1..cutoff."""
        user_count = user_count.A.T
        M, _ = mat_rank.shape
        mat_rank_ = mat_rank.tocoo()
        user, rank = mat_rank_.row, mat_rank_.data
        # Binary user-by-rank hit matrix.
        user_rank = ss.csr_matrix((np.ones_like(user), (user, rank)), shape=mat_rank.shape)
        user_rank = user_rank[:,:cutoff].todense()
        user_count_inv = ss.diags(1/user_count, [0])
        cum = np.cumsum(user_rank, axis=1)
        recall = np.mean(user_count_inv * cum, axis=0)
        prec_cum = cum * ss.diags(1/np.array(range(1,cutoff+1)), 0)
        prec = np.mean(prec_cum, axis=0)
        # MAP denominator: min(k, #relevant items of the user).
        div = np.minimum(np.tile(range(1, cutoff+1), (M, 1)), np.tile(user_count.T, (1, cutoff)))
        map = np.mean(np.divide(np.cumsum(np.multiply(prec_cum,user_rank), axis=1), div), axis=0)
        return np.squeeze(recall.A), np.squeeze(prec.A), np.squeeze(map.A)
    @staticmethod
    def compute_auc(mat_rank, rel_count, cand_count):
        """AUC and mean percentile rank; requires a full ranking, not top-k."""
        rel_count = rel_count.A
        cand_count = cand_count.A
        tmp = mat_rank.sum(axis=1)
        mpr = np.mean(tmp / cand_count / rel_count)
        auc_vec = rel_count * cand_count - tmp - rel_count - rel_count * (rel_count - 1) / 2
        auc_vec = auc_vec / ((cand_count - rel_count) * rel_count)
        auc = np.mean(auc_vec)
        return auc, mpr
    @staticmethod
    def evaluate_item_with_code(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray], topk=200, cutoff=200):
        """Evaluate a quantized (codebook) item representation.

        NOTE(review): depends on `topk_search_with_code_fast`, whose body is
        currently disabled and returns None — this path looks non-functional.
        """
        train = train.tocsr()
        test = test.tocsr()
        #result1 = Eval.topk_search_with_code(train, user, item_code, item_center, topk)
        result = Eval.topk_search_with_code_fast(train, user, item_code, item_center, topk)
        return Eval.evaluate_topk(train, test, result, cutoff)
    #@staticmethod
    #def topk_search_approximate(train:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray]):
    @staticmethod
    def topk_search_with_code_fast(train:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray], topk=200):
        """Stub: the fast code-based search (`uts` extension) is commented out."""
        M, _ = train.shape
        traind = [train.indices[train.indptr[i]:train.indptr[i + 1]].tolist() for i in range(M)]
        center = np.concatenate(item_center)
        #result = uts.topk_search_with_code(traind, user, item_code, center, topk)
        #return result.reshape([M, topk])
        return None
    @staticmethod
    def topk_search_with_code(train:ss.csr_matrix, user:np.ndarray, item_code: np.ndarray, item_center: List[np.ndarray], topk=200):
        """Per-user top-k via codebook scores.

        NOTE(review): the scoring lines are commented out, so this currently
        returns an all-zeros result. `np.int` is also deprecated in NumPy >= 1.20.
        """
        item_center = np.stack(item_center, 0) # m x K x D
        M = train.shape[0]
        result = np.zeros((M, topk), dtype=np.int)
        for i in range(M):
            E = train.indices[train.indptr[i]:train.indptr[i + 1]]
            center_score = np.tensordot(item_center, user[i,:], [-1, -1]) # m x K
            #pred = uts.fetch_score(item_code, center_score)
            #pred[E] = -np.inf
            #idx = np.argpartition(pred, -topk)[-topk:]
            #result[i, :] = idx[np.argsort(-pred[idx])]
        return result
    @staticmethod
    def item_reranking(topk_item: np.ndarray, score_func):
        """Re-sort each user's top-k list by `score_func(user, item)` descending."""
        M, K = topk_item.shape
        result = np.zeros_like(topk_item)
        for i in range(M):
            score_item = [(topk_item[i, k], score_func(i, topk_item[i, k])) for k in range(K)]
            result[i,:] = [a for (a, b) in sorted(score_item, key=lambda x: -x[1])]
        return result
    @staticmethod
    def evaluate_topk(train:ss.csr_matrix, test:ss.csr_matrix, topk_item:np.ndarray, cutoff:int=200):
        """Compute metrics from a precomputed per-user top-k item matrix."""
        train = train.tocsr()
        test = test.tocsr()
        result = topk_item
        N = train.shape[1]
        cand_count = N - train.sum(axis=1)
        M = test.shape[0]
        # Collect (user, item, rank) triples for top-k entries that hit test items.
        uir = []
        for i in range(M):
            R = set(test.indices[test.indptr[i]:test.indptr[i+1]])
            for k in range(result.shape[1]):
                if result[i,k] in R:
                    uir.append((i, result[i,k], k))
        user_id, item_id, rank = zip(*uir)
        mat_rank = ss.csr_matrix((rank, (user_id, item_id)), shape=test.shape)
        return Eval.compute_item_metric(test, mat_rank, cand_count, cutoff)
    @staticmethod
    def topk_search(train:ss.csr_matrix, user:np.ndarray, item:np.ndarray, topk:int=200)->np.ndarray:
        """Exact top-k item ids per user, excluding training items.

        NOTE(review): `dtype=np.int` is deprecated in NumPy >= 1.20.
        """
        train = train.tocsr()
        M, _ = train.shape
        item_t = item.T
        result = np.zeros((M, topk), dtype=np.int)
        for i in range(M):
            E = train.indices[train.indptr[i]:train.indptr[i+1]]
            pred = np.matmul(user[i,:], item_t)
            #pred = np.tensordot(user[i,:], item, [0,-1])
            # Mask out items already seen in training.
            pred[E] = -np.inf
            idx = np.argpartition(pred, -topk)[-topk:]
            result[i,:] = idx[np.argsort(-pred[idx])]
        return result
    @staticmethod
    def topk_search_(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item:np.ndarray, topk:int=200)->ss.csr_matrix:
        """Top-k search, then keep only the ranks of items present in `test`."""
        M, _ = train.shape
        #traind = [train.indices[train.indptr[i]:train.indptr[i + 1]].tolist() for i in range(M)]
        #result = uts.topk_search(traind, user, item, topk).reshape([M, topk])
        result = Eval.topk_search(train, user, item, topk)
        uir = []
        for i in range(M):
            R = set(test.indices[test.indptr[i]:test.indptr[i+1]])
            for k in range(topk):
                if result[i,k] in R:
                    uir.append((i, result[i,k], k))
        user_id, item_id, rank = zip(*uir)
        mat_rank = ss.csr_matrix((rank, (user_id, item_id)), shape=test.shape)
        return mat_rank
        #user_id, rank = result.nonzero()
        #item_id = result[(user_id, rank)]
        #mat_rank = sp.csr_matrix((rank, (user_id, item_id)), shape=test.shape)
        #return mat_rank.multiply(test !=0)
    @staticmethod
    def predict(train:ss.csr_matrix, test:ss.csr_matrix, user:np.ndarray, item:np.ndarray)->ss.csr_matrix:
        """Full ranking: rank of every test item among the user's candidates."""
        M, _ = train.shape
        item_t = item.T
        full_rank = np.zeros_like(test.data)
        for i in range(M):
            E = train.indices[train.indptr[i]:train.indptr[i+1]]
            R = test.indices[test.indptr[i]:test.indptr[i+1]]
            U = user[i,:]
            pred = np.matmul(U, item_t)
            pred[E] = -np.inf
            idx = np.argsort(-pred)
            # Invert the sort: rank[j] = position of item j in the ranking.
            rank = np.zeros_like(idx)
            rank[idx] = range(len(idx))
            full_rank[test.indptr[i]:test.indptr[i+1]] = rank[R]
        mat_rank = ss.csr_matrix((full_rank, test.indices, test.indptr), shape=test.shape)
        return mat_rank
    @staticmethod
    def format(metric:dict):
        """Pretty-print a metric dict: sampled at cutoffs 10, 20, ... plus the tail."""
        list_str = []
        for k, v in metric.items():
            if 'ndcg' in k:
                m_str = '{0:11}:[{1}, {2:.4f}]'.format(k ,', '.join('{:.4f}'.format(e) for e in v[(10-1)::10]), v[-1])
            elif not isinstance(v, np.ndarray):
                m_str = '{0:11}:{1:.4f}'.format(k , v)
            else:
                m_str = '{0:11}:[{1}]'.format(k ,', '.join('{:.4f}'.format(e) for e in v[(10-1)::10]))
            list_str.append(m_str)
        return '\n'.join(list_str)
# ---- dataset row metadata: 10,977 | 41.550388 | 166 | py ----
# ==== repo: FastVae_Gpu — file: FastVae_Gpu-main/sampler_gpu_mm.py ====
# The cluster algorithm (K-means) is implemented on the GPU
from operator import imod, neg
from numpy.core.numeric import indices
import scipy.sparse as sps
from sklearn import cluster
from sklearn.cluster import KMeans
import torch
import numpy as np
import torch.nn as nn
from torch._C import device, dtype
def kmeans(X, K_or_center, max_iter=300, verbose=False):
    """Lloyd's k-means on a (possibly GPU) tensor *X* of shape (N, D).

    Args:
        X: data points, one per row.
        K_or_center: either an int K (centers initialized from K random rows
            of X) or an explicit (K, D) tensor of initial centers.
        max_iter: maximum number of Lloyd iterations.
        verbose: print the loss each step when True.

    Returns:
        (centers, assign, assign_matrix, loss) where `assign` holds the cluster
        id of each point and `assign_matrix` is its (N, K) one-hot form.
    """
    n_points = X.size(0)
    if isinstance(K_or_center, int) is True:
        n_clusters = K_or_center
        centers = X[torch.randperm(n_points, device=X.device)[:n_clusters]]
    else:
        n_clusters = K_or_center.size(0)
        centers = K_or_center
    prev_loss = np.inf
    for step in range(max_iter):
        # Squared distances via the ||x||^2 - 2 x.c + ||c||^2 expansion.
        dist = (X * X).sum(-1, keepdim=True) - 2 * (X @ centers.T) + (centers * centers).sum(-1).unsqueeze(0)
        assign = dist.argmin(-1)
        assign_m = torch.zeros(n_points, n_clusters, device=X.device)
        assign_m[(range(n_points), assign)] = 1
        loss = torch.sum(torch.square(X - centers[assign, :])).item()
        if verbose:
            print(f'step:{step:<3d}, loss:{loss:.3f}')
        # Stop on relative improvement below 1e-6.
        if (prev_loss - loss) < prev_loss * 1e-6:
            break
        prev_loss = loss
        # Recompute centers as per-cluster means; reseed empty clusters randomly.
        cluster_count = assign_m.sum(0)
        centers = (assign_m.T @ X) / cluster_count.unsqueeze(-1)
        empty_idx = cluster_count < .5
        ndead = empty_idx.sum().item()
        centers[empty_idx] = X[torch.randperm(n_points, device=X.device)[:ndead]]
    return centers, assign, assign_m, loss
def construct_index(cd01, K):
    """Build a CSR-style index over joint cluster codes.

    Given per-item codes `cd01` in [0, K**2), returns (indices, indptr) such
    that the items of cluster c are `indices[indptr[c]:indptr[c+1]]`.
    """
    # Stable sort keeps the original relative order of items inside a cluster.
    # (The `stable` flag requires PyTorch 1.9+.)
    sorted_codes, item_order = torch.sort(cd01, stable=True)
    cluster_ids, cluster_sizes = torch.unique_consecutive(sorted_codes, return_counts=True)
    counts = torch.zeros(K ** 2 + 1, dtype=torch.long, device=cd01.device)
    counts[cluster_ids + 1] = cluster_sizes
    return item_order, counts.cumsum(dim=-1)
class SamplerBase(nn.Module):
    """Uniform negative sampler: draws `num_neg` random item ids per query.

    Item ids are 1-based; id 0 is reserved for padding.
    """
    def __init__(self, num_items, num_neg, device, **kwargs):
        super().__init__()
        self.num_items = num_items
        self.num_neg = num_neg
        self.device = device
    def forward(self, query, pos_items=None, padding=0):
        """Sample negatives for each query vector.

        Shapes: sequential models pass query (B, L, D) with pos_items (B, L);
        flat models pass query (B, D) with pos_items (B, L).  Returns
        (pos_log_prob or None, neg_items, neg_log_prob) where the negative ids
        have shape query.shape[:-1] + (num_neg,) and every log-probability is
        -log(num_items) under the uniform proposal.
        """
        assert padding == 0
        # One row of negatives per query position (B, or B*L for sequences).
        n_queries = np.prod(query.shape[:-1])
        negatives = torch.randint(1, self.num_items + 1, size=(n_queries, self.num_neg), device=self.device)
        negatives = negatives.view(*query.shape[:-1], -1)
        uniform_logp = -torch.log(self.num_items * torch.ones_like(negatives, dtype=torch.float))
        if pos_items is None:
            return None, negatives, uniform_logp
        pos_logp = -torch.log(self.num_items * torch.ones_like(pos_items, dtype=torch.float))
        return pos_logp, negatives, uniform_logp
class PopularSampler(SamplerBase):
    """Sample negatives proportionally to (a transform of) item popularity.

    `mode` selects the popularity transform: 0 -> log(count + 1),
    1 -> log(count) + 1e-16, 2 -> count ** 0.75.
    """
    def __init__(self, pop_count, num_neg, device, mode=0, **kwargs):
        super().__init__(pop_count.shape[0], num_neg, device)
        pop_count = torch.from_numpy(pop_count).to(self.device)
        if mode == 0:
            pop_count = torch.log(pop_count + 1)
        elif mode == 1:
            pop_count = torch.log(pop_count) + 1e-16
        elif mode == 2:
            pop_count = pop_count**0.75
        # Prepend a zero entry for the padding item (index 0).
        pop_count = torch.cat([torch.zeros(1, device=self.device), pop_count])
        self.pop_prob = pop_count / pop_count.sum()
        # Cumulative table for inverse-CDF sampling via searchsorted.
        self.table = torch.cumsum(self.pop_prob, -1)
        # After the table is built, padding gets probability 1 so that
        # log(pop_prob[0]) is 0 for padded positions.
        self.pop_prob[0] = torch.ones(1, device=self.device)
    def forward(self, query, pos_items=None, padding=0):
        """Draw `num_neg` popularity-distributed negatives per query; see SamplerBase."""
        assert padding == 0
        num_queris = np.prod(query.shape[:-1])
        seeds = torch.rand(num_queris, self.num_neg, device=self.device)
        # Inverse-CDF sampling: each uniform seed maps to an item id.
        neg_items = torch.searchsorted(self.table, seeds)
        neg_items = neg_items.view(*query.shape[:-1], -1)
        neg_prob = torch.log(self.pop_prob[neg_items])
        if pos_items is not None:
            pos_prob = torch.log(self.pop_prob[pos_items])
            return pos_prob, neg_items, neg_prob
        return None, neg_items, neg_prob
class MidxUniform(SamplerBase):
    """
    Midx Sampler with Uniform Variant
    """
    # Items are product-quantized: each embedding is split in two halves, each
    # half is clustered into K centroids, and sampling proceeds in two stages
    # (pick a joint cluster, then an item inside it uniformly).
    def __init__(self, item_embs:torch.tensor, num_neg, device, num_cluster, item_pop:torch.tensor = None, **kwargs):
        super().__init__(item_embs.shape[0], num_neg, device)
        if isinstance(num_cluster, int) is True:
            self.K = num_cluster
        else:
            self.K = num_cluster.size(0)
        # Quantize each embedding half independently.
        embs1, embs2 = torch.chunk(item_embs, 2, dim=-1)
        self.c0, cd0, cd0m, _ = kmeans(embs1, num_cluster)
        self.c1, cd1, cd1m, _ = kmeans(embs2, num_cluster)
        self.c0_ = torch.cat([torch.zeros(1, self.c0.size(1), device=self.device), self.c0], dim=0) ## for retreival probability, considering padding
        self.c1_ = torch.cat([torch.zeros(1, self.c1.size(1), device=self.device), self.c1], dim=0) ## for retreival probability, considering padding
        self.cd0 = torch.cat([torch.tensor([-1]).to(self.device), cd0], dim=0) + 1 ## for retreival probability, considering padding
        self.cd1 = torch.cat([torch.tensor([-1]).to(self.device), cd1], dim=0) + 1 ## for retreival probability, considering padding
        # Joint cluster code per item and a CSR-style index over the codes.
        cd01 = cd0 * self.K + cd1
        self.indices, self.indptr = construct_index(cd01, self.K)
        # wkk[k0, k1] = number (or popularity mass) of items in joint cluster (k0, k1).
        if item_pop is None:
            self.wkk = cd0m.T @ cd1m
        else:
            self.wkk = cd0m.T @ (cd1m * item_pop.view(-1, 1))
    def forward(self, query, pos_items=None, padding=0):
        """Two-stage cluster sampling followed by an item draw inside the cluster."""
        assert padding == 0
        q0, q1 = query.view(-1, query.size(-1)).chunk(2, dim=-1)
        r1 = q1 @ self.c1.T
        r1s = torch.softmax(r1, dim=-1) # num_q x K1
        r0 = q0 @ self.c0.T
        r0s = torch.softmax(r0, dim=-1) # num_q x K0
        # Stage 1: draw the first cluster index, marginalizing over the second.
        s0 = (r1s @ self.wkk.T) * r0s # num_q x K0 | wkk: K0 x K1
        k0 = torch.multinomial(s0, self.num_neg, replacement=True) # num_q x neg
        p0 = torch.gather(r0, -1, k0) # num_q * neg
        # Stage 2: draw the second cluster index conditioned on the first.
        subwkk = self.wkk[k0, :] # num_q x neg x K1
        s1 = subwkk * r1s.unsqueeze(1) # num_q x neg x K1
        k1 = torch.multinomial(s1.view(-1, s1.size(-1)), 1).squeeze(-1).view(*s1.shape[:-1]) # num_q x neg
        p1 = torch.gather(r1, -1, k1) # num_q x neg
        k01 = k0 * self.K + k1 # num_q x neg
        # NOTE(review): p0 + p1 are unnormalized cluster scores, not exact log
        # proposal probabilities — presumably intentional for this sampler; verify.
        p01 = p0 + p1
        neg_items, neg_prob = self.sample_item(k01, p01)
        if pos_items is not None:
            pos_prop = self.compute_item_p(query, pos_items)
            return pos_prop, neg_items.view(*query.shape[:-1], -1), neg_prob.view(*query.shape[:-1], -1)
        return None, neg_items.view(*query.shape[:-1], -1), neg_prob.view(*query.shape[:-1], -1)
    def sample_item(self, k01, p01):
        """Uniformly pick one item inside each sampled joint cluster."""
        item_cnt = self.indptr[k01 + 1] - self.indptr[k01] # num_q x neg, the number of items
        item_idx = torch.floor(item_cnt * torch.rand_like(item_cnt, dtype=torch.float32, device=self.device)).long() # num_q x neg
        neg_items = self.indices[item_idx + self.indptr[k01]] + 1
        neg_prob = p01
        return neg_items, neg_prob
    def compute_item_p(self, query, pos_items):
        """Cluster-score of the given (padded) positive items under the proposal."""
        # query: B x L x D, pos_items: B x L || query: B x D, pos_item: B x L1 || assume padding=0
        k0 = self.cd0[pos_items] # B x L || B x L1
        k1 = self.cd1[pos_items] # B x L || B x L1
        c0 = self.c0_[k0, :] # B x L x D || B x L1 x D
        c1 = self.c1_[k1, :] # B x L x D || B x L1 x D
        q0, q1 = query.chunk(2, dim=-1) # B x L x D || B x D
        if query.dim() == pos_items.dim():
            r = (torch.bmm(c0, q0.unsqueeze(-1)) + torch.bmm(c1, q1.unsqueeze(-1))).squeeze(-1) # B x L1
        else:
            r = torch.sum(c0 * q0, dim=-1) + torch.sum(c1 * q1, dim=-1) # B x L
        return r
class MidxUniPop(MidxUniform):
    """
    Popularity sampling for the final items
    """
    # Same two-stage cluster sampling as MidxUniform, but the item inside the
    # chosen cluster is drawn proportionally to (transformed) popularity.
    def __init__(self, item_embs: np.ndarray, num_neg, device, num_cluster, pop_count, mode=1, **kwargs):
        # NOTE(review): mode 1 here uses log(count + 1) + 1e-6, while
        # PopularSampler's mode 1 uses log(count) + 1e-16 — confirm which is intended.
        if mode == 0:
            pop_count = np.log(pop_count + 1)
        elif mode == 1:
            pop_count = np.log(pop_count + 1) + 1e-6
        elif mode == 2:
            pop_count = pop_count**0.75
        pop_count = torch.tensor(pop_count, dtype=torch.float32, device=device)
        super(MidxUniPop, self).__init__(item_embs, num_neg, device, num_cluster, pop_count)
        self.p = torch.cat([torch.ones(1, device=self.device), pop_count], dim=0) # this is similar, to avoid log 0 !!! in case of zero padding
        # Per-cluster cumulative popularity (normalized), used for inverse-CDF draws.
        self.cp = pop_count[self.indices]
        for c in range(self.K**2):
            start, end = self.indptr[c], self.indptr[c+1]
            if end > start:
                cumsum = self.cp[start:end].cumsum(-1)
                self.cp[start:end] = cumsum / cumsum[-1]
    def forward(self, query, pos_items=None, padding=0):
        return super().forward(query, pos_items=pos_items, padding=padding)
    def sample_item(self, k01, p01):
        """Popularity-weighted item draw inside each sampled joint cluster."""
        # k01 num_q x neg, p01 num_q x neg
        start = self.indptr[k01]
        last = self.indptr[k01 + 1] - 1
        count = last - start + 1
        maxlen = count.max()
        # print(maxlen)
        # Pad every cluster's index range to `maxlen`, clamped at its last item.
        fullrange = start.unsqueeze(-1) + torch.arange(maxlen, device=self.device).reshape(1, 1, maxlen) # num_q x neg x maxlen
        fullrange = torch.minimum(fullrange, last.unsqueeze(-1))
        item_idx = torch.searchsorted(self.cp[fullrange], torch.rand_like(start, dtype=torch.float32, device=self.device).unsqueeze(-1)).squeeze(-1) ## num_q x neg
        item_idx = torch.minimum(item_idx, last)
        neg_items = self.indices[item_idx + self.indptr[k01]] + 1
        # neg_probs = self.p[item_idx + self.indptr[k01] + 1] # plus 1 due to considering padding, since p include num_items + 1 entries
        neg_probs = self.p[neg_items]
        return neg_items, p01 + torch.log(neg_probs)
    def compute_item_p(self, query, pos_items):
        """Cluster score of the positives plus their log popularity weight."""
        r = super().compute_item_p(query, pos_items)
        p_r = self.p[pos_items]
        return r + torch.log(p_r)
if __name__ == '__main__':
    # Smoke test: compare the empirical sampling distribution of the Midx
    # samplers against the exact softmax distribution on random embeddings,
    # then plot both cumulative distributions (items sorted by popularity).
    # NOTE(review): requires a CUDA device and the 'amazoni' dataset on disk.
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = "5"
    device = 'cuda'
    from dataloader import RecData
    from utils import setup_seed
    setup_seed(10)
    data = RecData('datasets', 'amazoni')
    train, test = data.get_data(0.8)
    dim = 200
    user_num, num_items = train.shape
    num_neg = 2000
    num_cluster = 32
    max_iter = 200
    # item_embs = np.random.randn(num_items, dim)
    item_embs = torch.randn(num_items,dim, device=device) * 0.1
    pop_count = np.squeeze(train.sum(axis=0).A)
    device = torch.device(device)
    # sampler0 = SamplerBase(num_items, num_neg, device)
    # sampler1 = PopularSampler(pop_count, num_neg, device)
    sampler2 = MidxUniform(item_embs, num_neg, device, num_cluster)
    sampler3 = MidxUniPop(item_embs, num_neg, device, num_cluster, pop_count)
    batch_size = 1
    query = torch.randn(batch_size, dim, device=device) * 0.1
    # pop_item = torch.randint(0, num_items+1, size=(batch_size))
    # sampler0(query, pop_item)
    # sampler1(query, pop_item)
    # sampler2(query, pop_item)
    # sampler3(query, pop_item)
    # Accumulate how often each item is drawn over max_iter sampling rounds.
    count_tensor = torch.zeros(num_items, dtype=torch.long, device=device)
    for i in range(max_iter):
        _, neg_items, _ = sampler2(query)
        # _, neg_items, _ = sampler3(query)
        # Sampled ids are 1-based (0 is padding); shift back before counting.
        ids, counts = torch.unique(neg_items -1, return_counts=True)
        count_tensor[ids] += counts
        # print(count_tensor.max(), counts.max())
    count_t = count_tensor / count_tensor.sum(-1)
    exact_prob = torch.softmax( torch.matmul(query, item_embs.T), dim=-1).squeeze()
    # =========================================
    # Plot prob
    item_ids = pop_count.argsort()
    exact_prob = exact_prob.cpu().data.numpy()
    count_prob = count_t.cpu().data.numpy()
    import matplotlib.pyplot as plt
    plt.plot(exact_prob[item_ids].cumsum(), label='Softmax', linewidth=4.0)
    plt.plot(count_prob[item_ids].cumsum(), label='Midx_Uni')
    plt.legend()
    plt.savefig('amazoni_check.jpg')
# ---- dataset row metadata: 12,583 | 40.668874 | 163 | py ----
# ==== repo: KSTER — file: KSTER-main/test/unit/test_decoder.py ====
from torch.nn import GRU, LSTM
import torch
from joeynmt.decoders import RecurrentDecoder
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
class TestRecurrentDecoder(TensorTestCase):
    def setUp(self):
        """Build one unidirectional and one bidirectional encoder fixture.

        The unidirectional encoder gets twice the hidden size so that both
        encoders have the same output feature dimension.
        """
        self.emb_size = 10
        self.num_layers = 3
        self.hidden_size = 6
        self.encoder_hidden_size = 3
        self.vocab_size = 5
        seed = 42
        torch.manual_seed(seed)
        bidi_encoder = RecurrentEncoder(emb_size=self.emb_size,
                                        num_layers=self.num_layers,
                                        hidden_size=self.encoder_hidden_size,
                                        bidirectional=True)
        uni_encoder = RecurrentEncoder(emb_size=self.emb_size,
                                       num_layers=self.num_layers,
                                       hidden_size=self.encoder_hidden_size*2,
                                       bidirectional=False)
        self.encoders = [uni_encoder, bidi_encoder]
    def test_recurrent_decoder_size(self):
        """Check layer sizes for every combination of encoder direction,
        hidden-state initialization ('bridge'/'zero'/'last') and input feeding."""
        # test all combinations of bridge, input_feeding, encoder directions
        for encoder in self.encoders:
            for init_hidden in ["bridge", "zero", "last"]:
                for input_feeding in [True, False]:
                    decoder = RecurrentDecoder(hidden_size=self.hidden_size,
                                               encoder=encoder,
                                               attention="bahdanau",
                                               emb_size=self.emb_size,
                                               vocab_size=self.vocab_size,
                                               num_layers=self.num_layers,
                                               init_hidden=init_hidden,
                                               input_feeding=input_feeding)
                    self.assertEqual(decoder.rnn.hidden_size, self.hidden_size)
                    self.assertEqual(decoder.att_vector_layer.out_features,
                                     self.hidden_size)
                    self.assertEqual(decoder.output_layer.out_features,
                                     self.vocab_size)
                    self.assertEqual(decoder.output_size, self.vocab_size)
                    self.assertEqual(decoder.rnn.bidirectional, False)
                    self.assertEqual(decoder.init_hidden_option, init_hidden)
                    # Only the "bridge" option creates a bridge layer mapping
                    # encoder output size to decoder hidden size.
                    if init_hidden == "bridge":
                        self.assertTrue(hasattr(decoder, "bridge_layer"))
                        self.assertEqual(decoder.bridge_layer.out_features,
                                         self.hidden_size)
                        self.assertEqual(decoder.bridge_layer.in_features,
                                         encoder.output_size)
                    else:
                        self.assertFalse(hasattr(decoder, "bridge_layer"))
                    # Input feeding concatenates the previous attention vector
                    # to the embedded input, enlarging the RNN input size.
                    if input_feeding:
                        self.assertEqual(decoder.rnn_input_size,
                                         self.emb_size + self.hidden_size)
                    else:
                        self.assertEqual(decoder.rnn_input_size, self.emb_size)
    def test_recurrent_decoder_type(self):
        """The `rnn_type` string must select the matching torch RNN class."""
        valid_rnn_types = {"gru": GRU, "lstm": LSTM}
        for name, obj in valid_rnn_types.items():
            decoder = RecurrentDecoder(rnn_type=name,
                                       hidden_size=self.hidden_size,
                                       encoder=self.encoders[0],
                                       attention="bahdanau",
                                       emb_size=self.emb_size,
                                       vocab_size=self.vocab_size,
                                       num_layers=self.num_layers,
                                       init_hidden="zero",
                                       input_feeding=False)
            self.assertEqual(type(decoder.rnn), obj)
def test_recurrent_input_dropout(self):
drop_prob = 0.5
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
dropout=drop_prob,
emb_dropout=drop_prob)
input_tensor = torch.Tensor([2, 3, 1, -1])
decoder.train()
dropped = decoder.emb_dropout(input=input_tensor)
# eval switches off dropout
decoder.eval()
no_drop = decoder.emb_dropout(input=input_tensor)
# when dropout is applied, remaining values are divided by drop_prob
self.assertGreaterEqual((no_drop - (drop_prob*dropped)).abs().sum(), 0)
drop_prob = 1.0
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
dropout=drop_prob,
emb_dropout=drop_prob)
all_dropped = decoder.emb_dropout(input=input_tensor)
self.assertEqual(all_dropped.sum(), 0)
decoder.eval()
none_dropped = decoder.emb_dropout(input=input_tensor)
self.assertTensorEqual(no_drop, none_dropped)
self.assertTensorEqual((no_drop - all_dropped), no_drop)
def test_recurrent_hidden_dropout(self):
hidden_drop_prob = 0.5
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
hidden_dropout=hidden_drop_prob)
input_tensor = torch.Tensor([2, 3, 1, -1])
decoder.train()
dropped = decoder.hidden_dropout(input=input_tensor)
# eval switches off dropout
decoder.eval()
no_drop = decoder.hidden_dropout(input=input_tensor)
# when dropout is applied, remaining values are divided by drop_prob
self.assertGreaterEqual((no_drop -
(hidden_drop_prob * dropped)).abs().sum(), 0)
hidden_drop_prob = 1.0
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
hidden_dropout=hidden_drop_prob)
all_dropped = decoder.hidden_dropout(input=input_tensor)
self.assertEqual(all_dropped.sum(), 0)
decoder.eval()
none_dropped = decoder.hidden_dropout(input=input_tensor)
self.assertTensorEqual(no_drop, none_dropped)
self.assertTensorEqual((no_drop - all_dropped), no_drop)
def test_recurrent_freeze(self):
decoder = RecurrentDecoder(hidden_size=self.hidden_size,
encoder=self.encoders[0],
attention="bahdanau",
emb_size=self.emb_size,
vocab_size=self.vocab_size,
num_layers=self.num_layers,
init_hidden="zero",
input_feeding=False,
freeze=True)
for n, p in decoder.named_parameters():
self.assertFalse(p.requires_grad)
    def test_recurrent_forward(self):
        """Run a full decoder forward pass and compare output logits, final
        hidden state and attention vectors against fixed regression targets
        (values recorded under the seed set in setUp)."""
        time_dim = 4
        batch_size = 2
        # make sure the outputs match the targets
        decoder = RecurrentDecoder(hidden_size=self.hidden_size,
                                   encoder=self.encoders[0],
                                   attention="bahdanau",
                                   emb_size=self.emb_size,
                                   vocab_size=self.vocab_size,
                                   num_layers=self.num_layers,
                                   init_hidden="zero",
                                   input_feeding=False)
        encoder_states = torch.rand(size=(batch_size, time_dim,
                                          self.encoders[0].output_size))
        trg_inputs = torch.ones(size=(batch_size, time_dim, self.emb_size))
        # no padding, no mask
        #x_length = torch.Tensor([time_dim]*batch_size).int()
        mask = torch.ones(size=(batch_size, 1, time_dim)).byte()
        # last encoder state serves as encoder_hidden for initialisation
        output, hidden, att_probs, att_vectors = decoder(
            trg_inputs, encoder_hidden=encoder_states[:, -1, :],
            encoder_output=encoder_states, src_mask=mask, unroll_steps=time_dim,
            hidden=None, prev_att_vector=None)
        # shape checks: logits, hidden state, attention weights and vectors
        self.assertEqual(output.shape, torch.Size(
            [batch_size, time_dim, self.vocab_size]))
        self.assertEqual(hidden.shape, torch.Size(
            [batch_size, self.num_layers, self.hidden_size]))
        self.assertEqual(att_probs.shape, torch.Size(
            [batch_size, time_dim, time_dim]))
        self.assertEqual(att_vectors.shape, torch.Size(
            [batch_size, time_dim, self.hidden_size]))
        # regression targets recorded for torch.manual_seed(42)
        hidden_target = torch.Tensor(
            [[[ 0.1814, 0.5468, -0.4717, -0.7580, 0.5834, -0.4018],
              [ 0.4649, 0.5484, -0.2702, 0.4545, 0.1983, 0.2771],
              [-0.1752, -0.4215, 0.1941, -0.3975, -0.2317, -0.5566]],
             [[ 0.1814, 0.5468, -0.4717, -0.7580, 0.5834, -0.4018],
              [ 0.4649, 0.5484, -0.2702, 0.4545, 0.1983, 0.2771],
              [-0.1752, -0.4215, 0.1941, -0.3975, -0.2317, -0.5566]]])
        output_target = torch.Tensor(
            [[[ 0.2702, -0.1988, -0.1985, -0.2998, -0.2564],
              [ 0.2719, -0.2075, -0.2017, -0.2988, -0.2595],
              [ 0.2720, -0.2143, -0.2084, -0.3024, -0.2537],
              [ 0.2714, -0.2183, -0.2135, -0.3061, -0.2468]],
             [[ 0.2757, -0.1744, -0.1888, -0.3038, -0.2466],
              [ 0.2782, -0.1837, -0.1928, -0.3028, -0.2505],
              [ 0.2785, -0.1904, -0.1994, -0.3066, -0.2448],
              [ 0.2777, -0.1943, -0.2042, -0.3105, -0.2379]]])
        att_vectors_target = torch.Tensor(
            [[[-0.6196, -0.0505, 0.4900, 0.6286, -0.5007, -0.3721],
              [-0.6389, -0.0337, 0.4998, 0.6458, -0.5052, -0.3579],
              [-0.6396, -0.0158, 0.5058, 0.6609, -0.5035, -0.3660],
              [-0.6348, -0.0017, 0.5090, 0.6719, -0.5013, -0.3771]],
             [[-0.5697, -0.0887, 0.4515, 0.6128, -0.4713, -0.4068],
              [-0.5910, -0.0721, 0.4617, 0.6305, -0.4760, -0.3930],
              [-0.5918, -0.0544, 0.4680, 0.6461, -0.4741, -0.4008],
              [-0.5866, -0.0405, 0.4712, 0.6574, -0.4718, -0.4116]]])
        self.assertTensorAlmostEqual(hidden_target, hidden)
        self.assertTensorAlmostEqual(output_target, output)
        self.assertTensorAlmostEqual(att_vectors, att_vectors_target)
        # att_probs are a distribution over the source positions:
        # each row must sum to 1 (not over the output vocabulary)
        self.assertTensorAlmostEqual(att_probs.sum(2),
                                     torch.ones(batch_size, time_dim))
| 12,263 | 51.187234 | 80 | py |
KSTER | KSTER-main/test/unit/test_loss.py | import torch
from joeynmt.loss import XentLoss
from .test_helpers import TensorTestCase
class TestTransformerUtils(TensorTestCase):
    """Tests for the cross-entropy loss with and without label smoothing."""

    def setUp(self):
        torch.manual_seed(42)

    def test_label_smoothing(self):
        """With smoothing, the target distribution spreads mass uniformly
        over the non-pad classes and the loss reflects the soft targets."""
        loss_fn = XentLoss(pad_index=0, smoothing=0.4)
        # batch x seq_len x vocab_size: 3 x 2 x 5
        probs = torch.FloatTensor(
            [[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
             [[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
             [[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]]]
        )
        # batch x seq_len: 3 x 2 (0 is the pad index)
        gold = torch.LongTensor([[2, 1],
                                 [2, 0],
                                 [1, 0]])
        # the smoothing function yields one soft row per (flattened) target
        smoothed = loss_fn._smooth_targets(targets=gold.view(-1),
                                           vocab_size=probs.size(-1))
        self.assertTensorAlmostEqual(
            smoothed,
            torch.Tensor(
                [[0.0000, 0.1333, 0.6000, 0.1333, 0.1333],
                 [0.0000, 0.6000, 0.1333, 0.1333, 0.1333],
                 [0.0000, 0.1333, 0.6000, 0.1333, 0.1333],
                 [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                 [0.0000, 0.6000, 0.1333, 0.1333, 0.1333],
                 [0.0000, 0.0000, 0.0000, 0.0000, 0.0000]])
        )
        # the true class keeps exactly 1 - smoothing of the mass
        assert torch.max(smoothed) == 1 - 0.4
        # full loss over the batch, computed from log-probabilities
        loss_value = loss_fn(probs.log(), gold)
        self.assertTensorAlmostEqual(loss_value, 2.1326)

    def test_no_label_smoothing(self):
        """Without smoothing the smoothed targets degenerate to one-hot."""
        loss_fn = XentLoss(pad_index=0, smoothing=0.0)
        # batch x seq_len x vocab_size: 3 x 2 x 5
        probs = torch.FloatTensor(
            [[[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
             [[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]],
             [[0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1]]]
        )
        # batch x seq_len: 3 x 2 (0 is the pad index)
        gold = torch.LongTensor([[2, 1],
                                 [2, 0],
                                 [1, 0]])
        # the smoothing function must still produce one-hot rows
        smoothed = loss_fn._smooth_targets(targets=gold.view(-1),
                                           vocab_size=probs.size(-1))
        assert torch.max(smoothed) == 1
        assert torch.min(smoothed) == 0
        self.assertTensorAlmostEqual(
            smoothed,
            torch.Tensor(
                [[0., 0., 1., 0., 0.],
                 [0., 1., 0., 0., 0.],
                 [0., 0., 1., 0., 0.],
                 [0., 0., 0., 0., 0.],
                 [0., 1., 0., 0., 0.],
                 [0., 0., 0., 0., 0.]])
        )
        loss_value = loss_fn(probs.log(), gold)
        self.assertTensorAlmostEqual(loss_value, 5.6268)
| 3,037 | 34.325581 | 78 | py |
KSTER | KSTER-main/test/unit/test_weight_tying.py | from torch.nn import GRU, LSTM
import torch
import numpy as np
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
from joeynmt.model import build_model
from joeynmt.vocabulary import Vocabulary
import copy
class TestWeightTying(TensorTestCase):
    """Tests for weight tying between source/target embeddings and the
    output (softmax) layer of a freshly built model."""
    def setUp(self):
        self.seed = 42
        vocab_size = 30
        tokens = ["tok{:02d}".format(i) for i in range(vocab_size)]
        # the very same vocabulary object serves as source and target vocab
        self.vocab = Vocabulary(tokens=tokens)
        # minimal recurrent model config; individual tests flip the tying
        # flags and adjust layer types/sizes as required
        self.cfg = {
            "model": {
                "tied_embeddings": False,
                "tied_softmax": False,
                "encoder": {
                    "type": "recurrent",
                    "hidden_size": 64,
                    "embeddings": {"embedding_dim": 32},
                    "num_layers": 1,
                },
                "decoder": {
                    "type": "recurrent",
                    "hidden_size": 64,
                    "embeddings": {"embedding_dim": 32},
                    "num_layers": 1,
                },
            }
        }
    def test_tied_embeddings(self):
        """``tied_embeddings`` must share one embedding module for src/trg."""
        torch.manual_seed(self.seed)
        cfg = copy.deepcopy(self.cfg)
        cfg["model"]["tied_embeddings"] = True
        cfg["model"]["tied_softmax"] = False
        src_vocab = trg_vocab = self.vocab
        model = build_model(cfg["model"],
                            src_vocab=src_vocab, trg_vocab=trg_vocab)
        self.assertEqual(src_vocab.itos, trg_vocab.itos)
        # not just equal values: src and trg embedding are the same object
        self.assertEqual(model.src_embed, model.trg_embed)
        self.assertTensorEqual(model.src_embed.lut.weight,
                               model.trg_embed.lut.weight)
        self.assertEqual(model.src_embed.lut.weight.shape,
                         model.trg_embed.lut.weight.shape)
    def test_tied_softmax(self):
        """``tied_softmax`` shares the trg embedding weights with the
        decoder output layer (embedding_dim must equal hidden size)."""
        torch.manual_seed(self.seed)
        cfg = copy.deepcopy(self.cfg)
        cfg["model"]["decoder"]["type"] = "transformer"
        cfg["model"]["tied_embeddings"] = False
        cfg["model"]["tied_softmax"] = True
        cfg["model"]["decoder"]["embeddings"]["embedding_dim"] = 64
        src_vocab = trg_vocab = self.vocab
        model = build_model(cfg["model"],
                            src_vocab=src_vocab, trg_vocab=trg_vocab)
        self.assertEqual(model.trg_embed.lut.weight.shape,
                         model.decoder.output_layer.weight.shape)
        self.assertTensorEqual(model.trg_embed.lut.weight,
                               model.decoder.output_layer.weight)
    def test_tied_src_trg_softmax(self):
        # test source embedding, target embedding, and softmax tying
        torch.manual_seed(self.seed)
        cfg = copy.deepcopy(self.cfg)
        cfg["model"]["decoder"]["type"] = "transformer"
        cfg["model"]["tied_embeddings"] = True
        cfg["model"]["tied_softmax"] = True
        cfg["model"]["decoder"]["embeddings"]["embedding_dim"] = 64
        cfg["model"]["encoder"]["embeddings"]["embedding_dim"] = 64
        src_vocab = trg_vocab = self.vocab
        model = build_model(cfg["model"],
                            src_vocab=src_vocab, trg_vocab=trg_vocab)
        src_weight = model.src_embed.lut.weight
        trg_weight = model.trg_embed.lut.weight
        output_weight = model.decoder.output_layer.weight
        self.assertTensorEqual(src_weight, trg_weight)
        self.assertTensorEqual(src_weight, output_weight)
        self.assertEqual(src_weight.shape, trg_weight.shape)
        self.assertEqual(trg_weight.shape, output_weight.shape)
        # mutate through one view; the change must show in all three views
        output_weight.data.fill_(3.)
        # 6528 == 3.0 * numel, i.e. numel = 2176 = 34 * 64; presumably
        # 30 tokens plus 4 special symbols -- TODO confirm specials count
        self.assertEqual(output_weight.sum().item(), 6528)
        self.assertEqual(output_weight.sum().item(), src_weight.sum().item())
        self.assertEqual(output_weight.sum().item(), trg_weight.sum().item())
        self.assertEqual(src_weight.sum().item(), trg_weight.sum().item())
| 3,861 | 34.759259 | 77 | py |
KSTER | KSTER-main/test/unit/test_transformer_utils.py | import torch
from joeynmt.transformer_layers import PositionalEncoding
from .test_helpers import TensorTestCase
class TestTransformerUtils(TensorTestCase):
    """Tests for transformer helper modules (positional encoding)."""

    def setUp(self):
        torch.manual_seed(42)

    def test_position_encoding(self):
        """Adding positional encodings to an all-zero input must return the
        raw encoding table, truncated to the input length."""
        emb_dim = 12
        zeros = torch.zeros([2, 3, emb_dim])
        encoding = PositionalEncoding(emb_dim)
        result = encoding(zeros)
        # the stored table's last dimension matches the embedding size
        self.assertEqual(encoding.pe.size(2), emb_dim)
        # zero input + encoding == the encoding itself
        self.assertTensorAlmostEqual(result, encoding.pe[:, :zeros.size(1)])
| 595 | 23.833333 | 66 | py |
KSTER | KSTER-main/test/unit/test_batch.py | import torch
import random
from torchtext.data.batch import Batch as TorchTBatch
from joeynmt.batch import Batch
from joeynmt.data import load_data, make_data_iter
from joeynmt.constants import PAD_TOKEN
from .test_helpers import TensorTestCase
class TestData(TensorTestCase):
    """Tests for batching and iteration over the toy data set.

    Relies on the corpora under ``test/data/toy`` being present; the
    expected tensors below are regression values for seed 42."""
    def setUp(self):
        self.train_path = "test/data/toy/train"
        self.dev_path = "test/data/toy/dev"
        self.test_path = "test/data/toy/test"
        self.levels = ["char", "word"] # bpe is equivalently processed to word
        self.max_sent_length = 20
        # minimal data config
        self.data_cfg = {"src": "de", "trg": "en", "train": self.train_path,
                         "dev": self.dev_path, "level": "char",
                         "lowercase": True,
                         "max_sent_length": self.max_sent_length}
        # load the data
        self.train_data, self.dev_data, self.test_data, src_vocab, trg_vocab = \
            load_data(self.data_cfg)
        self.pad_index = trg_vocab.stoi[PAD_TOKEN]
        # random seeds (fixed so batch order/content are reproducible)
        seed = 42
        torch.manual_seed(seed)
        random.seed(42)
    def testBatchTrainIterator(self):
        """Shuffled training iterator: flags, first batch, total coverage."""
        batch_size = 4
        self.assertEqual(len(self.train_data), 27)
        # make data iterator
        train_iter = make_data_iter(self.train_data, train=True, shuffle=True,
                                    batch_size=batch_size)
        self.assertEqual(train_iter.batch_size, batch_size)
        self.assertTrue(train_iter.shuffle)
        self.assertTrue(train_iter.train)
        self.assertEqual(train_iter.epoch, 0)
        self.assertEqual(train_iter.iterations, 0)
        # regression targets for the first batch under the fixed seeds
        expected_src0 = torch.Tensor(
            [[21, 10, 4, 16, 4, 5, 21, 4, 12, 33, 6, 14, 4, 12, 23, 6, 18, 4,
              6, 9, 3],
             [20, 28, 4, 10, 28, 4, 6, 5, 14, 8, 6, 15, 4, 5, 7, 17, 11, 27,
              6, 9, 3],
             [24, 8, 7, 5, 24, 10, 12, 14, 5, 18, 4, 7, 17, 11, 4, 11, 4, 6,
              25, 3, 1]]).long()
        expected_src0_len = torch.Tensor([21, 21, 20]).long()
        expected_trg0 = torch.Tensor(
            [[6, 4, 27, 5, 8, 4, 5, 31, 4, 26, 7, 6, 10, 20, 11,
              9, 3],
             [8, 7, 6, 10, 17, 4, 13, 5, 15, 9, 3, 1, 1, 1, 1,
              1, 1],
             [12, 5, 4, 25, 7, 6, 8, 4, 7, 6, 18, 18, 11, 10, 12,
              23, 3]]).long()
        expected_trg0_len = torch.Tensor([18, 12, 18]).long()
        total_samples = 0
        for b in iter(train_iter):
            b = Batch(torch_batch=b, pad_index=self.pad_index)
            if total_samples == 0:
                self.assertTensorEqual(b.src, expected_src0)
                self.assertTensorEqual(b.src_length, expected_src0_len)
                self.assertTensorEqual(b.trg, expected_trg0)
                self.assertTensorEqual(b.trg_length, expected_trg0_len)
            total_samples += b.nseqs
            self.assertLessEqual(b.nseqs, batch_size)
        # every training example must be visited exactly once per epoch
        self.assertEqual(total_samples, len(self.train_data))
    def testBatchDevIterator(self):
        """Unshuffled dev iterator: flags, sorting by src length, coverage."""
        batch_size = 3
        self.assertEqual(len(self.dev_data), 20)
        # make data iterator
        dev_iter = make_data_iter(self.dev_data, train=False, shuffle=False,
                                  batch_size=batch_size)
        self.assertEqual(dev_iter.batch_size, batch_size)
        self.assertFalse(dev_iter.shuffle)
        self.assertFalse(dev_iter.train)
        self.assertEqual(dev_iter.epoch, 0)
        self.assertEqual(dev_iter.iterations, 0)
        # regression targets for the first (unshuffled) batch
        expected_src0 = torch.Tensor(
            [[29, 8, 5, 22, 5, 8, 16, 7, 19, 5, 22, 5, 24, 8, 7, 5, 7, 19,
              16, 16, 5, 31, 10, 19, 11, 8, 17, 15, 10, 6, 18, 5, 7, 4, 10, 6,
              5, 25, 3],
             [10, 17, 11, 5, 28, 12, 4, 23, 4, 5, 0, 10, 17, 11, 5, 22, 5, 14,
              8, 7, 7, 5, 10, 17, 11, 5, 14, 8, 5, 31, 10, 6, 5, 9, 3, 1,
              1, 1, 1],
             [29, 8, 5, 22, 5, 18, 23, 13, 4, 6, 5, 13, 8, 18, 5, 9, 3, 1,
              1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1]]).long()
        expected_src0_len = torch.Tensor([39, 35, 17]).long()
        expected_trg0 = torch.Tensor(
            [[13, 11, 12, 4, 22, 4, 12, 5, 4, 22, 4, 25, 7, 6, 8, 4, 14, 12,
              4, 24, 14, 5, 7, 6, 26, 17, 14, 10, 20, 4, 23, 3],
             [14, 0, 28, 4, 7, 6, 18, 18, 13, 4, 8, 5, 4, 24, 11, 4, 7, 11,
              16, 11, 4, 9, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1],
             [13, 11, 12, 4, 22, 4, 7, 11, 27, 27, 5, 4, 9, 3, 1, 1, 1, 1,
              1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]).long()
        expected_trg0_len = torch.Tensor([33, 24, 15]).long()
        total_samples = 0
        for b in iter(dev_iter):
            self.assertEqual(type(b), TorchTBatch)
            b = Batch(b, pad_index=self.pad_index)
            # test the sorting by src length
            self.assertEqual(type(b), Batch)
            before_sort = b.src_length
            b.sort_by_src_length()
            after_sort = b.src_length
            # after sorting, lengths must be in descending order
            self.assertTensorEqual(torch.sort(before_sort, descending=True)[0],
                                   after_sort)
            self.assertEqual(type(b), Batch)
            if total_samples == 0:
                self.assertTensorEqual(b.src, expected_src0)
                self.assertTensorEqual(b.src_length, expected_src0_len)
                self.assertTensorEqual(b.trg, expected_trg0)
                self.assertTensorEqual(b.trg_length, expected_trg0_len)
            total_samples += b.nseqs
            self.assertLessEqual(b.nseqs, batch_size)
        # every dev example must be visited exactly once
        self.assertEqual(total_samples, len(self.dev_data))
| 5,692 | 40.554745 | 80 | py |
KSTER | KSTER-main/test/unit/test_model_init.py | from torch.nn import GRU, LSTM
import torch
from torch import nn
import numpy as np
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
from joeynmt.model import build_model
from joeynmt.vocabulary import Vocabulary
import copy
class TestModelInit(TensorTestCase):
    """Checks parameter initialisation of freshly built transformer models."""

    def setUp(self):
        self.seed = 42
        self.hidden_size = 64
        token_list = ["tok{:02d}".format(i) for i in range(30)]
        self.vocab = Vocabulary(tokens=token_list)
        # one transformer layer each for encoder and decoder; the two
        # sub-configs are independent copies so tests may mutate them freely
        layer_cfg = {
            "type": "transformer",
            "hidden_size": self.hidden_size,
            "embeddings": {"embedding_dim": self.hidden_size},
            "num_layers": 1,
        }
        self.cfg = {
            "model": {
                "tied_embeddings": False,
                "tied_softmax": False,
                "encoder": copy.deepcopy(layer_cfg),
                "decoder": copy.deepcopy(layer_cfg),
            }
        }

    def test_transformer_layer_norm_init(self):
        """Every LayerNorm must start as the identity (weight 1, bias 0)."""
        torch.manual_seed(self.seed)
        cfg = copy.deepcopy(self.cfg)
        vocab = self.vocab
        model = build_model(cfg["model"], src_vocab=vocab, trg_vocab=vocab)

        def assert_identity_layer_norms(module: nn.Module):
            # recursively walk the module tree, inspecting LayerNorm leaves
            for _, sub in module.named_children():
                if isinstance(sub, nn.LayerNorm):
                    self.assertTensorEqual(sub.weight,
                                           torch.ones([self.hidden_size]))
                    self.assertTensorEqual(sub.bias,
                                           torch.zeros([self.hidden_size]))
                else:
                    assert_identity_layer_norms(sub)

        assert_identity_layer_norms(model)
| 1,962 | 31.180328 | 75 | py |
KSTER | KSTER-main/test/unit/test_encoder.py | from torch.nn import GRU, LSTM
import torch
from joeynmt.encoders import RecurrentEncoder
from .test_helpers import TensorTestCase
class TestRecurrentEncoder(TensorTestCase):
    """Tests for the recurrent encoder: sizes, RNN type, dropout behaviour,
    parameter freezing and a seeded forward-pass regression check."""
    def setUp(self):
        self.emb_size = 10
        self.num_layers = 3
        self.hidden_size = 7
        seed = 42
        torch.manual_seed(seed)
    def test_recurrent_encoder_size(self):
        """Output size must double when the encoder is bidirectional."""
        for bidirectional in [True, False]:
            directional_factor = 2 if bidirectional else 1
            encoder = RecurrentEncoder(hidden_size=self.hidden_size,
                                       emb_size=self.emb_size,
                                       num_layers=self.num_layers,
                                       bidirectional=bidirectional)
            self.assertEqual(encoder.rnn.hidden_size, self.hidden_size)
            # output size is affected by bidirectionality
            self.assertEqual(encoder.output_size,
                             self.hidden_size*directional_factor)
            self.assertEqual(encoder.rnn.bidirectional, bidirectional)
    def test_recurrent_encoder_type(self):
        """The ``rnn_type`` string must select the matching torch RNN class."""
        valid_rnn_types = {"gru": GRU, "lstm": LSTM}
        for name, obj in valid_rnn_types.items():
            encoder = RecurrentEncoder(rnn_type=name)
            self.assertEqual(type(encoder.rnn), obj)
    def test_recurrent_input_dropout(self):
        """Embedding dropout must be active in train mode, a no-op in eval
        mode, and must rescale surviving activations by 1 / (1 - p).

        The original assertion ``(no_drop - (drop_prob*dropped)).abs().sum()
        >= 0`` was vacuous (an abs-sum is always non-negative), so it is
        replaced by element-wise checks of the inverted-dropout contract.
        """
        drop_prob = 0.5
        encoder = RecurrentEncoder(dropout=drop_prob, emb_dropout=drop_prob)
        input_tensor = torch.Tensor([2, 3, 1, -1])
        encoder.train()
        dropped = encoder.emb_dropout(input=input_tensor)
        # eval switches dropout off completely: output == input
        encoder.eval()
        no_drop = encoder.emb_dropout(input=input_tensor)
        self.assertTensorEqual(no_drop, input_tensor)
        # in train mode every entry is either zeroed out or rescaled by
        # 1 / (1 - p) (inverted dropout, see torch.nn.Dropout)
        keep_scale = 1.0 / (1.0 - drop_prob)
        for original, result in zip(input_tensor.tolist(), dropped.tolist()):
            self.assertTrue(result == 0.0
                            or abs(result - original * keep_scale) < 1e-6)
        drop_prob = 1.0
        encoder = RecurrentEncoder(dropout=drop_prob, emb_dropout=drop_prob)
        # p == 1.0 removes everything in train mode (modules start in train)
        all_dropped = encoder.emb_dropout(input=input_tensor)
        self.assertEqual(all_dropped.sum(), 0)
        # ... but dropout is still the identity in eval mode
        encoder.eval()
        none_dropped = encoder.emb_dropout(input=input_tensor)
        self.assertTensorEqual(no_drop, none_dropped)
        self.assertTensorEqual((no_drop - all_dropped), no_drop)
    def test_recurrent_freeze(self):
        """With ``freeze=True`` no encoder parameter may require gradients."""
        encoder = RecurrentEncoder(freeze=True)
        for n, p in encoder.named_parameters():
            self.assertFalse(p.requires_grad)
    def test_recurrent_forward(self):
        """Seeded forward pass compared against recorded regression values."""
        time_dim = 4
        batch_size = 2
        bidirectional = True
        directions = 2 if bidirectional else 1
        encoder = RecurrentEncoder(emb_size=self.emb_size,
                                   num_layers=self.num_layers,
                                   hidden_size=self.hidden_size,
                                   bidirectional=bidirectional)
        x = torch.rand(size=(batch_size, time_dim, self.emb_size))
        # no padding, no mask
        x_length = torch.Tensor([time_dim]*batch_size).int()
        mask = torch.ones_like(x)
        output, hidden = encoder(embed_src=x, src_length=x_length, mask=mask)
        self.assertEqual(output.shape, torch.Size(
            [batch_size, time_dim, directions*self.hidden_size]))
        self.assertEqual(hidden.shape, torch.Size(
            [batch_size, directions*self.hidden_size]))
        # regression targets recorded for torch.manual_seed(42)
        hidden_target = torch.Tensor(
            [[0.1323, 0.0125, 0.2900, -0.0725, -0.0102, -0.4405,
              0.1226, -0.3333, -0.3186, -0.2411, 0.1790, 0.1281,
              0.0739, -0.0536],
             [0.1431, 0.0085, 0.2828, -0.0933, -0.0139, -0.4525,
              0.0946, -0.3279, -0.3001, -0.2223, 0.2023, 0.0708,
              0.0131, -0.0124]])
        # NOTE(review): target carries an extra leading dim ([[[[...); the
        # comparison relies on broadcasting in the assert helper -- confirm
        output_target = torch.Tensor(
            [[[[ 0.0041, 0.0324, 0.0846, -0.0056, 0.0353, -0.2528, 0.0289,
                -0.3333, -0.3186, -0.2411, 0.1790, 0.1281, 0.0739, -0.0536],
               [ 0.0159, 0.0248, 0.1496, -0.0176, 0.0457, -0.3839, 0.0780,
                -0.3137, -0.2731, -0.2310, 0.1866, 0.0758, 0.0366, -0.0069],
               [ 0.0656, 0.0168, 0.2182, -0.0391, 0.0214, -0.4389, 0.1100,
                -0.2625, -0.1970, -0.2249, 0.1374, 0.0337, 0.0139, 0.0284],
               [ 0.1323, 0.0125, 0.2900, -0.0725, -0.0102, -0.4405, 0.1226,
                -0.1649, -0.1023, -0.1823, 0.0712, 0.0039, -0.0228, 0.0444]],
              [[ 0.0296, 0.0254, 0.1007, -0.0225, 0.0207, -0.2612, 0.0061,
                -0.3279, -0.3001, -0.2223, 0.2023, 0.0708, 0.0131, -0.0124],
               [ 0.0306, 0.0096, 0.1566, -0.0386, 0.0387, -0.3958, 0.0556,
                -0.3034, -0.2701, -0.2165, 0.2061, 0.0364, -0.0012, 0.0184],
               [ 0.0842, 0.0075, 0.2181, -0.0696, 0.0121, -0.4389, 0.0874,
                -0.2432, -0.1979, -0.2168, 0.1519, 0.0066, -0.0080, 0.0485],
               [ 0.1431, 0.0085, 0.2828, -0.0933, -0.0139, -0.4525, 0.0946,
                -0.1608, -0.1140, -0.1646, 0.0796, -0.0202, -0.0207, 0.0379]]]])
        self.assertTensorAlmostEqual(hidden_target, hidden)
        self.assertTensorAlmostEqual(output_target, output)
| 5,086 | 45.669725 | 79 | py |
KSTER | KSTER-main/test/unit/test_search.py | import torch
import numpy as np
from joeynmt.search import greedy, recurrent_greedy, transformer_greedy
from joeynmt.search import beam_search
from joeynmt.decoders import RecurrentDecoder, TransformerDecoder
from joeynmt.encoders import RecurrentEncoder
from joeynmt.embeddings import Embeddings
from joeynmt.model import Model
from joeynmt.vocabulary import Vocabulary
from .test_helpers import TensorTestCase
# TODO for transformer and rnn, make sure both return the same result for
# beam_size<2 and greedy decoding
class TestSearch(TensorTestCase):
    """Shared fixture values for the greedy / beam search test cases."""

    def setUp(self):
        self.emb_size = 12
        self.num_layers = 3
        self.hidden_size = 12
        self.ff_size = 24
        self.num_heads = 4
        self.dropout = 0.
        self.encoder_hidden_size = 3
        self.vocab = Vocabulary(tokens=['word'])
        # one token plus the special symbols = 5
        self.vocab_size = len(self.vocab)
        torch.manual_seed(42)
        # only the pad index is needed directly by these tests
        self.pad_index = 1
class TestSearchTransformer(TestSearch):
    """Greedy and beam search over a randomly initialised transformer
    decoder; expected outputs are regression values for the fixed seed."""
    def _build(self, batch_size):
        """Construct decoder, random encoder output and mask for a batch."""
        src_time_dim = 4
        vocab_size = 7
        emb = Embeddings(embedding_dim=self.emb_size, vocab_size=vocab_size,
                         padding_idx=self.pad_index)
        decoder = TransformerDecoder(
            num_layers=self.num_layers, num_heads=self.num_heads,
            hidden_size=self.hidden_size, ff_size=self.ff_size,
            dropout=self.dropout, emb_dropout=self.dropout,
            vocab_size=vocab_size)
        encoder_output = torch.rand(
            size=(batch_size, src_time_dim, self.hidden_size))
        # deterministic (seeded) uniform re-initialisation of all weights
        for p in decoder.parameters():
            torch.nn.init.uniform_(p, -0.5, 0.5)
        src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
        encoder_hidden = None # unused
        model = Model(encoder=None, decoder=decoder,
                      src_embed=emb, trg_embed=emb,
                      src_vocab=self.vocab, trg_vocab=self.vocab)
        return src_mask, model, encoder_output, encoder_hidden
    def test_transformer_greedy(self):
        """Greedy decoding yields the recorded token sequence per batch."""
        batch_size = 2
        max_output_length = 3
        src_mask, model, encoder_output, encoder_hidden = self._build(
            batch_size=batch_size)
        output, attention_scores = transformer_greedy(
            src_mask=src_mask, max_output_length=max_output_length, model=model,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        # Transformer greedy doesn't return attention scores
        self.assertIsNone(attention_scores)
        # batch x time
        self.assertEqual(output.shape, (batch_size, max_output_length))
        np.testing.assert_equal(output, [[5, 5, 5], [5, 5, 5]])
    def test_transformer_beam1(self):
        """Beam search with size 1 must match greedy decoding exactly."""
        batch_size = 2
        beam_size = 1
        alpha = 1.
        max_output_length = 3
        src_mask, model, encoder_output, encoder_hidden = self._build(
            batch_size=batch_size)
        output, attention_scores = beam_search(
            size=beam_size, src_mask=src_mask, max_output_length=max_output_length,
            model=model, alpha=alpha,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        # Transformer beam doesn't return attention scores
        self.assertIsNone(attention_scores)
        # batch x time
        self.assertEqual(output.shape, (batch_size, max_output_length))
        np.testing.assert_equal(output, [[5, 5, 5], [5, 5, 5]])
        # now compare to greedy, they should be the same for beam=1
        greedy_output, _ = transformer_greedy(
            src_mask=src_mask, max_output_length=max_output_length, model=model,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        np.testing.assert_equal(output, greedy_output)
    def test_transformer_beam7(self):
        """A wider beam may find a different (here: immediately EOS) result."""
        batch_size = 2
        beam_size = 7
        alpha = 1.
        max_output_length = 3
        src_mask, model, encoder_output, encoder_hidden = self._build(
            batch_size=batch_size)
        output, attention_scores = beam_search(
            size=beam_size, src_mask=src_mask, n_best=1,
            max_output_length=max_output_length, model=model, alpha=alpha,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        # Transformer beam doesn't return attention scores
        self.assertIsNone(attention_scores)
        # batch x time
        # now it produces EOS, so everything after gets cut off
        self.assertEqual(output.shape, (batch_size, 1))
        np.testing.assert_equal(output, [[3], [3]])
class TestSearchRecurrent(TestSearch):
    """Greedy and beam search over a randomly initialised recurrent model;
    expected outputs/attention are regression values for the fixed seed."""
    def _build(self, batch_size):
        """Construct encoder+decoder model, random states and mask."""
        src_time_dim = 4
        vocab_size = 7
        emb = Embeddings(embedding_dim=self.emb_size, vocab_size=vocab_size,
                         padding_idx=self.pad_index)
        encoder = RecurrentEncoder(emb_size=self.emb_size,
                                   num_layers=self.num_layers,
                                   hidden_size=self.encoder_hidden_size,
                                   bidirectional=True)
        decoder = RecurrentDecoder(hidden_size=self.hidden_size,
                                   encoder=encoder, attention="bahdanau",
                                   emb_size=self.emb_size,
                                   vocab_size=self.vocab_size,
                                   num_layers=self.num_layers,
                                   init_hidden="bridge",
                                   input_feeding=True)
        encoder_output = torch.rand(
            size=(batch_size, src_time_dim, encoder.output_size))
        # deterministic (seeded) uniform re-initialisation of all weights
        for p in decoder.parameters():
            torch.nn.init.uniform_(p, -0.5, 0.5)
        src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
        encoder_hidden = torch.rand(size=(batch_size, encoder.output_size))
        model = Model(encoder=encoder, decoder=decoder,
                      src_embed=emb, trg_embed=emb,
                      src_vocab=self.vocab, trg_vocab=self.vocab)
        return src_mask, model, encoder_output, encoder_hidden
    def test_recurrent_greedy(self):
        """Greedy decoding: recorded tokens and attention distributions."""
        batch_size = 2
        max_output_length = 3
        src_mask, model, encoder_output, encoder_hidden = self._build(
            batch_size=batch_size)
        output, attention_scores = recurrent_greedy(
            src_mask=src_mask, max_output_length=max_output_length, model=model,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        self.assertEqual(output.shape, (batch_size, max_output_length))
        np.testing.assert_equal(output, [[4, 0, 4], [4, 4, 4]])
        # one attention distribution over the 4 source positions per step
        expected_attention_scores = np.array(
            [[[0.22914883, 0.24638498, 0.21247596, 0.3119903],
              [0.22970565, 0.24540883, 0.21261126, 0.31227428],
              [0.22903332, 0.2459198, 0.2110187, 0.3140282]],
             [[0.252522, 0.29074305, 0.257121, 0.19961396],
              [0.2519883, 0.2895494, 0.25718424, 0.201278],
              [0.2523954, 0.28959078, 0.25769445, 0.2003194]]])
        np.testing.assert_array_almost_equal(attention_scores,
                                             expected_attention_scores)
        self.assertEqual(attention_scores.shape,
                         (batch_size, max_output_length, 4))
    def test_recurrent_beam1(self):
        # beam=1 and greedy should return the same result
        batch_size = 2
        max_output_length = 3
        src_mask, model, encoder_output, encoder_hidden = self._build(
            batch_size=batch_size)
        greedy_output, _ = recurrent_greedy(
            src_mask=src_mask, max_output_length=max_output_length, model=model,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        beam_size = 1
        alpha = 1.0
        output, _ = beam_search(
            size=beam_size, src_mask=src_mask, n_best=1,
            max_output_length=max_output_length, model=model, alpha=alpha,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        np.testing.assert_array_equal(greedy_output, output)
    def test_recurrent_beam7(self):
        """A wider beam may find a different (here: immediately EOS) result."""
        batch_size = 2
        max_output_length = 3
        src_mask, model, encoder_output, encoder_hidden = self._build(
            batch_size=batch_size)
        beam_size = 7
        alpha = 1.0
        output, _ = beam_search(
            size=beam_size, src_mask=src_mask, n_best=1,
            max_output_length=max_output_length, model=model, alpha=alpha,
            encoder_output=encoder_output, encoder_hidden=encoder_hidden)
        self.assertEqual(output.shape, (2, 1))
        np.testing.assert_array_equal(output, [[3], [3]])
| 8,723 | 39.018349 | 83 | py |
KSTER | KSTER-main/test/unit/test_transformer_decoder.py | import torch
from joeynmt.decoders import TransformerDecoder, TransformerDecoderLayer
from .test_helpers import TensorTestCase
class TestTransformerDecoder(TensorTestCase):
def setUp(self):
self.emb_size = 12
self.num_layers = 3
self.hidden_size = 12
self.ff_size = 24
self.num_heads = 4
self.dropout = 0.
seed = 42
torch.manual_seed(seed)
def test_transformer_decoder_freeze(self):
decoder = TransformerDecoder(freeze=True)
for n, p in decoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_transformer_decoder_output_size(self):
vocab_size = 11
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, vocab_size=vocab_size)
if not hasattr(decoder, "output_size"):
self.fail("Missing output_size property.")
self.assertEqual(decoder.output_size, vocab_size)
def test_transformer_decoder_forward(self):
batch_size = 2
src_time_dim = 4
trg_time_dim = 5
vocab_size = 7
trg_embed = torch.rand(size=(batch_size, trg_time_dim, self.emb_size))
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, emb_dropout=self.dropout,
vocab_size=vocab_size)
encoder_output = torch.rand(
size=(batch_size, src_time_dim, self.hidden_size))
for p in decoder.parameters():
torch.nn.init.uniform_(p, -0.5, 0.5)
src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
trg_mask = torch.ones(size=(batch_size, trg_time_dim, 1)) == 1
encoder_hidden = None # unused
decoder_hidden = None # unused
unrol_steps = None # unused
output, states, _, _ = decoder(
trg_embed, encoder_output, encoder_hidden, src_mask, unrol_steps,
decoder_hidden, trg_mask)
output_target = torch.Tensor(
[[[0.1718, 0.5595, -0.1996, -0.6924, 0.4351, -0.0850, 0.2805],
[0.0666, 0.4923, -0.1724, -0.6804, 0.3983, -0.1111, 0.2194],
[-0.0315, 0.3673, -0.2320, -0.6100, 0.3019, 0.0422, 0.2514],
[-0.0026, 0.3807, -0.2195, -0.6010, 0.3081, -0.0101, 0.2099],
[-0.0172, 0.3384, -0.2853, -0.5799, 0.2470, 0.0312, 0.2518]],
[[0.0284, 0.3918, -0.2010, -0.6472, 0.3646, -0.0296, 0.1791],
[0.1017, 0.4387, -0.2031, -0.7084, 0.3051, -0.1354, 0.2511],
[0.0155, 0.4274, -0.2061, -0.6702, 0.3085, -0.0617, 0.2830],
[0.0227, 0.4067, -0.1697, -0.6463, 0.3277, -0.0423, 0.2333],
[0.0133, 0.4409, -0.1186, -0.5694, 0.4450, 0.0290, 0.1643]]]
)
self.assertEqual(output_target.shape, output.shape)
self.assertTensorAlmostEqual(output_target, output)
greedy_predictions = output.argmax(-1)
expect_predictions = output_target.argmax(-1)
self.assertTensorEqual(expect_predictions, greedy_predictions)
states_target = torch.Tensor(
[[[3.7535e-02, 5.3508e-01, 4.9478e-02, -9.1961e-01, -5.3966e-01,
-1.0065e-01, 4.3053e-01, -3.0671e-01, -1.2724e-02, -4.1879e-01,
5.9625e-01, 1.1887e-01],
[1.3837e-01, 4.6963e-01, -3.7059e-02, -6.8479e-01, -4.6042e-01,
-1.0072e-01, 3.9374e-01, -3.0429e-01, -5.4203e-02, -4.3680e-01,
6.4257e-01, 1.1424e-01],
[1.0263e-01, 3.8331e-01, -2.5586e-02, -6.4478e-01, -4.5860e-01,
-1.0590e-01, 5.8806e-01, -2.8856e-01, 1.1084e-02, -4.7479e-01,
5.9094e-01, 1.6089e-01],
[7.3408e-02, 3.7701e-01, -5.8783e-02, -6.2368e-01, -4.4201e-01,
-1.0237e-01, 5.2556e-01, -3.0821e-01, -5.3345e-02, -4.5606e-01,
5.8259e-01, 1.2531e-01],
[4.1206e-02, 3.6129e-01, -1.2955e-02, -5.8638e-01, -4.6023e-01,
-9.4267e-02, 5.5464e-01, -3.0029e-01, -3.3974e-02, -4.8347e-01,
5.4088e-01, 1.2015e-01]],
[[1.1017e-01, 4.7179e-01, 2.6402e-02, -7.2170e-01, -3.9778e-01,
-1.0226e-01, 5.3498e-01, -2.8369e-01, -1.1081e-01, -4.6096e-01,
5.9517e-01, 1.3531e-01],
[2.1947e-01, 4.6407e-01, 8.4276e-02, -6.3263e-01, -4.4953e-01,
-9.7334e-02, 4.0321e-01, -2.9893e-01, -1.0368e-01, -4.5760e-01,
6.1378e-01, 1.3509e-01],
[2.1437e-01, 4.1372e-01, 1.9859e-02, -5.7415e-01, -4.5025e-01,
-9.8621e-02, 4.1182e-01, -2.8410e-01, -1.2729e-03, -4.8586e-01,
6.2318e-01, 1.4731e-01],
[1.9153e-01, 3.8401e-01, 2.6096e-02, -6.2339e-01, -4.0685e-01,
-9.7387e-02, 4.1836e-01, -2.8648e-01, -1.7857e-02, -4.7678e-01,
6.2907e-01, 1.7617e-01],
[3.1713e-02, 3.7548e-01, -6.3005e-02, -7.9804e-01, -3.6541e-01,
-1.0398e-01, 4.2991e-01, -2.9607e-01, 2.1376e-04, -4.5897e-01,
6.1062e-01, 1.6142e-01]]]
)
self.assertEqual(states_target.shape, states.shape)
self.assertTensorAlmostEqual(states_target, states)
def test_transformer_decoder_layers(self):
    """The decoder must stack `num_layers` TransformerDecoderLayers, each
    exposing both attention modules and a feed-forward of the right sizes."""
    trg_vocab_size = 7
    decoder = TransformerDecoder(
        num_layers=self.num_layers, num_heads=self.num_heads,
        hidden_size=self.hidden_size, ff_size=self.ff_size,
        dropout=self.dropout, vocab_size=trg_vocab_size)
    self.assertEqual(len(decoder.layers), self.num_layers)
    for layer in decoder.layers:
        self.assertIsInstance(layer, TransformerDecoderLayer)
        # every layer carries source-target attention, self-attention
        # and a position-wise feed-forward sub-module
        for attr_name in ("src_trg_att", "trg_trg_att", "feed_forward"):
            self.assertTrue(hasattr(layer, attr_name))
        self.assertEqual(layer.size, self.hidden_size)
        pwff_input_layer = layer.feed_forward.pwff_layer[0]
        self.assertEqual(pwff_input_layer.in_features, self.hidden_size)
        self.assertEqual(pwff_input_layer.out_features, self.ff_size)
| 6,322 | 42.909722 | 79 | py |
KSTER | KSTER-main/test/unit/test_transformer_encoder.py | import torch
from joeynmt.encoders import TransformerEncoder
from .test_helpers import TensorTestCase
class TestTransformerEncoder(TensorTestCase):
    """Tests for the Transformer encoder: frozen parameters and a
    seed-pinned regression check of the forward pass."""

    def setUp(self):
        # fixed sizes and seed so the regression target tensor below
        # stays valid
        self.emb_size = 12
        self.num_layers = 3
        self.hidden_size = 12
        self.ff_size = 24
        self.num_heads = 4
        self.dropout = 0.
        self.seed = 42
        torch.manual_seed(self.seed)

    def test_transformer_encoder_freeze(self):
        """With freeze=True no parameter may require gradients."""
        encoder = TransformerEncoder(freeze=True)
        for n, p in encoder.named_parameters():
            self.assertFalse(p.requires_grad)

    def test_transformer_encoder_forward(self):
        """Forward pass: output shape, no recurrent hidden state, and
        exact values for uniformly initialised weights at seed 42."""
        batch_size = 2
        time_dim = 4
        torch.manual_seed(self.seed)

        encoder = TransformerEncoder(
            hidden_size=self.hidden_size, ff_size=self.ff_size,
            num_layers=self.num_layers, num_heads=self.num_heads,
            dropout=self.dropout, emb_dropout=self.dropout)

        # deterministic init so the target values below are reproducible
        for p in encoder.parameters():
            torch.nn.init.uniform_(p, -0.5, 0.5)

        x = torch.rand(size=(batch_size, time_dim, self.emb_size))

        # no padding, no mask
        x_length = torch.Tensor([time_dim] * batch_size).int()
        mask = torch.ones([batch_size, 1, time_dim]) == 1

        output, hidden = encoder(x, x_length, mask)

        self.assertEqual(output.shape, torch.Size(
            [batch_size, time_dim, self.hidden_size]))
        # the Transformer encoder is non-recurrent: no hidden state
        self.assertEqual(hidden, None)

        # regression target recorded for seed 42 / uniform(-0.5, 0.5) init
        output_target = torch.Tensor(
            [[[1.9728e-01, -1.2042e-01, 8.0998e-02, 1.3411e-03, -3.5960e-01,
               -5.2988e-01, -5.6056e-01, -3.5297e-01, 2.6680e-01, 2.8343e-01,
               -3.7342e-01, -5.9112e-03],
              [8.9687e-02, -1.2491e-01, 7.7809e-02, -1.3500e-03, -2.7002e-01,
               -4.7312e-01, -5.7981e-01, -4.1998e-01, 1.0457e-01, 2.9726e-01,
               -3.9461e-01, 8.1598e-02],
              [3.4988e-02, -1.3020e-01, 6.0043e-02, 2.7782e-02, -3.1483e-01,
               -3.8940e-01, -5.5557e-01, -5.9540e-01, -2.9808e-02, 3.1468e-01,
               -4.5809e-01, 4.3313e-03],
              [1.2234e-01, -1.3285e-01, 6.3068e-02, -2.3343e-02, -2.3519e-01,
               -4.0794e-01, -5.6063e-01, -5.5484e-01, -1.1272e-01, 3.0103e-01,
               -4.0983e-01, 3.3038e-02]],
             [[9.8597e-02, -1.2121e-01, 1.0718e-01, -2.2644e-02, -4.0282e-01,
               -4.2646e-01, -5.9981e-01, -3.7200e-01, 1.9538e-01, 2.7036e-01,
               -3.4072e-01, -1.7966e-03],
              [8.8470e-02, -1.2618e-01, 5.3351e-02, -1.8531e-02, -3.3834e-01,
               -4.9047e-01, -5.7063e-01, -4.9790e-01, 2.2070e-01, 3.3964e-01,
               -4.1604e-01, 2.3519e-02],
              [5.8373e-02, -1.2706e-01, 1.0598e-01, 9.3277e-05, -3.0493e-01,
               -4.4406e-01, -5.4723e-01, -5.2214e-01, 8.0374e-02, 2.6307e-01,
               - 4.4571e-01, 8.7052e-02],
              [7.9567e-02, -1.2977e-01, 1.1731e-01, 2.6198e-02, -2.4024e-01,
               -4.2161e-01, -5.7604e-01, -7.3298e-01, 1.6698e-01, 3.1454e-01,
               -4.9189e-01, 2.4027e-02]]]
        )
        self.assertTensorAlmostEqual(output_target, output)
| 3,144 | 40.381579 | 78 | py |
KSTER | KSTER-main/test/unit/test_embeddings.py | import torch
from joeynmt.embeddings import Embeddings
from .test_helpers import TensorTestCase
class TestEmbeddings(TensorTestCase):
    """Unit tests for the joeynmt Embeddings module: shapes, padding row,
    freezing, lookup semantics and scaling."""

    def setUp(self):
        self.emb_size = 10
        self.vocab_size = 11
        self.pad_idx = 1
        torch.manual_seed(42)

    def _make_embeddings(self, **kwargs):
        """Construct an Embeddings module with the test's default sizes."""
        return Embeddings(embedding_dim=self.emb_size,
                          vocab_size=self.vocab_size,
                          padding_idx=self.pad_idx,
                          **kwargs)

    def test_size(self):
        emb = self._make_embeddings()
        self.assertEqual(emb.lut.weight.shape,
                         torch.Size([self.vocab_size, self.emb_size]))

    def test_pad_zeros(self):
        # the row reserved for padding must be initialised to all zeros
        emb = self._make_embeddings()
        self.assertTensorEqual(emb.lut.weight[self.pad_idx],
                               torch.zeros([self.emb_size]))

    def test_freeze(self):
        frozen = self._make_embeddings(freeze=True)
        for _, param in frozen.named_parameters():
            self.assertFalse(param.requires_grad)

    def test_forward(self):
        # fix the embedding weights so the lookup result is predictable
        fixed_weights = self._get_random_embedding_weights()
        emb = self._make_embeddings()
        self._fill_embeddings(emb, fixed_weights)
        indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
        embedded = emb.forward(x=indices)
        # an embedding lookup is just row-selection from the weight matrix
        self.assertTensorEqual(
            embedded,
            torch.index_select(input=fixed_weights, index=indices, dim=0))
        # the representation of PAD stays all-zero after embedding
        self.assertTensorEqual(embedded[2], torch.zeros([self.emb_size]))

    def test_scale(self):
        # fix the embedding weights
        fixed_weights = self._get_random_embedding_weights()
        emb = self._make_embeddings(scale=True)
        emb.lut.weight.data = fixed_weights
        indices = torch.Tensor([0, 1, self.pad_idx, 9]).long()
        embedded = emb.forward(x=indices)
        plain_lookup = torch.index_select(input=fixed_weights,
                                          index=indices, dim=0)
        # with scaling enabled the output must differ from a plain lookup ...
        self.assertTensorNotEqual(plain_lookup, embedded)
        # ... by exactly a factor of sqrt(embedding_dim)
        self.assertTensorEqual(plain_lookup * (self.emb_size ** 0.5),
                               embedded)

    def _fill_embeddings(self, embeddings, weights):
        """Overwrite the lookup table of *embeddings* with *weights*."""
        embeddings.lut.weight.data = weights

    def _get_random_embedding_weights(self):
        """Random weight matrix whose padding row is zeroed out."""
        weights = torch.rand([self.vocab_size, self.emb_size])
        weights[self.pad_idx] = torch.zeros([self.emb_size])
        return weights
| 3,091 | 38.139241 | 78 | py |
KSTER | KSTER-main/test/unit/test_attention.py | import torch
from joeynmt.attention import BahdanauAttention, LuongAttention
from .test_helpers import TensorTestCase
class TestBahdanauAttention(TensorTestCase):
    """Tests for additive (Bahdanau) attention: layer shapes, the
    pre-computation contract, and seed-pinned regression values."""

    def setUp(self):
        # fixed sizes and seed so the regression targets below stay valid
        self.key_size = 3
        self.query_size = 5
        self.hidden_size = 7
        seed = 42
        torch.manual_seed(seed)
        self.bahdanau_att = BahdanauAttention(hidden_size=self.hidden_size,
                                              key_size=self.key_size,
                                              query_size=self.query_size)

    def test_bahdanau_attention_size(self):
        """Projection layers have the expected shapes and carry no bias."""
        self.assertIsNone(self.bahdanau_att.key_layer.bias)  # no bias
        self.assertIsNone(self.bahdanau_att.query_layer.bias)  # no bias
        self.assertEqual(self.bahdanau_att.key_layer.weight.shape,
                         torch.Size([self.hidden_size, self.key_size]))
        self.assertEqual(self.bahdanau_att.query_layer.weight.shape,
                         torch.Size([self.hidden_size, self.query_size]))
        self.assertEqual(self.bahdanau_att.energy_layer.weight.shape,
                         torch.Size([1, self.hidden_size]))
        self.assertIsNone(self.bahdanau_att.energy_layer.bias)

    def test_bahdanau_forward(self):
        """Calling attention without compute_proj_keys must fail; with it,
        contexts and attention probabilities match the recorded values and
        masked (padding) positions receive zero attention."""
        src_length = 5
        trg_length = 4
        batch_size = 6
        queries = torch.rand(size=(batch_size, trg_length, self.query_size))
        keys = torch.rand(size=(batch_size, src_length, self.key_size))
        mask = torch.ones(size=(batch_size, 1, src_length)).byte()
        # introduce artificial padding areas
        mask[0, 0, -3:] = 0
        mask[1, 0, -2:] = 0
        mask[4, 0, -1:] = 0

        for t in range(trg_length):
            c, att = None, None
            try:
                # should raise an AssertionException (missing pre-computation)
                query = queries[:, t, :].unsqueeze(1)
                c, att = self.bahdanau_att(query=query, mask=mask, values=keys)
            except AssertionError:
                pass
            self.assertIsNone(c)
            self.assertIsNone(att)

        # now with pre-computation
        self.bahdanau_att.compute_proj_keys(keys=keys)
        self.assertIsNotNone(self.bahdanau_att.proj_keys)
        self.assertEqual(self.bahdanau_att.proj_keys.shape,
                         torch.Size(
                             [batch_size, src_length, self.hidden_size]))

        contexts = []
        attention_probs = []
        for t in range(trg_length):
            c, att = None, None
            try:
                # should not raise an AssertionException
                query = queries[:, t, :].unsqueeze(1)
                c, att = self.bahdanau_att(query=query, mask=mask, values=keys)
            except AssertionError:
                self.fail()
            self.assertIsNotNone(c)
            self.assertIsNotNone(att)
            self.assertEqual(self.bahdanau_att.proj_query.shape,
                             torch.Size([batch_size, 1, self.hidden_size]))
            contexts.append(c)
            attention_probs.append(att)

        self.assertEqual(len(attention_probs), trg_length)
        self.assertEqual(len(contexts), trg_length)
        contexts = torch.cat(contexts, dim=1)
        attention_probs = torch.cat(attention_probs, dim=1)
        self.assertEqual(contexts.shape,
                         torch.Size(
                             [batch_size, trg_length, self.key_size]))
        self.assertEqual(attention_probs.shape,
                         torch.Size([batch_size, trg_length, src_length]))

        # regression targets recorded for seed 42
        contexts_target = torch.Tensor(
            [[[0.5080, 0.5832, 0.5614],
              [0.5096, 0.5816, 0.5596],
              [0.5092, 0.5820, 0.5601],
              [0.5079, 0.5833, 0.5615]],
             [[0.4709, 0.5817, 0.3091],
              [0.4720, 0.5793, 0.3063],
              [0.4704, 0.5825, 0.3102],
              [0.4709, 0.5814, 0.3090]],
             [[0.4394, 0.4482, 0.6526],
              [0.4390, 0.4475, 0.6522],
              [0.4391, 0.4479, 0.6538],
              [0.4391, 0.4479, 0.6533]],
             [[0.5283, 0.3441, 0.3938],
              [0.5297, 0.3457, 0.3956],
              [0.5306, 0.3466, 0.3966],
              [0.5274, 0.3431, 0.3926]],
             [[0.4079, 0.4145, 0.2439],
              [0.4064, 0.4156, 0.2445],
              [0.4077, 0.4147, 0.2439],
              [0.4067, 0.4153, 0.2444]],
             [[0.5649, 0.5749, 0.4960],
              [0.5660, 0.5763, 0.4988],
              [0.5658, 0.5754, 0.4984],
              [0.5662, 0.5766, 0.4991]]]
        )
        self.assertTensorAlmostEqual(contexts_target, contexts)
        # masked positions (the artificial padding above) must get zero mass
        attention_probs_targets = torch.Tensor(
            [[[0.4904, 0.5096, 0.0000, 0.0000, 0.0000],
              [0.4859, 0.5141, 0.0000, 0.0000, 0.0000],
              [0.4871, 0.5129, 0.0000, 0.0000, 0.0000],
              [0.4906, 0.5094, 0.0000, 0.0000, 0.0000]],
             [[0.3314, 0.3278, 0.3408, 0.0000, 0.0000],
              [0.3337, 0.3230, 0.3433, 0.0000, 0.0000],
              [0.3301, 0.3297, 0.3402, 0.0000, 0.0000],
              [0.3312, 0.3275, 0.3413, 0.0000, 0.0000]],
             [[0.1977, 0.2047, 0.2040, 0.1936, 0.1999],
              [0.1973, 0.2052, 0.2045, 0.1941, 0.1988],
              [0.1987, 0.2046, 0.2046, 0.1924, 0.1996],
              [0.1984, 0.2047, 0.2044, 0.1930, 0.1995]],
             [[0.1963, 0.2041, 0.2006, 0.1942, 0.2047],
              [0.1954, 0.2065, 0.2011, 0.1934, 0.2036],
              [0.1947, 0.2074, 0.2014, 0.1928, 0.2038],
              [0.1968, 0.2028, 0.2006, 0.1949, 0.2049]],
             [[0.2455, 0.2414, 0.2588, 0.2543, 0.0000],
              [0.2450, 0.2447, 0.2566, 0.2538, 0.0000],
              [0.2458, 0.2417, 0.2586, 0.2540, 0.0000],
              [0.2452, 0.2438, 0.2568, 0.2542, 0.0000]],
             [[0.1999, 0.1888, 0.1951, 0.2009, 0.2153],
              [0.2035, 0.1885, 0.1956, 0.1972, 0.2152],
              [0.2025, 0.1885, 0.1950, 0.1980, 0.2159],
              [0.2044, 0.1884, 0.1955, 0.1970, 0.2148]]]
        )
        self.assertTensorAlmostEqual(attention_probs_targets, attention_probs)

    def test_bahdanau_precompute_None(self):
        """Before compute_proj_keys, no projections are cached."""
        self.assertIsNone(self.bahdanau_att.proj_keys)
        self.assertIsNone(self.bahdanau_att.proj_query)

    def test_bahdanau_precompute(self):
        """compute_proj_keys caches the key projections (regression check)."""
        src_length = 5
        batch_size = 6
        keys = torch.rand(size=(batch_size, src_length, self.key_size))
        self.bahdanau_att.compute_proj_keys(keys=keys)
        proj_keys_targets = torch.Tensor(
            [[[0.4042, 0.1373, 0.3308, 0.2317, 0.3011, 0.2978, -0.0975],
              [0.4740, 0.4829, -0.0853, -0.2634, 0.4623, 0.0333, -0.2702],
              [0.4540, 0.0645, 0.6046, 0.4632, 0.3459, 0.4631, -0.0919],
              [0.4744, 0.5098, -0.2441, -0.3713, 0.4265, -0.0407, -0.2527],
              [0.0314, 0.1189, 0.3825, 0.1119, 0.2548, 0.1239, -0.1921]],
             [[0.7057, 0.2725, 0.2426, 0.1979, 0.4285, 0.3727, -0.1126],
              [0.3967, 0.0223, 0.3664, 0.3488, 0.2107, 0.3531, -0.0095],
              [0.4311, 0.4695, 0.3035, -0.0640, 0.5914, 0.1713, -0.3695],
              [0.0797, 0.1038, 0.3847, 0.1476, 0.2486, 0.1568, -0.1672],
              [0.3379, 0.3671, 0.3622, 0.0166, 0.5097, 0.1845, -0.3207]],
             [[0.4051, 0.4552, -0.0709, -0.2616, 0.4339, 0.0126, -0.2682],
              [0.5379, 0.5037, 0.0074, -0.2046, 0.5243, 0.0969, -0.2953],
              [0.0250, 0.0544, 0.3859, 0.1679, 0.1976, 0.1471, -0.1392],
              [0.1880, 0.2725, 0.1849, -0.0598, 0.3383, 0.0693, -0.2329],
              [0.0759, 0.1006, 0.0955, -0.0048, 0.1361, 0.0400, -0.0913]],
             [[-0.0207, 0.1266, 0.5529, 0.1728, 0.3192, 0.1611, -0.2560],
              [0.5713, 0.2364, 0.0718, 0.0801, 0.3141, 0.2455, -0.0729],
              [0.1574, 0.1162, 0.3591, 0.1572, 0.2602, 0.1838, -0.1510],
              [0.1357, 0.0192, 0.1817, 0.1391, 0.1037, 0.1389, -0.0277],
              [0.3088, 0.2804, 0.2024, -0.0045, 0.3680, 0.1386, -0.2127]],
             [[0.1181, 0.0899, 0.1139, 0.0329, 0.1390, 0.0744, -0.0758],
              [0.0713, 0.2682, 0.4111, 0.0129, 0.4044, 0.0985, -0.3177],
              [0.5340, 0.1713, 0.5365, 0.3679, 0.4262, 0.4373, -0.1456],
              [0.3902, -0.0242, 0.4498, 0.4313, 0.1997, 0.4012, 0.0075],
              [0.1764, 0.1531, -0.0564, -0.0876, 0.1390, 0.0129, -0.0714]],
             [[0.3772, 0.3725, 0.3053, -0.0012, 0.4982, 0.1808, -0.3006],
              [0.4391, -0.0472, 0.3379, 0.4136, 0.1434, 0.3918, 0.0687],
              [0.3697, 0.2313, 0.4745, 0.2100, 0.4348, 0.3000, -0.2242],
              [0.8427, 0.3705, 0.1227, 0.1079, 0.4890, 0.3604, -0.1305],
              [0.3526, 0.3477, 0.1473, -0.0740, 0.4132, 0.1138, -0.2452]]]
        )
        self.assertTensorAlmostEqual(proj_keys_targets,
                                     self.bahdanau_att.proj_keys)
class TestLuongAttention(TensorTestCase):
    """Tests for multiplicative (Luong) attention: layer shapes, the
    pre-computation contract, and seed-pinned regression values."""

    def setUp(self):
        # NOTE(review): this lambda returns the exception instance instead
        # of raising it, so assertEqual would not actually fail on
        # mismatching tensors; the tests below rely on the TensorTestCase
        # helpers instead — confirm whether this registration is needed.
        self.addTypeEqualityFunc(torch.Tensor,
                                 lambda x, y, msg: self.failureException(
                                     msg) if not torch.equal(x, y) else True)
        self.key_size = 3
        self.query_size = 5
        # Luong attention requires query and hidden size to match
        self.hidden_size = self.query_size
        seed = 42
        torch.manual_seed(seed)
        self.luong_att = LuongAttention(hidden_size=self.hidden_size,
                                        key_size=self.key_size)

    def test_luong_attention_size(self):
        """The key projection has the expected shape and carries no bias."""
        self.assertIsNone(self.luong_att.key_layer.bias)  # no bias
        self.assertEqual(self.luong_att.key_layer.weight.shape,
                         torch.Size([self.hidden_size, self.key_size]))

    def test_luong_attention_forward(self):
        """Calling attention without compute_proj_keys must fail; with it,
        contexts and attention probabilities match the recorded values and
        masked (padding) positions receive zero attention."""
        src_length = 5
        trg_length = 4
        batch_size = 6
        queries = torch.rand(size=(batch_size, trg_length, self.query_size))
        keys = torch.rand(size=(batch_size, src_length, self.key_size))
        mask = torch.ones(size=(batch_size, 1, src_length)).byte()
        # introduce artificial padding areas
        mask[0, 0, -3:] = 0
        mask[1, 0, -2:] = 0
        mask[4, 0, -1:] = 0

        for t in range(trg_length):
            c, att = None, None
            try:
                # should raise an AssertionException (missing pre-computation)
                query = queries[:, t, :].unsqueeze(1)
                c, att = self.luong_att(query=query, mask=mask, values=keys)
            except AssertionError:
                pass
            self.assertIsNone(c)
            self.assertIsNone(att)

        # now with pre-computation
        self.luong_att.compute_proj_keys(keys=keys)
        self.assertIsNotNone(self.luong_att.proj_keys)
        self.assertEqual(self.luong_att.proj_keys.shape,
                         torch.Size([batch_size, src_length,
                                     self.hidden_size]))

        contexts = []
        attention_probs = []
        for t in range(trg_length):
            c, att = None, None
            try:
                # should not raise an AssertionException
                query = queries[:, t, :].unsqueeze(1)
                c, att = self.luong_att(query=query, mask=mask, values=keys)
            except AssertionError:
                self.fail()
            self.assertIsNotNone(c)
            self.assertIsNotNone(att)
            contexts.append(c)
            attention_probs.append(att)

        self.assertEqual(len(attention_probs), trg_length)
        self.assertEqual(len(contexts), trg_length)
        contexts = torch.cat(contexts, dim=1)
        attention_probs = torch.cat(attention_probs, dim=1)
        self.assertEqual(contexts.shape,
                         torch.Size([batch_size, trg_length, self.key_size]))
        self.assertEqual(attention_probs.shape,
                         torch.Size([batch_size, trg_length, src_length]))

        # regression targets recorded for seed 42
        context_targets = torch.Tensor([[[0.5347, 0.2918, 0.4707],
                                         [0.5062, 0.2657, 0.4117],
                                         [0.4969, 0.2572, 0.3926],
                                         [0.5320, 0.2893, 0.4651]],
                                        [[0.5210, 0.6707, 0.4343],
                                         [0.5111, 0.6809, 0.4274],
                                         [0.5156, 0.6622, 0.4274],
                                         [0.5046, 0.6634, 0.4175]],
                                        [[0.4998, 0.5570, 0.3388],
                                         [0.4949, 0.5357, 0.3609],
                                         [0.4982, 0.5208, 0.3468],
                                         [0.5013, 0.5474, 0.3503]],
                                        [[0.5911, 0.6944, 0.5319],
                                         [0.5964, 0.6899, 0.5257],
                                         [0.6161, 0.6771, 0.5042],
                                         [0.5937, 0.7011, 0.5330]],
                                        [[0.4439, 0.5916, 0.3691],
                                         [0.4409, 0.5970, 0.3762],
                                         [0.4446, 0.5845, 0.3659],
                                         [0.4417, 0.6157, 0.3796]],
                                        [[0.4581, 0.4343, 0.5151],
                                         [0.4493, 0.4297, 0.5348],
                                         [0.4399, 0.4265, 0.5419],
                                         [0.4833, 0.4570, 0.4855]]])
        self.assertTensorAlmostEqual(context_targets, contexts)
        # masked positions (the artificial padding above) must get zero mass
        attention_probs_targets = torch.Tensor(
            [[[0.3238, 0.6762, 0.0000, 0.0000, 0.0000],
              [0.4090, 0.5910, 0.0000, 0.0000, 0.0000],
              [0.4367, 0.5633, 0.0000, 0.0000, 0.0000],
              [0.3319, 0.6681, 0.0000, 0.0000, 0.0000]],
             [[0.2483, 0.3291, 0.4226, 0.0000, 0.0000],
              [0.2353, 0.3474, 0.4174, 0.0000, 0.0000],
              [0.2725, 0.3322, 0.3953, 0.0000, 0.0000],
              [0.2803, 0.3476, 0.3721, 0.0000, 0.0000]],
             [[0.1955, 0.1516, 0.2518, 0.1466, 0.2546],
              [0.2220, 0.1613, 0.2402, 0.1462, 0.2303],
              [0.2074, 0.1953, 0.2142, 0.1536, 0.2296],
              [0.2100, 0.1615, 0.2434, 0.1376, 0.2475]],
             [[0.2227, 0.2483, 0.1512, 0.1486, 0.2291],
              [0.2210, 0.2331, 0.1599, 0.1542, 0.2318],
              [0.2123, 0.1808, 0.1885, 0.1702, 0.2482],
              [0.2233, 0.2479, 0.1435, 0.1433, 0.2421]],
             [[0.2475, 0.2482, 0.2865, 0.2178, 0.0000],
              [0.2494, 0.2410, 0.2976, 0.2120, 0.0000],
              [0.2498, 0.2449, 0.2778, 0.2275, 0.0000],
              [0.2359, 0.2603, 0.3174, 0.1864, 0.0000]],
             [[0.2362, 0.1929, 0.2128, 0.1859, 0.1723],
              [0.2230, 0.2118, 0.2116, 0.1890, 0.1646],
              [0.2118, 0.2251, 0.2039, 0.1891, 0.1700],
              [0.2859, 0.1874, 0.2083, 0.1583, 0.1601]]])
        self.assertTensorAlmostEqual(attention_probs_targets, attention_probs)

    def test_luong_precompute_None(self):
        """Before compute_proj_keys, no key projections are cached."""
        self.assertIsNone(self.luong_att.proj_keys)

    def test_luong_precompute(self):
        """compute_proj_keys caches the key projections (regression check)."""
        src_length = 5
        batch_size = 6
        keys = torch.rand(size=(batch_size, src_length, self.key_size))
        self.luong_att.compute_proj_keys(keys=keys)
        proj_keys_targets = torch.Tensor(
            [[[0.5362, 0.1826, 0.4716, 0.3245, 0.4122],
              [0.3819, 0.0934, 0.2750, 0.2311, 0.2378],
              [0.2246, 0.2934, 0.3999, 0.0519, 0.4430],
              [0.1271, 0.0636, 0.2444, 0.1294, 0.1659],
              [0.3494, 0.0372, 0.1326, 0.1908, 0.1295]],
             [[0.3363, 0.5984, 0.2090, -0.2695, 0.6584],
              [0.3098, 0.3608, 0.3623, 0.0098, 0.5004],
              [0.6133, 0.2568, 0.4264, 0.2688, 0.4716],
              [0.4058, 0.1438, 0.3043, 0.2127, 0.2971],
              [0.6604, 0.3490, 0.5228, 0.2593, 0.5967]],
             [[0.4224, 0.1182, 0.4883, 0.3403, 0.3458],
              [0.4257, 0.3757, -0.1431, -0.2208, 0.3383],
              [0.0681, 0.2540, 0.4165, 0.0269, 0.3934],
              [0.5341, 0.3288, 0.3937, 0.1532, 0.5132],
              [0.6244, 0.1647, 0.2378, 0.2548, 0.3196]],
             [[0.2222, 0.3380, 0.2374, -0.0748, 0.4212],
              [0.4042, 0.1373, 0.3308, 0.2317, 0.3011],
              [0.4740, 0.4829, -0.0853, -0.2634, 0.4623],
              [0.4540, 0.0645, 0.6046, 0.4632, 0.3459],
              [0.4744, 0.5098, -0.2441, -0.3713, 0.4265]],
             [[0.0314, 0.1189, 0.3825, 0.1119, 0.2548],
              [0.7057, 0.2725, 0.2426, 0.1979, 0.4285],
              [0.3967, 0.0223, 0.3664, 0.3488, 0.2107],
              [0.4311, 0.4695, 0.3035, -0.0640, 0.5914],
              [0.0797, 0.1038, 0.3847, 0.1476, 0.2486]],
             [[0.3379, 0.3671, 0.3622, 0.0166, 0.5097],
              [0.4051, 0.4552, -0.0709, -0.2616, 0.4339],
              [0.5379, 0.5037, 0.0074, -0.2046, 0.5243],
              [0.0250, 0.0544, 0.3859, 0.1679, 0.1976],
              [0.1880, 0.2725, 0.1849, -0.0598, 0.3383]]]
        )
        self.assertTensorAlmostEqual(proj_keys_targets, self.luong_att.proj_keys)
| 16,419 | 42.786667 | 81 | py |
KSTER | KSTER-main/test/unit/test_helpers.py | import unittest
import torch
class TensorTestCase(unittest.TestCase):
    """TestCase base class providing equality assertions for torch tensors."""

    def assertTensorNotEqual(self, expected, actual):
        """Fail when both tensors hold exactly the same values."""
        if torch.equal(expected, actual):
            self.fail("Tensors did match but weren't supposed to: expected {},"
                      " actual {}.".format(expected, actual))

    def assertTensorEqual(self, expected, actual):
        """Fail unless both tensors hold exactly the same values."""
        if not torch.equal(expected, actual):
            self.fail("Tensors didn't match but were supposed to {} vs"
                      " {}".format(expected, actual))

    def assertTensorAlmostEqual(self, expected, actual):
        """Fail unless every entry differs by strictly less than 1e-4."""
        within_tolerance = torch.all(
            torch.add(expected, -actual).abs().lt(1e-4))
        if not within_tolerance:
            self.fail("Tensors didn't match but were supposed to {} vs"
                      " {}".format(expected, actual))
| 879 | 34.2 | 79 | py |
KSTER | KSTER-main/scripts/average_checkpoints.py | #!/usr/bin/env python3
# coding: utf-8
"""
Checkpoint averaging
Mainly follows:
https://github.com/pytorch/fairseq/blob/master/scripts/average_checkpoints.py
"""
import argparse
import collections
import torch
from typing import List
def average_checkpoints(inputs: List[str]) -> dict:
    """Loads checkpoints from inputs and returns a new state dict whose
    'model_state' holds the element-wise average of all checkpoints' weights.

    All non-averaged entries (optimizer state, step counters, ...) are taken
    from the first checkpoint.

    Args:
        inputs: An iterable of string paths of checkpoints to load from.

    Returns:
        A dict of string keys mapping to various values. The 'model_state'
        key from the returned dict corresponds to an OrderedDict mapping
        string parameter names to averaged torch Tensors.

    Raises:
        ValueError: if `inputs` is empty.
        KeyError: if the checkpoints do not share identical parameter keys.
    """
    if not inputs:
        # previously this fell through and crashed later with an opaque
        # TypeError on `new_state['model_state']`
        raise ValueError("Need at least one checkpoint to average.")

    params_dict = collections.OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for f in inputs:
        # always restore onto CPU so GPU checkpoints can be averaged on
        # machines without CUDA
        state = torch.load(
            f,
            map_location=(
                lambda s, _: torch.serialization.default_restore_location(
                    s, 'cpu')
            ),
        )
        # Copies over the settings from the first checkpoint
        if new_state is None:
            new_state = state

        # Averaging: only handle the network params.
        model_params = state['model_state']

        model_params_keys = list(model_params.keys())
        if params_keys is None:
            params_keys = model_params_keys
        elif params_keys != model_params_keys:
            raise KeyError(
                'For checkpoint {}, expected list of params: {}, '
                'but found: {}'.format(f, params_keys, model_params_keys)
            )

        for k in params_keys:
            p = model_params[k]
            # Promote fp16 weights to fp32 before accumulating to avoid
            # precision loss/overflow. Checking the dtype (instead of the
            # deprecated `isinstance(p, torch.HalfTensor)`) also catches
            # CUDA half tensors.
            if isinstance(p, torch.Tensor) and p.dtype == torch.float16:
                p = p.float()
            if k not in params_dict:
                params_dict[k] = p.clone()
                # NOTE: clone() is needed in case of p is a shared parameter
            else:
                params_dict[k] += p

    averaged_params = collections.OrderedDict()
    # v should be a list of torch Tensor.
    for k, v in params_dict.items():
        averaged_params[k] = v
        if averaged_params[k].is_floating_point():
            averaged_params[k].div_(num_models)
        else:
            # integer tensors (e.g. counters) use floor division
            averaged_params[k] //= num_models
    new_state['model_state'] = averaged_params
    return new_state
def main():
    """CLI entry point: parse checkpoint paths, average them, save result."""
    parser = argparse.ArgumentParser(
        description='Tool to average the params of input checkpoints to '
                    'produce a new checkpoint',
    )
    parser.add_argument('--inputs', required=True, nargs='+',
                        help='Input checkpoint file paths.')
    parser.add_argument('--output', required=True, metavar='FILE',
                        help='Write the new checkpoint to this path.')
    args = parser.parse_args()
    print(args)

    averaged_state = average_checkpoints(args.inputs)
    torch.save(averaged_state, args.output)
    print('Finished writing averaged checkpoint to {}.'.format(args.output))
main()
| 3,018 | 30.123711 | 79 | py |
KSTER | KSTER-main/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# make the package root importable so autodoc can find the modules
sys.path.insert(0, os.path.abspath('../..'))
print(sys.path)

# -- Project information -----------------------------------------------------

project = 'Joey NMT'
copyright = '2018, Jasmijn Bastings and Julia Kreutzer'
author = 'Jasmijn Bastings and Julia Kreutzer'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.2'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'JoeyNMTdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# NOTE(review): the author here ('Joost Bastings') differs from the
# module-level `author`/`copyright` above ('Jasmijn Bastings') — confirm
# which spelling is intended.
latex_documents = [
    (master_doc, 'JoeyNMT.tex', 'Joey NMT Documentation',
     'Joost Bastings and Julia Kreutzer', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'joeynmt', 'Joey NMT Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'JoeyNMT', 'Joey NMT Documentation',
     author, 'JoeyNMT', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------
| 5,375 | 28.377049 | 79 | py |
KSTER | KSTER-main/joeynmt/vocabulary.py | # coding: utf-8
"""
Vocabulary module
"""
from collections import defaultdict, Counter
from typing import List
import numpy as np
from torchtext.data import Dataset
from joeynmt.constants import UNK_TOKEN, DEFAULT_UNK_ID, \
EOS_TOKEN, BOS_TOKEN, PAD_TOKEN
class Vocabulary:
    """ Vocabulary represents mapping between tokens and indices. """

    def __init__(self, tokens: List[str] = None, file: str = None) -> None:
        """
        Create vocabulary from list of tokens or file.

        Special tokens are added if not already in file or list.
        File format: token with index i is in line i.

        :param tokens: list of tokens
        :param file: file to load vocabulary from
        """
        # don't rename stoi and itos since needed for torchtext
        # warning: stoi grows with unknown tokens, don't use for saving or size

        # special symbols
        self.specials = [UNK_TOKEN, PAD_TOKEN, BOS_TOKEN, EOS_TOKEN]

        # defaultdict maps every unseen token to the UNK index
        self.stoi = defaultdict(DEFAULT_UNK_ID)
        self.itos = []
        if tokens is not None:
            self._from_list(tokens)
        elif file is not None:
            self._from_file(file)

    def _from_list(self, tokens: List[str] = None) -> None:
        """
        Make vocabulary from list of tokens.
        Tokens are assumed to be unique and pre-selected.
        Special symbols are added if not in list.

        :param tokens: list of tokens
        """
        self.add_tokens(tokens=self.specials + tokens)
        assert len(self.stoi) == len(self.itos)

    def _from_file(self, file: str) -> None:
        """
        Make vocabulary from contents of file.
        File format: token with index i is in line i.

        :param file: path to file where the vocabulary is loaded from
        """
        tokens = []
        # explicit encoding: vocab files are written as UTF-8 (see to_file),
        # relying on the platform default breaks e.g. on Windows (cp1252)
        with open(file, "r", encoding="utf-8") as open_file:
            for line in open_file:
                tokens.append(line.strip("\n"))
        self._from_list(tokens)

    def __str__(self) -> str:
        return self.stoi.__str__()

    def to_file(self, file: str) -> None:
        """
        Save the vocabulary to a file, by writing token with index i in line i.

        :param file: path to file where the vocabulary is written
        """
        with open(file, "w", encoding="utf-8") as open_file:
            for t in self.itos:
                open_file.write("{}\n".format(t))

    def add_tokens(self, tokens: List[str]) -> None:
        """
        Add list of tokens to vocabulary

        :param tokens: list of tokens to add to the vocabulary
        """
        for t in tokens:
            new_index = len(self.itos)
            # add to vocab if not already there
            if t not in self.itos:
                self.itos.append(t)
                self.stoi[t] = new_index

    def is_unk(self, token: str) -> bool:
        """
        Check whether a token is covered by the vocabulary

        Uses a non-mutating lookup: indexing the defaultdict directly
        (`self.stoi[token]`) would silently insert every queried unknown
        token into `stoi` and grow it — the hazard the warning in
        `__init__` refers to.

        :param token:
        :return: True if covered, False otherwise
        """
        return self.stoi.get(token, DEFAULT_UNK_ID()) == DEFAULT_UNK_ID()

    def __len__(self) -> int:
        return len(self.itos)

    def array_to_sentence(self, array: np.array, cut_at_eos=True,
                          skip_pad=True) -> List[str]:
        """
        Converts an array of IDs to a sentence, optionally cutting the result
        off at the end-of-sequence token.

        :param array: 1D array containing indices
        :param cut_at_eos: cut the decoded sentences at the first <eos>
        :param skip_pad: skip generated <pad> tokens
        :return: list of strings (tokens)
        """
        sentence = []
        for i in array:
            s = self.itos[i]
            if cut_at_eos and s == EOS_TOKEN:
                break
            if skip_pad and s == PAD_TOKEN:
                continue
            sentence.append(s)
        return sentence

    def arrays_to_sentences(self, arrays: np.array, cut_at_eos=True,
                            skip_pad=True) -> List[List[str]]:
        """
        Convert multiple arrays containing sequences of token IDs to their
        sentences, optionally cutting them off at the end-of-sequence token.

        :param arrays: 2D array containing indices
        :param cut_at_eos: cut the decoded sentences at the first <eos>
        :param skip_pad: skip generated <pad> tokens
        :return: list of list of strings (tokens)
        """
        sentences = []
        for array in arrays:
            sentences.append(
                self.array_to_sentence(array=array, cut_at_eos=cut_at_eos,
                                       skip_pad=skip_pad))
        return sentences
def build_vocab(field: str, max_size: int, min_freq: int, dataset: Dataset,
                vocab_file: str = None) -> Vocabulary:
    """
    Builds vocabulary for a torchtext `field` from given `dataset` or
    `vocab_file`.

    :param field: attribute e.g. "src"
    :param max_size: maximum size of vocabulary
    :param min_freq: minimum frequency for an item to be included
    :param dataset: dataset to load data for field from
    :param vocab_file: file to store the vocabulary,
        if not None, load vocabulary from here
    :return: Vocabulary created from either `dataset` or `vocab_file`
    """
    if vocab_file is not None:
        # a stored vocabulary takes precedence over building one from data
        vocab = Vocabulary(file=vocab_file)
    else:
        # gather all tokens of the requested side from the dataset
        tokens = []
        for example in dataset.examples:
            if field == "src":
                tokens.extend(example.src)
            elif field == "trg":
                tokens.extend(example.trg)

        counter = Counter(tokens)
        if min_freq > -1:
            # drop tokens that occur less often than min_freq
            counter = Counter({tok: cnt for tok, cnt in counter.items()
                               if cnt >= min_freq})

        # most frequent tokens first; ties broken alphabetically (ascending),
        # which matches sorting alphabetically and then stably by frequency
        ranked = sorted(counter.items(),
                        key=lambda item: (-item[1], item[0]))
        vocab_tokens = [tok for tok, _ in ranked[:max_size]]
        assert len(vocab_tokens) <= max_size

        vocab = Vocabulary(tokens=vocab_tokens)

    assert len(vocab) <= max_size + len(vocab.specials)
    assert vocab.itos[DEFAULT_UNK_ID()] == UNK_TOKEN

    # check for all except for UNK token whether they are OOVs
    for special in vocab.specials[1:]:
        assert not vocab.is_unk(special)

    return vocab
| 6,887 | 33.09901 | 79 | py |
KSTER | KSTER-main/joeynmt/build_database.py | import torch
import numpy as np
import logging
from hashlib import md5
from joeynmt.prediction import parse_test_args
from joeynmt.helpers import load_config, load_checkpoint, get_latest_checkpoint
from joeynmt.data import load_data, Dataset, make_data_iter
from joeynmt.model import build_model, _DataParallel, Model
from joeynmt.batch import Batch
from joeynmt.constants import PAD_TOKEN, BOS_TOKEN, EOS_TOKEN
from joeynmt.faiss_index import FaissIndex
from npy_append_array import NpyAppendArray
# Console logging: timestamped INFO-level messages for database building.
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(
    "%(asctime)s %(filename)s:%(lineno)d:\n[%(levelname)s]:%(message)s",
    datefmt="%Y-%m-%d %H:%M:%S")
stream_handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(stream_handler)
logger.setLevel(logging.INFO)
# do not hand records to ancestor loggers (avoids duplicate output)
logger.propagate = False
def generate_id(sequence: str) -> str:
    """Return a deterministic hex id for `sequence`: the MD5 of its UTF-8 bytes."""
    digest = md5(sequence.encode())
    return digest.hexdigest()
# pylint: disable=too-many-arguments,too-many-locals,no-member
def store_examples(model: Model, embedding_path: str, token_map_path: str, data: Dataset, batch_size: int,
                   use_cuda: bool, level: str, n_gpu: int, batch_class: Batch, batch_type: str) \
        -> None:
    """
    Force-decode `data` with the trained model and persist one
    (decoder hidden state, target token id) pair per target position.

    Hidden states are appended to `embedding_path` (npy, float16) and the
    matching token ids are written line by line to `token_map_path`.
    Entries are deduplicated on the md5 of ``src_text + BOS + target prefix``:
    with teacher forcing, the decoder state at position t is a function of
    the source and the target prefix only, so an identical context yields an
    identical embedding and needs to be stored once.

    :param model: trained model used for force decoding
    :param embedding_path: output .npy file for hidden states
    :param token_map_path: output text file, one token id per stored embedding
    :param data: dataset to force-decode
    :param batch_size: batch size
    :param use_cuda: if True, use CUDA
    :param level: segmentation level (unused here; kept for interface parity)
    :param n_gpu: number of GPUs
    :param batch_class: class type of batch
    :param batch_type: "sentence" or "token" batching
    """
    assert batch_size >= n_gpu, "batch_size must be bigger than n_gpu."
    if batch_size > 1000 and batch_type == "sentence":
        logger.warning(
            "WARNING: Are you sure you meant to work on huge batches like "
            "this? 'batch_size' is > 1000 for sentence-batching. "
            "Consider decreasing it or switching to"
            " 'eval_batch_type: token'.")
    valid_iter = make_data_iter(
        dataset=data, batch_size=batch_size, batch_type=batch_type,
        shuffle=False, train=False)
    pad_index = model.src_vocab.stoi[PAD_TOKEN]
    # disable dropout
    model.eval()
    total_doc = 0
    sentence_count = 0
    disp_step = 100
    npaa = NpyAppendArray(embedding_path)
    token_map_file = open(token_map_path, "w", encoding="utf-8")
    # md5 digests of every (src + BOS + target prefix) context stored so far
    id_st = set()
    # don't track gradients while force decoding
    with torch.no_grad():
        for step, valid_batch in enumerate(iter(valid_iter)):
            batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
            trg_length = batch.trg_length.cpu().numpy().tolist()
            trg = batch.trg.cpu().numpy()
            # texts/ids are taken before sorting; the hidden states are put
            # back into the original order below via sort_reverse_index
            batch_src_texts = model.src_vocab.arrays_to_sentences(arrays=batch.src, cut_at_eos=True)
            batch_trg_texts = model.trg_vocab.arrays_to_sentences(arrays=batch.trg, cut_at_eos=False)
            sentence_count += batch.nseqs
            # sort batch now by src length and keep track of order
            sort_reverse_index = batch.sort_by_src_length()
            _, batch_hidden_states, _, _ = model._encode_decode(**vars(batch))
            batch_hidden_states = batch_hidden_states[sort_reverse_index].cpu().numpy().astype(np.float16)
            for i in range(len(batch_trg_texts)):
                src_text = " ".join(batch_src_texts[i])
                # drop the trailing EOS position
                trg_tokens = batch_trg_texts[i][0: trg_length[i] - 1]
                trg_token_ids = trg[i][0: trg_length[i] - 1].tolist()
                hidden_states = batch_hidden_states[i][0: trg_length[i] - 1]
                sequence = src_text + BOS_TOKEN
                for token, token_id, embedding in zip(trg_tokens, trg_token_ids, hidden_states):
                    _id = generate_id(sequence)
                    # BUGFIX: extend the prefix unconditionally. Previously
                    # this line was skipped for duplicate ids, which froze
                    # `sequence`, made every later context id of the sentence
                    # equal to the stale one, and silently dropped tokens
                    # whose context was actually new.
                    sequence += token
                    if _id in id_st:
                        continue
                    id_st.add(_id)
                    npaa.append(embedding[np.newaxis, :])
                    token_map_file.write(f"{token_id}\n")
                    total_doc += 1
            if step % disp_step == 0 and step > 0:
                logger.info(f"save {sentence_count} sentences with {total_doc} tokens")
    # NpyAppendArray flushes/closes on deletion
    del npaa
    token_map_file.close()
    logger.info(f"save {sentence_count} sentences with {total_doc} tokens")
def build_database(cfg_file: str, ckpt: str, division: str, index_path: str, embedding_path: str, token_map_path: str,
                   batch_class: Batch = Batch, datasets: dict = None) -> None:
    """
    Build the retrieval database for one data division: force-decode it with
    a trained model to dump (hidden state, token id) pairs via
    ``store_examples``, then train a faiss index on the embeddings and
    export it.
    :param cfg_file: path to configuration file
    :param ckpt: path to checkpoint to load (latest in model_dir if None)
    :param division: data split to process, one of "train", "dev", "test"
    :param index_path: where the trained faiss index is exported
    :param embedding_path: .npy file receiving the decoder hidden states
    :param token_map_path: text file receiving the matching token ids
    :param batch_class: class type of batch
    :param datasets: pre-loaded datasets (avoids loading the data again)
    """
    logger.info("load config")
    cfg = load_config(cfg_file)
    model_dir = cfg["training"]["model_dir"]
    assert division in ["train", "dev", "test"]
    logger.info(division)
    # when checkpoint is not specified, take latest (best) from model dir
    if ckpt is None:
        ckpt = get_latest_checkpoint(model_dir)
    try:
        step = ckpt.split(model_dir+"/")[1].split(".ckpt")[0]
    except IndexError:
        step = "best"
    # load the data
    logger.info("load data")
    if datasets is None:
        train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(
            data_cfg=cfg["data"], datasets=["train", "dev", "test"])
        data_to_predict = {"train": train_data, "dev": dev_data, "test": test_data}
    else:  # avoid to load data again
        data_to_predict = {"train": datasets["train"], "dev": datasets["dev"], "test": datasets["test"]}
        src_vocab = datasets["src_vocab"]
        trg_vocab = datasets["trg_vocab"]
    # parse test args
    batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
        max_output_length, beam_size, beam_alpha, postprocess, \
        bpe_type, sacrebleu, decoding_description, tokenizer_info \
        = parse_test_args(cfg, mode="test")
    # load model state from disk
    logger.info("load checkpoints")
    model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
    # build model and load parameters into it
    model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
    model.load_state_dict(model_checkpoint["model_state"])
    if use_cuda:
        model.to(device)
    # multi-gpu eval
    if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = _DataParallel(model)
    for data_set_name, data_set in data_to_predict.items():
        if data_set is None:
            continue
        # only the requested division is dumped
        if not data_set_name == division:
            continue
        dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
        logger.info("Force decoding on %s set (%s)..." % (data_set_name, dataset_file))
        logger.info("store examples")
        store_examples(model, embedding_path=embedding_path, token_map_path=token_map_path, data=data_set,
                       batch_size=batch_size, batch_class=batch_class, batch_type=batch_type, level=level,
                       use_cuda=use_cuda, n_gpu=n_gpu)
    # train/populate the faiss index on the dumped embeddings and export it
    logger.info("train index")
    index = FaissIndex()
    index.train(embedding_path)
    index.add(embedding_path)
    index.export(index_path)
    # release index memory once exported
    del index
KSTER | KSTER-main/joeynmt/prediction.py | # coding: utf-8
"""
This modules holds methods for generating predictions from a model.
"""
import os
import sys
from typing import List, Optional
import logging
import numpy as np
import json
import torch
from torchtext.data import Dataset, Field
from joeynmt.helpers import bpe_postprocess, check_combiner_cfg, load_config, make_logger,\
get_latest_checkpoint, load_checkpoint, store_attention_plots,\
get_sacrebleu_description
from joeynmt.metrics import bleu, chrf, token_accuracy, sequence_accuracy
from joeynmt.model import build_model, Model, _DataParallel
from joeynmt.search import run_batch
from joeynmt.batch import Batch
from joeynmt.data import load_data, make_data_iter, MonoDataset
from joeynmt.constants import UNK_TOKEN, PAD_TOKEN, EOS_TOKEN
from joeynmt.vocabulary import Vocabulary
from joeynmt.combiners import build_combiner, NoCombiner
# module-level logger; handlers are attached by make_logger() at runtime
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments,too-many-locals,no-member
def validate_on_data(model: Model, data: Dataset,
                     batch_size: int,
                     use_cuda: bool, max_output_length: int,
                     level: str, eval_metric: Optional[str],
                     n_gpu: int,
                     batch_class: Batch = Batch,
                     compute_loss: bool = False,
                     beam_size: int = 1, beam_alpha: int = -1,
                     batch_type: str = "sentence",
                     postprocess: bool = True,
                     bpe_type: str = "subword-nmt",
                     sacrebleu: dict = None) \
        -> (float, float, float, List[str], List[List[str]], List[str],
            List[str], List[List[str]], List[np.array]):
    """
    Generate translations for the given data.
    If `compute_loss` is True and references are given,
    also compute the loss.
    :param model: model module
    :param data: dataset for validation
    :param batch_size: validation batch size
    :param batch_class: class type of batch
    :param use_cuda: if True, use CUDA
    :param max_output_length: maximum length for generated hypotheses
    :param level: segmentation level, one of "char", "bpe", "word"
    :param eval_metric: evaluation metric, e.g. "bleu"
    :param n_gpu: number of GPUs
    :param compute_loss: whether to computes a scalar loss
        for given inputs and targets
    :param beam_size: beam size for validation.
        If <2 then greedy decoding (default).
    :param beam_alpha: beam search alpha for length penalty,
        disabled if set to -1 (default).
    :param batch_type: validation batch type (sentence or token)
    :param postprocess: if True, remove BPE segmentation from translations
    :param bpe_type: bpe type, one of {"subword-nmt", "sentencepiece"}
    :param sacrebleu: sacrebleu options
    :return:
        - current_valid_score: current validation score [eval_metric],
        - valid_loss: validation loss,
        - valid_ppl:, validation perplexity,
        - valid_sources: validation sources,
        - valid_sources_raw: raw validation sources (before post-processing),
        - valid_references: validation references,
        - valid_hypotheses: validation_hypotheses,
        - decoded_valid: raw validation hypotheses (before post-processing),
        - valid_attention_scores: attention scores for validation hypotheses
    """
    assert batch_size >= n_gpu, "batch_size must be bigger than n_gpu."
    if sacrebleu is None:  # assign default value
        sacrebleu = {"remove_whitespace": True, "tokenize": "13a", "use_detokenization": False}
    if batch_size > 1000 and batch_type == "sentence":
        logger.warning(
            "WARNING: Are you sure you meant to work on huge batches like "
            "this? 'batch_size' is > 1000 for sentence-batching. "
            "Consider decreasing it or switching to"
            " 'eval_batch_type: token'.")
    valid_iter = make_data_iter(
        dataset=data, batch_size=batch_size, batch_type=batch_type,
        shuffle=False, train=False)
    valid_sources_raw = data.src
    pad_index = model.src_vocab.stoi[PAD_TOKEN]
    # disable dropout
    model.eval()
    # check combiner: attach a pass-through combiner so plain models work
    # with the combiner-aware loss/decoding code paths
    if not hasattr(model, "combiner"):
        model.combiner = NoCombiner()
    # don't track gradients during validation
    with torch.no_grad():
        all_outputs = []
        valid_attention_scores = []
        total_loss = 0
        total_ntokens = 0
        total_nseqs = 0
        for valid_batch in iter(valid_iter):
            # run as during training to get validation loss (e.g. xent)
            batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
            # sort batch now by src length and keep track of order
            sort_reverse_index = batch.sort_by_src_length()
            # run as during training with teacher forcing
            if compute_loss and batch.trg is not None:
                batch_loss, _, _, _ = model(return_type="combiner_loss", **vars(batch))
                if n_gpu > 1:
                    batch_loss = batch_loss.mean()  # average on multi-gpu
                total_loss += batch_loss
                total_ntokens += batch.ntokens
                total_nseqs += batch.nseqs
            # run as during inference to produce translations
            output, attention_scores = run_batch(
                model=model, batch=batch, beam_size=beam_size,
                beam_alpha=beam_alpha, max_output_length=max_output_length)
            # sort outputs back to original order
            all_outputs.extend(output[sort_reverse_index])
            valid_attention_scores.extend(
                attention_scores[sort_reverse_index]
                if attention_scores is not None else [])
        assert len(all_outputs) == len(data)
        if compute_loss and total_ntokens > 0:
            # total validation loss
            valid_loss = total_loss
            # exponent of token-level negative log prob
            valid_ppl = torch.exp(total_loss / total_ntokens)
        else:
            valid_loss = -1
            valid_ppl = -1
        # decode back to symbols
        decoded_valid = model.trg_vocab.arrays_to_sentences(arrays=all_outputs,
                                                            cut_at_eos=True)
    # evaluate with metric on full dataset
    join_char = " " if level in ["word", "bpe"] else ""
    valid_sources = [join_char.join(s) for s in data.src]
    valid_references = [join_char.join(t) for t in data.trg]
    valid_hypotheses = [join_char.join(t) for t in decoded_valid]
    # post-process: undo BPE segmentation before scoring
    if level == "bpe" and postprocess:
        valid_sources = [bpe_postprocess(s, bpe_type=bpe_type)
                         for s in valid_sources]
        valid_references = [bpe_postprocess(v, bpe_type=bpe_type)
                            for v in valid_references]
        valid_hypotheses = [bpe_postprocess(v, bpe_type=bpe_type)
                            for v in valid_hypotheses]
    # optional detokenization via callables supplied in the sacrebleu options
    if sacrebleu["use_detokenization"]:
        batch_src_detokenize = sacrebleu["batch_src_detokenize"]
        batch_trg_detokenize = sacrebleu["batch_trg_detokenize"]
        valid_sources = batch_src_detokenize(valid_sources)
        valid_references = batch_trg_detokenize(valid_references)
        valid_hypotheses = batch_trg_detokenize(valid_hypotheses)
    # if references are given, evaluate against them
    if valid_references:
        assert len(valid_hypotheses) == len(valid_references)
        current_valid_score = 0
        if eval_metric.lower() == 'bleu':
            # this version does not use any tokenization
            current_valid_score = bleu(
                valid_hypotheses, valid_references,
                tokenize=sacrebleu["tokenize"])
        elif eval_metric.lower() == 'chrf':
            current_valid_score = chrf(valid_hypotheses, valid_references,
                                       remove_whitespace=sacrebleu["remove_whitespace"])
        elif eval_metric.lower() == 'token_accuracy':
            current_valid_score = token_accuracy(  # supply List[List[str]]
                list(decoded_valid), list(data.trg))
        elif eval_metric.lower() == 'sequence_accuracy':
            current_valid_score = sequence_accuracy(
                valid_hypotheses, valid_references)
    else:
        current_valid_score = -1
    return current_valid_score, valid_loss, valid_ppl, valid_sources, \
        valid_sources_raw, valid_references, valid_hypotheses, \
        decoded_valid, valid_attention_scores
def parse_test_args(cfg, mode="test"):
    """
    Parse test-time arguments from a loaded config.
    :param cfg: config object (dict loaded from yaml)
    :param mode: 'test' or 'translate'
    :return: (batch_size, batch_type, use_cuda, device, n_gpu, level,
        eval_metric, max_output_length, beam_size, beam_alpha, postprocess,
        bpe_type, sacrebleu, decoding_description, tokenizer_info)
    :raises ValueError: if no test data is configured or mode is unknown
    """
    if "test" not in cfg["data"]:
        raise ValueError("Test data must be specified in config.")
    # fall back to the training-time batch settings when no eval-specific
    # values are configured
    batch_size = cfg["training"].get(
        "eval_batch_size", cfg["training"].get("batch_size", 1))
    batch_type = cfg["training"].get(
        "eval_batch_type", cfg["training"].get("batch_type", "sentence"))
    use_cuda = (cfg["training"].get("use_cuda", False)
                and torch.cuda.is_available())
    device = torch.device("cuda" if use_cuda else "cpu")
    if mode == 'test':
        n_gpu = torch.cuda.device_count() if use_cuda else 0
        # BUGFIX: the "testing" section is optional (see the defaults below),
        # so don't index cfg["testing"] directly — that raised KeyError
        k = cfg.get("testing", {}).get("beam_size", 1)
        batch_per_device = batch_size*k // n_gpu if n_gpu > 1 else batch_size*k
        logger.info("Process device: %s, n_gpu: %d, "
                    "batch_size per device: %d (with beam_size)",
                    device, n_gpu, batch_per_device)
        eval_metric = cfg["training"]["eval_metric"]
    elif mode == 'translate':
        # in multi-gpu, batch_size must be bigger than n_gpu!
        n_gpu = 1 if use_cuda else 0
        logger.debug("Process device: %s, n_gpu: %d", device, n_gpu)
        eval_metric = ""
    else:
        # previously an unknown mode surfaced later as a NameError on n_gpu
        raise ValueError("Unknown mode '%s', expected 'test' or 'translate'."
                         % mode)
    level = cfg["data"]["level"]
    max_output_length = cfg["training"].get("max_output_length", None)
    # whether to use beam search for decoding, 0: greedy decoding
    if "testing" in cfg:
        beam_size = cfg["testing"].get("beam_size", 1)
        beam_alpha = cfg["testing"].get("alpha", -1)
        postprocess = cfg["testing"].get("postprocess", True)
        bpe_type = cfg["testing"].get("bpe_type", "subword-nmt")
        sacrebleu = get_sacrebleu_description(cfg)
    else:
        beam_size = 1
        beam_alpha = -1
        postprocess = True
        bpe_type = "subword-nmt"
        sacrebleu = {"remove_whitespace": True, "tokenize": "13a", "use_detokenization": False}
    decoding_description = "Greedy decoding" if beam_size < 2 else \
        "Beam search decoding with beam size = {} and alpha = {}". \
        format(beam_size, beam_alpha)
    tokenizer_info = f"[{sacrebleu['tokenize']}]" \
        if eval_metric == "bleu" else ""
    return batch_size, batch_type, use_cuda, device, n_gpu, level, \
        eval_metric, max_output_length, beam_size, beam_alpha, \
        postprocess, bpe_type, sacrebleu, decoding_description, \
        tokenizer_info
# pylint: disable-msg=logging-too-many-args
def test(cfg_file,
         ckpt: str,
         combiner_cfg: dict,
         batch_class: Batch = Batch,
         output_path: str = None,
         save_attention: bool = False,
         datasets: dict = None) -> None:
    """
    Main test function. Handles loading a model from checkpoint, generating
    translations and storing them and attention plots.
    :param cfg_file: path to configuration file
    :param ckpt: path to checkpoint to load
    :param combiner_cfg: combiner configuration (type, checkpoint path, ...)
    :param batch_class: class type of batch
    :param output_path: path to output
    :param save_attention: whether to save the computed attention weights
    :param datasets: datasets to predict
    """
    cfg = load_config(cfg_file)
    model_dir = cfg["training"]["model_dir"]
    check_combiner_cfg(combiner_cfg)
    cfg["combiner"] = combiner_cfg
    if len(logger.handlers) == 0:
        _ = make_logger(model_dir, mode="test")  # version string returned
    # when checkpoint is not specified, take latest (best) from model dir
    if ckpt is None:
        ckpt = get_latest_checkpoint(model_dir)
    # training step of the checkpoint, used to name attention plot files
    try:
        step = ckpt.split(model_dir+"/")[1].split(".ckpt")[0]
    except IndexError:
        step = "best"
    # load the data
    if datasets is None:
        _, dev_data, test_data, src_vocab, trg_vocab = load_data(
            data_cfg=cfg["data"], datasets=["dev", "test"])
        data_to_predict = {"dev": dev_data, "test": test_data}
    else:  # avoid to load data again
        data_to_predict = {"dev": datasets["dev"], "test": datasets["test"]}
        src_vocab = datasets["src_vocab"]
        trg_vocab = datasets["trg_vocab"]
    # parse test args
    batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
        max_output_length, beam_size, beam_alpha, postprocess, \
        bpe_type, sacrebleu, decoding_description, tokenizer_info \
        = parse_test_args(cfg, mode="test")
    # load model state from disk
    model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
    # build model and load parameters into it
    model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
    model.load_state_dict(model_checkpoint["model_state"])
    combiner = build_combiner(cfg)
    # load combiner from checkpoint for dynamic combiners
    if combiner_cfg["type"]=="dynamic_combiner":
        combiner_checkpoint = load_checkpoint(combiner_cfg["combiner_path"], use_cuda=use_cuda)
        combiner.load_state_dict(combiner_checkpoint["model_state"])
    model.combiner = combiner
    if use_cuda:
        model.to(device)
    # multi-gpu eval
    if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = _DataParallel(model)
    for data_set_name, data_set in data_to_predict.items():
        if data_set is None:
            continue
        dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
        logger.info("Decoding on %s set (%s)...", data_set_name, dataset_file)
        #pylint: disable=unused-variable
        score, loss, ppl, sources, sources_raw, references, hypotheses, \
            hypotheses_raw, attention_scores = validate_on_data(
                model, data=data_set, batch_size=batch_size,
                batch_class=batch_class, batch_type=batch_type, level=level,
                max_output_length=max_output_length, eval_metric=eval_metric,
                use_cuda=use_cuda, compute_loss=False, beam_size=beam_size,
                beam_alpha=beam_alpha, postprocess=postprocess,
                bpe_type=bpe_type, sacrebleu=sacrebleu, n_gpu=n_gpu)
        #pylint: enable=unused-variable
        if "trg" in data_set.fields:
            logger.info("%4s %s%s: %6.2f [%s]",
                        data_set_name, eval_metric, tokenizer_info,
                        score, decoding_description)
        else:
            logger.info("No references given for %s -> no evaluation.",
                        data_set_name)
        if save_attention:
            # attention scores are only produced by greedy decoding
            if attention_scores:
                attention_name = "{}.{}.att".format(data_set_name, step)
                attention_path = os.path.join(model_dir, attention_name)
                logger.info("Saving attention plots. This might take a while..")
                store_attention_plots(attentions=attention_scores,
                                      targets=hypotheses_raw,
                                      sources=data_set.src,
                                      indices=range(len(hypotheses)),
                                      output_prefix=attention_path)
                logger.info("Attention plots saved to: %s", attention_path)
            else:
                logger.warning("Attention scores could not be saved. "
                               "Note that attention scores are not available "
                               "when using beam search. "
                               "Set beam_size to 1 for greedy decoding.")
        if output_path is not None:
            # one output file per data split
            output_path_set = "{}.{}".format(output_path, data_set_name)
            with open(output_path_set, mode="w", encoding="utf-8") as out_file:
                for hyp in hypotheses:
                    out_file.write(hyp + "\n")
            logger.info("Translations saved to: %s", output_path_set)
# pylint: disable-msg=logging-too-many-args
def analyze(cfg_file,
            ckpt: str,
            combiner_cfg: dict,
            output_path: str,
            batch_class: Batch = Batch,
            datasets: dict = None) -> None:
    """
    Force-decode dev/test data and dump per-token combiner diagnostics to a
    JSON-lines file: argmax predictions of the mixed, model-based and
    example-based distributions, the probability each assigns to the
    reference token, and the per-token mixing weight and kernel bandwidth.
    :param cfg_file: path to configuration file
    :param ckpt: path to checkpoint to load
    :param combiner_cfg: combiner configuration (type, checkpoint path, ...)
    :param output_path: prefix for the JSON-lines output (split name appended)
    :param batch_class: class type of batch
    :param datasets: datasets to predict
    """
    cfg = load_config(cfg_file)
    model_dir = cfg["training"]["model_dir"]
    check_combiner_cfg(combiner_cfg)
    cfg["combiner"] = combiner_cfg
    if len(logger.handlers) == 0:
        _ = make_logger(model_dir, mode="test")  # version string returned
    # load the data
    if datasets is None:
        _, dev_data, test_data, src_vocab, trg_vocab = load_data(
            data_cfg=cfg["data"], datasets=["dev", "test"])
        data_to_predict = {"dev": dev_data, "test": test_data}
    else:  # avoid to load data again
        data_to_predict = {"dev": datasets["dev"], "test": datasets["test"]}
        src_vocab = datasets["src_vocab"]
        trg_vocab = datasets["trg_vocab"]
    # parse test args
    batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
        max_output_length, beam_size, beam_alpha, postprocess, \
        bpe_type, sacrebleu, decoding_description, tokenizer_info \
        = parse_test_args(cfg, mode="test")
    # load model state from disk
    model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
    # build model and load parameters into it
    model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
    model.load_state_dict(model_checkpoint["model_state"])
    combiner = build_combiner(cfg)
    # load combiner from checkpoint for dynamic combiners
    if combiner_cfg["type"]=="dynamic_combiner":
        combiner_checkpoint = load_checkpoint(combiner_cfg["combiner_path"], use_cuda=use_cuda)
        combiner.load_state_dict(combiner_checkpoint["model_state"])
    model.combiner = combiner
    if use_cuda:
        model.to(device)
    # multi-gpu eval
    if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = _DataParallel(model)
    for data_set_name, data_set in data_to_predict.items():
        if data_set is None:
            continue
        dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
        logger.info("Force decoding on %s set (%s)...", data_set_name, dataset_file)
        valid_iter = make_data_iter(
            dataset=data_set, batch_size=batch_size, batch_type=batch_type,
            shuffle=False, train=False)
        valid_sources_raw = data_set.src
        pad_index = model.src_vocab.stoi[PAD_TOKEN]
        # disable dropout
        model.eval()
        # don't track gradients during validation
        all_docs = []
        with torch.no_grad():
            for step, valid_batch in enumerate(iter(valid_iter)):
                # run as during training to get validation loss (e.g. xent)
                batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
                trg_length = batch.trg_length.cpu().numpy().tolist()
                trg = batch.trg
                batch_src_texts = model.src_vocab.arrays_to_sentences(arrays=batch.src, cut_at_eos=True)
                batch_trg_texts = model.trg_vocab.arrays_to_sentences(arrays=batch.trg, cut_at_eos=False)
                # sort batch now by src length and keep track of order
                sort_reverse_index = batch.sort_by_src_length()
                batch_logits, batch_hidden_states, _, _ = model._encode_decode(**vars(batch))
                batch_logits = batch_logits[sort_reverse_index]
                batch_hidden_states = batch_hidden_states[sort_reverse_index]
                # combiner returns the mixed distribution plus its components
                # and the per-token interpolation parameters
                batch_mixed_distribution, batch_model_based_distribution, example_based_distribution, batch_mixing_weight, batch_bandwidth = model.combiner.detailed_forward(batch_hidden_states, batch_logits)
                batch_preds, batch_model_preds, batch_example_preds = batch_mixed_distribution.argmax(dim=-1), batch_model_based_distribution.argmax(dim=-1), example_based_distribution.argmax(dim=-1)
                # probability mass each distribution puts on the reference token
                batch_trg_probs = torch.gather(batch_mixed_distribution, -1, trg.unsqueeze(-1)).squeeze(-1)
                batch_trg_model_based_distribution = torch.gather(batch_model_based_distribution, -1, trg.unsqueeze(-1)).squeeze(-1)
                batch_trg_example_based_distribution = torch.gather(example_based_distribution, -1, trg.unsqueeze(-1)).squeeze(-1)
                batch_preds, batch_model_preds, batch_example_preds = batch_preds.cpu().numpy(), batch_model_preds.cpu().numpy(), batch_example_preds.cpu().numpy()
                batch_trg_probs, batch_trg_model_based_distribution, batch_trg_example_based_distribution = batch_trg_probs.cpu().numpy(), batch_trg_model_based_distribution.cpu().numpy(), batch_trg_example_based_distribution.cpu().numpy()
                batch_mixing_weight, batch_bandwidth = batch_mixing_weight.cpu().numpy(), batch_bandwidth.cpu().numpy()
                for i in range(len(batch_trg_texts)):
                    # trim the trailing EOS position
                    trg_len = trg_length[i] - 1
                    doc = {
                        "src": " ".join(batch_src_texts[i]),
                        "trg_tokens": batch_trg_texts[i][0: trg_len],
                        "preds": batch_preds[i][0: trg_len].tolist(),
                        "model_preds": batch_model_preds[i][0: trg_len].tolist(),
                        "example_preds": batch_example_preds[i][0: trg_len].tolist(),
                        "trg_probs": batch_trg_probs[i][0: trg_len].tolist(),
                        "trg_model_probs": batch_trg_model_based_distribution[i][0: trg_len].tolist(),
                        "trg_example_probs": batch_trg_example_based_distribution[i][0: trg_len].tolist(),
                        "mixing_weight": batch_mixing_weight[i][0: trg_len].tolist(),
                        "bandwidth": batch_bandwidth[i][0: trg_len].tolist()
                    }
                    all_docs.append(json.dumps(doc))
        # one JSON-lines file per data split
        output_path_set = "{}.{}".format(output_path, data_set_name)
        with open(output_path_set, mode="w", encoding="utf-8") as out_file:
            for doc in all_docs:
                out_file.write(doc + "\n")
        logger.info("Analysis saved to: %s", output_path_set)
# pylint: disable-msg=logging-too-many-args
def score_translations(cfg_file,
                       ckpt: str,
                       combiner_cfg: dict,
                       output_path: str,
                       batch_class: Batch = Batch,
                       datasets: dict = None) -> None:
    """
    Force-decode the test set with teacher forcing and write one
    label-smoothed cross-entropy loss per sentence to `output_path`.
    The dev set is skipped, and the batch size is forced to 1 so that
    every loss value is per-sentence.
    :param cfg_file: path to configuration file
    :param ckpt: path to checkpoint to load
    :param combiner_cfg: combiner configuration (type, checkpoint path, ...)
    :param output_path: file to write one loss per line (skipped if None)
    :param batch_class: class type of batch
    :param datasets: datasets to predict
    """
    from joeynmt.loss import XentLoss
    from joeynmt.constants import PAD_TOKEN
    cfg = load_config(cfg_file)
    model_dir = cfg["training"]["model_dir"]
    check_combiner_cfg(combiner_cfg)
    cfg["combiner"] = combiner_cfg
    if len(logger.handlers) == 0:
        _ = make_logger(model_dir, mode="test")  # version string returned
    # load the data
    if datasets is None:
        _, dev_data, test_data, src_vocab, trg_vocab = load_data(
            data_cfg=cfg["data"], datasets=["dev", "test"])
        data_to_predict = {"dev": dev_data, "test": test_data}
    else:  # avoid to load data again
        data_to_predict = {"dev": datasets["dev"], "test": datasets["test"]}
        src_vocab = datasets["src_vocab"]
        trg_vocab = datasets["trg_vocab"]
    # parse test args
    batch_size, batch_type, use_cuda, device, n_gpu, level, eval_metric, \
        max_output_length, beam_size, beam_alpha, postprocess, \
        bpe_type, sacrebleu, decoding_description, tokenizer_info \
        = parse_test_args(cfg, mode="test")
    # one sentence per batch so each loss value is a per-sentence score
    batch_size = 1
    batch_type = "sentence"
    # load model state from disk
    model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
    # build model and load parameters into it
    model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
    model.load_state_dict(model_checkpoint["model_state"])
    combiner = build_combiner(cfg)
    # load combiner from checkpoint for dynamic combiners
    if combiner_cfg["type"]=="dynamic_combiner":
        combiner_checkpoint = load_checkpoint(combiner_cfg["combiner_path"], use_cuda=use_cuda)
        combiner.load_state_dict(combiner_checkpoint["model_state"])
    model.combiner = combiner
    # same smoothing as training so scores are comparable to training loss
    loss_function = XentLoss(pad_index=model.pad_index, smoothing=0.1)
    if use_cuda:
        model.to(device)
    # multi-gpu eval
    if n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = _DataParallel(model)
    for data_set_name, data_set in data_to_predict.items():
        if data_set is None:
            continue
        # only the test division is scored
        if data_set_name == "dev":
            continue
        dataset_file = cfg["data"][data_set_name] + "." + cfg["data"]["trg"]
        logger.info("Force decoding on %s set (%s)...", data_set_name, dataset_file)
        valid_iter = make_data_iter(
            dataset=data_set, batch_size=batch_size, batch_type=batch_type,
            shuffle=False, train=False)
        pad_index = model.src_vocab.stoi[PAD_TOKEN]
        # disable dropout
        model.eval()
        # don't track gradients during validation
        losses = []
        with torch.no_grad():
            for step, valid_batch in enumerate(iter(valid_iter)):
                # run as during training to get validation loss (e.g. xent)
                batch = batch_class(valid_batch, pad_index, use_cuda=use_cuda)
                trg = batch.trg
                # sort batch now by src length and keep track of order
                sort_reverse_index = batch.sort_by_src_length()
                batch_logits, batch_hidden_states, _, _ = model._encode_decode(**vars(batch))
                batch_logits = batch_logits[sort_reverse_index]
                batch_hidden_states = batch_hidden_states[sort_reverse_index]
                batch_log_probs = model.combiner(batch_hidden_states, batch_logits)
                loss = loss_function(batch_log_probs, trg).item()
                losses.append(loss)
        if output_path is not None:
            with open(output_path, mode="w", encoding="utf-8") as out_file:
                for loss in losses:
                    out_file.write(f"{loss}\n")
            logger.info(f"Losses saved to: {output_path}")
def translate(cfg_file: str,
              ckpt: str,
              output_path: str = None,
              batch_class: Batch = Batch) -> None:
    """
    Interactive translation function.
    Loads model from checkpoint and translates either the stdin input or
    asks for input to translate interactively.
    The input has to be pre-processed according to the data that the model
    was trained on, i.e. tokenized or split into subwords.
    Translations are printed to stdout.
    :param cfg_file: path to configuration file
    :param ckpt: path to checkpoint to load
    :param output_path: path to output file
    :param batch_class: class type of batch
    """
    def _load_line_as_data(line):
        """ Create a dataset from one line via a temporary file. """
        # write src input to temporary file
        tmp_name = "tmp"
        tmp_suffix = ".src"
        tmp_filename = tmp_name+tmp_suffix
        with open(tmp_filename, "w") as tmp_file:
            tmp_file.write("{}\n".format(line))
        test_data = MonoDataset(path=tmp_name, ext=tmp_suffix,
                                field=src_field)
        # remove temporary file
        if os.path.exists(tmp_filename):
            os.remove(tmp_filename)
        return test_data
    def _translate_data(test_data):
        """ Translates given dataset, using parameters from outer scope. """
        # pylint: disable=unused-variable
        score, loss, ppl, sources, sources_raw, references, hypotheses, \
            hypotheses_raw, attention_scores = validate_on_data(
                model, data=test_data, batch_size=batch_size,
                batch_class=batch_class, batch_type=batch_type, level=level,
                max_output_length=max_output_length, eval_metric="",
                use_cuda=use_cuda, compute_loss=False, beam_size=beam_size,
                beam_alpha=beam_alpha, postprocess=postprocess,
                bpe_type=bpe_type, sacrebleu=sacrebleu, n_gpu=n_gpu)
        return hypotheses
    cfg = load_config(cfg_file)
    model_dir = cfg["training"]["model_dir"]
    _ = make_logger(model_dir, mode="translate")
    # version string returned
    # when checkpoint is not specified, take oldest from model dir
    if ckpt is None:
        ckpt = get_latest_checkpoint(model_dir)
    # read vocabs (translation needs no reference data, only the vocabularies)
    src_vocab_file = cfg["data"].get("src_vocab", model_dir + "/src_vocab.txt")
    trg_vocab_file = cfg["data"].get("trg_vocab", model_dir + "/trg_vocab.txt")
    src_vocab = Vocabulary(file=src_vocab_file)
    trg_vocab = Vocabulary(file=trg_vocab_file)
    data_cfg = cfg["data"]
    level = data_cfg["level"]
    lowercase = data_cfg["lowercase"]
    # char level splits into characters, otherwise whitespace tokenization
    tok_fun = lambda s: list(s) if level == "char" else s.split()
    src_field = Field(init_token=None, eos_token=EOS_TOKEN,
                      pad_token=PAD_TOKEN, tokenize=tok_fun,
                      batch_first=True, lower=lowercase,
                      unk_token=UNK_TOKEN,
                      include_lengths=True)
    src_field.vocab = src_vocab
    # parse test args
    batch_size, batch_type, use_cuda, device, n_gpu, level, _, \
        max_output_length, beam_size, beam_alpha, postprocess, \
        bpe_type, sacrebleu, _, _ = parse_test_args(cfg, mode="translate")
    # load model state from disk
    model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
    # build model and load parameters into it
    model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
    model.load_state_dict(model_checkpoint["model_state"])
    if use_cuda:
        model.to(device)
    if not sys.stdin.isatty():
        # input file given (piped/redirected stdin): translate it in one go
        test_data = MonoDataset(path=sys.stdin, ext="", field=src_field)
        hypotheses = _translate_data(test_data)
        if output_path is not None:
            # write to outputfile if given
            output_path_set = "{}".format(output_path)
            with open(output_path_set, mode="w", encoding="utf-8") as out_file:
                for hyp in hypotheses:
                    out_file.write(hyp + "\n")
            logger.info("Translations saved to: %s.", output_path_set)
        else:
            # print to stdout
            for hyp in hypotheses:
                print(hyp)
    else:
        # enter interactive mode
        batch_size = 1
        batch_type = "sentence"
        while True:
            try:
                src_input = input("\nPlease enter a source sentence "
                                  "(pre-processed): \n")
                if not src_input.strip():
                    break
                # every line has to be made into dataset
                test_data = _load_line_as_data(line=src_input)
                hypotheses = _translate_data(test_data)
                print("JoeyNMT: {}".format(hypotheses[0]))
            except (KeyboardInterrupt, EOFError):
                print("\nBye.")
                break
| 32,482 | 41.406005 | 239 | py |
KSTER | KSTER-main/joeynmt/batch.py | # coding: utf-8
"""
Implementation of a mini-batch.
"""
import torch
class Batch:
    """Holds one padded mini-batch together with derived masks and counts.

    Wraps a batch coming from a torchtext iterator: exposes source/target
    tensors, their lengths, attention masks, the number of sequences and of
    non-padded target tokens, and can move everything to GPU and sort itself
    by source length.
    """

    # pylint: disable=too-many-instance-attributes
    def __init__(self, torch_batch, pad_index, use_cuda=False):
        """
        Build a joey batch from a torchtext batch.

        On top of the torchtext attributes this adds src/trg lengths,
        attention masks and the count of non-pad target tokens; the batch
        can later be sorted by source length.

        :param torch_batch: batch produced by a torchtext iterator
        :param pad_index: index of the padding token
        :param use_cuda: move all tensors to GPU when True
        """
        self.src, self.src_length = torch_batch.src
        # mask of shape (batch, 1, src_len): True wherever a real token sits
        self.src_mask = (self.src != pad_index).unsqueeze(1)
        self.nseqs = self.src.size(0)
        self.trg_input = None
        self.trg = None
        self.trg_mask = None
        self.trg_length = None
        self.ntokens = None
        self.use_cuda = use_cuda
        self.device = torch.device("cuda" if self.use_cuda else "cpu")

        if hasattr(torch_batch, "trg"):
            trg_full, trg_len = torch_batch.trg
            # teacher-forcing input: everything except the final token
            self.trg_input = trg_full[:, :-1]
            self.trg_length = trg_len
            # loss target: everything except the leading BOS token
            self.trg = trg_full[:, 1:]
            # padding positions are excluded from the loss computation
            self.trg_mask = (self.trg_input != pad_index).unsqueeze(1)
            self.ntokens = (self.trg != pad_index).data.sum().item()

        if self.use_cuda:
            self._make_cuda()

    def _make_cuda(self):
        """Move every tensor of this batch onto ``self.device``."""
        self.src = self.src.to(self.device)
        self.src_mask = self.src_mask.to(self.device)
        self.src_length = self.src_length.to(self.device)

        if self.trg_input is not None:
            self.trg_input = self.trg_input.to(self.device)
            self.trg = self.trg.to(self.device)
            self.trg_mask = self.trg_mask.to(self.device)

    def sort_by_src_length(self):
        """
        Sort the batch by source length (longest first), in place.

        :return: permutation (a plain list) that restores the original order
        """
        _, order = self.src_length.sort(0, descending=True)
        # argsort of a permutation yields its inverse: entry old_pos tells
        # where that sequence ended up after sorting
        restore = torch.argsort(order).tolist()

        self.src = self.src[order]
        self.src_length = self.src_length[order]
        self.src_mask = self.src_mask[order]

        if self.trg_input is not None:
            self.trg_input = self.trg_input[order]
            self.trg_mask = self.trg_mask[order]
            self.trg_length = self.trg_length[order]
            self.trg = self.trg[order]

        if self.use_cuda:
            self._make_cuda()

        return restore
| 3,327 | 32.616162 | 73 | py |
KSTER | KSTER-main/joeynmt/loss.py | # coding: utf-8
"""
Module to implement training loss
"""
import torch
from torch import nn, Tensor
from torch.autograd import Variable
class XentLoss(nn.Module):
    """
    Cross-entropy loss with optional label smoothing.

    With ``smoothing <= 0`` this is a plain summed NLL over the model's
    log-probabilities; with smoothing enabled, the one-hot targets are
    softened and the loss becomes a summed KL divergence against that
    smoothed distribution.
    """

    def __init__(self, pad_index: int, smoothing: float = 0.0):
        super().__init__()
        self.smoothing = smoothing
        self.pad_index = pad_index
        if self.smoothing <= 0.0:
            # standard xent loss
            self.criterion = nn.NLLLoss(ignore_index=self.pad_index,
                                        reduction='sum')
        else:
            # custom label-smoothed loss, computed with KL divergence loss
            self.criterion = nn.KLDivLoss(reduction='sum')

    def _smooth_targets(self, targets: Tensor, vocab_size: int):
        """
        Build the smoothed target distribution.

        Each reference token keeps probability ``1 - smoothing``; the rest
        of the mass is spread uniformly over the remaining non-pad tokens.
        Rows corresponding to padding positions are zeroed out entirely so
        they contribute nothing to the loss.

        :param targets: target indices, batch*seq_len
        :param vocab_size: size of the output vocabulary
        :return: smoothed target distributions, batch*seq_len x vocab_size
        """
        # uniform floor everywhere; vocab_size - 2 excludes the gold token
        # and the pad token from the smoothed mass
        dist = torch.full((targets.size(0), vocab_size),
                          self.smoothing / (vocab_size - 2),
                          device=targets.device)
        # the gold token receives the bulk of the probability
        dist.scatter_(1, targets.unsqueeze(1), 1.0 - self.smoothing)
        # never put mass on padding
        dist[:, self.pad_index] = 0
        # rows at padding positions carry no probability at all
        dist[targets == self.pad_index] = 0.0
        return dist

    # pylint: disable=arguments-differ
    def forward(self, log_probs, targets):
        """
        Compute the cross-entropy between logits and targets.

        If label smoothing is used, target distributions are not one-hot,
        but "1-smoothing" for the correct target token and the rest of the
        probability mass spread uniformly across the other tokens.

        :param log_probs: log probabilities as predicted by model
        :param targets: target indices
        :return: summed loss
        """
        flat_scores = log_probs.contiguous().view(-1, log_probs.size(-1))
        if self.smoothing > 0:
            targets = self._smooth_targets(
                targets=targets.contiguous().view(-1),
                vocab_size=flat_scores.size(-1))
            # targets: distributions with batch*seq_len x vocab_size
            assert flat_scores.shape == targets.shape
        else:
            # targets: indices with batch*seq_len
            targets = targets.contiguous().view(-1)
        return self.criterion(flat_scores, targets)
| 3,120 | 38.506329 | 78 | py |
KSTER | KSTER-main/joeynmt/embeddings.py | # coding: utf-8
"""
Embedding module
"""
import io
import math
import logging
import torch
from torch import nn, Tensor
from joeynmt.helpers import freeze_params
from joeynmt.vocabulary import Vocabulary
logger = logging.getLogger(__name__)
class Embeddings(nn.Module):
    """
    Token embedding layer, optionally scaled by sqrt(embedding_dim)
    as done in the Transformer.
    """

    # pylint: disable=unused-argument
    def __init__(self,
                 embedding_dim: int = 64,
                 scale: bool = False,
                 vocab_size: int = 0,
                 padding_idx: int = 1,
                 freeze: bool = False,
                 **kwargs):
        """
        Create new embeddings for the vocabulary.
        Use scaling for the Transformer.

        :param embedding_dim: size of each embedding vector
        :param scale: multiply embeddings by sqrt(embedding_dim) if True
        :param vocab_size: number of entries in the lookup table
        :param padding_idx: padding token index (passed to ``nn.Embedding``)
        :param freeze: freeze the embeddings during training
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.scale = scale
        self.vocab_size = vocab_size
        self.lut = nn.Embedding(vocab_size, self.embedding_dim,
                                padding_idx=padding_idx)
        if freeze:
            freeze_params(self)

    # pylint: disable=arguments-differ
    def forward(self, x: Tensor) -> Tensor:
        """
        Perform lookup for input `x` in the embedding table.

        :param x: index in the vocabulary
        :return: embedded representation for `x`
        """
        embedded = self.lut(x)
        if self.scale:
            embedded = embedded * math.sqrt(self.embedding_dim)
        return embedded

    def __repr__(self):
        return "%s(embedding_dim=%d, vocab_size=%d)" % (
            self.__class__.__name__, self.embedding_dim, self.vocab_size)

    # from fairseq
    def load_from_file(self, embed_path: str, vocab: Vocabulary):
        """Load pretrained embedding weights from text file.

        - First line is expected to contain vocabulary size and dimension.
          The dimension has to match the model's specified embedding size,
          the vocabulary size is used in logging only.
        - Each line should contain word and embedding weights
          separated by spaces.
        - Pretrained vocabulary items that are not part of joeynmt's
          vocabulary are ignored (not loaded from the file).
        - The initialization (specified in config["model"]["embed_initializer"])
          of joeynmt vocabulary items that are not part of the pretrained
          vocabulary is kept (not overwritten here).
        - This function should be called after initialization!

        Example:
            2 5
            the -0.0230 -0.0264  0.0287  0.0171  0.1403
            at -0.0395 -0.1286  0.0275  0.0254 -0.0932

        :param embed_path: embedding weights text file
        :param vocab: Vocabulary object
        """
        loaded = {}
        # parse file
        with io.open(embed_path, 'r', encoding='utf-8',
                     errors='ignore') as f_embed:
            vocab_size, d = map(int, f_embed.readline().split())
            assert self.embedding_dim == d, \
                "Embedding dimension doesn't match."
            for line in f_embed:
                fields = line.rstrip().split(' ')
                # keep only tokens that exist in joeynmt's vocabulary
                if fields[0] in vocab.stoi.keys():
                    loaded[fields[0]] = torch.FloatTensor(
                        [float(t) for t in fields[1:]])

        logger.warning("Loaded {} of {} ({:%}) tokens "
                       "in the pre-trained embeddings.".format(
                           len(loaded), vocab_size,
                           len(loaded) / vocab_size))

        # copy matching rows into the lookup table
        for idx in range(len(vocab)):
            token = vocab.itos[idx]
            if token in loaded:
                assert self.embedding_dim == len(loaded[token])
                self.lut.weight.data[idx] = loaded[token]

        logger.warning("Loaded {} of {} ({:%}) tokens "
                       "of the JoeyNMT's vocabulary.".format(
                           len(loaded), len(vocab),
                           len(loaded) / len(vocab)))
| 4,157 | 33.363636 | 80 | py |
KSTER | KSTER-main/joeynmt/training.py | # coding: utf-8
"""
Training module
"""
import argparse
import time
import shutil
from typing import List
import logging
import os
import sys
import collections
import pathlib
import numpy as np
import torch
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
from joeynmt.model import build_model
from joeynmt.batch import Batch
from joeynmt.helpers import log_data_info, load_config, log_cfg, \
store_attention_plots, load_checkpoint, make_model_dir, \
make_logger, set_seed, symlink_update, latest_checkpoint_update, \
ConfigurationError, get_sacrebleu_description
from joeynmt.model import Model, _DataParallel
from joeynmt.prediction import validate_on_data
from joeynmt.loss import XentLoss
from joeynmt.data import load_data, make_data_iter
from joeynmt.builders import build_optimizer, build_scheduler, \
build_gradient_clipper
from joeynmt.prediction import test
# for fp16 training
try:
from apex import amp
amp.register_half_function(torch, "einsum")
except ImportError as no_apex:
# error handling in TrainManager object construction
pass
logger = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class TrainManager:
    """ Manages training loop, validations, learning rate scheduling
    and early stopping."""

    def __init__(self, model: Model, config: dict,
                 batch_class: Batch = Batch) -> None:
        """
        Creates a new TrainManager for a model, specified as in configuration.

        :param model: torch module defining the model
        :param config: dictionary containing the training configurations
        :param batch_class: batch class to encapsulate the torch class
        """
        train_config = config["training"]
        self.batch_class = batch_class

        # files for logging and storing
        # the model directory must already exist (created by make_model_dir)
        self.model_dir = train_config["model_dir"]
        assert os.path.exists(self.model_dir)

        self.logging_freq = train_config.get("logging_freq", 100)
        self.valid_report_file = "{}/validations.txt".format(self.model_dir)
        self.tb_writer = SummaryWriter(log_dir=self.model_dir + "/tensorboard/")
        self.save_latest_checkpoint = train_config.get("save_latest_ckpt", True)

        # model
        self.model = model
        self._log_parameters_list()

        # objective: cross-entropy with optional label smoothing,
        # attached to the model so it travels with it to the GPU
        self.label_smoothing = train_config.get("label_smoothing", 0.0)
        self.model.loss_function = XentLoss(pad_index=self.model.pad_index,
                                            smoothing=self.label_smoothing)
        self.normalization = train_config.get("normalization", "batch")
        if self.normalization not in ["batch", "tokens", "none"]:
            raise ConfigurationError("Invalid normalization option."
                                     "Valid options: "
                                     "'batch', 'tokens', 'none'.")

        # optimization
        self.learning_rate_min = train_config.get("learning_rate_min", 1.0e-8)
        self.clip_grad_fun = build_gradient_clipper(config=train_config)
        self.optimizer = build_optimizer(config=train_config,
                                         parameters=model.parameters())

        # validation & early stopping
        self.validation_freq = train_config.get("validation_freq", 1000)
        self.log_valid_sents = train_config.get("print_valid_sents", [0, 1, 2])
        # bounded queue of checkpoint paths; the oldest is deleted on overflow
        self.ckpt_queue = collections.deque(
            maxlen=train_config.get("keep_last_ckpts", 5))
        self.eval_metric = train_config.get("eval_metric", "bleu")
        if self.eval_metric not in [
                'bleu', 'chrf', 'token_accuracy', 'sequence_accuracy'
        ]:
            raise ConfigurationError("Invalid setting for 'eval_metric', "
                                     "valid options: 'bleu', 'chrf', "
                                     "'token_accuracy', 'sequence_accuracy'.")
        self.early_stopping_metric = train_config.get("early_stopping_metric",
                                                      "eval_metric")

        # early_stopping_metric decides on how to find the early stopping point:
        # ckpts are written when there's a new high/low score for this metric.
        # If we schedule after BLEU/chrf/accuracy, we want to maximize the
        # score, else we want to minimize it.
        if self.early_stopping_metric in ["ppl", "loss"]:
            self.minimize_metric = True
        elif self.early_stopping_metric == "eval_metric":
            if self.eval_metric in [
                    "bleu", "chrf", "token_accuracy", "sequence_accuracy"
            ]:
                self.minimize_metric = False
            # eval metric that has to get minimized (not yet implemented)
            else:
                self.minimize_metric = True
        else:
            raise ConfigurationError(
                "Invalid setting for 'early_stopping_metric', "
                "valid options: 'loss', 'ppl', 'eval_metric'.")

        # eval options
        test_config = config["testing"]
        self.bpe_type = test_config.get("bpe_type", "subword-nmt")
        self.sacrebleu = get_sacrebleu_description(config)

        # learning rate scheduling
        self.scheduler, self.scheduler_step_at = build_scheduler(
            config=train_config,
            scheduler_mode="min" if self.minimize_metric else "max",
            optimizer=self.optimizer,
            hidden_size=config["model"]["encoder"]["hidden_size"])

        # data & batch handling
        self.level = config["data"]["level"]
        if self.level not in ["word", "bpe", "char"]:
            raise ConfigurationError("Invalid segmentation level. "
                                     "Valid options: 'word', 'bpe', 'char'.")

        self.shuffle = train_config.get("shuffle", True)
        self.epochs = train_config["epochs"]
        self.batch_size = train_config["batch_size"]
        # Placeholder so that we can use the train_iter in other functions.
        self.train_iter = None
        self.train_iter_state = None
        # per-device batch_size = self.batch_size // self.n_gpu
        self.batch_type = train_config.get("batch_type", "sentence")
        self.eval_batch_size = train_config.get("eval_batch_size",
                                                self.batch_size)
        # per-device eval_batch_size = self.eval_batch_size // self.n_gpu
        self.eval_batch_type = train_config.get("eval_batch_type",
                                                self.batch_type)

        # gradient accumulation factor: optimizer steps happen every
        # batch_multiplier batches
        self.batch_multiplier = train_config.get("batch_multiplier", 1)

        # generation
        self.max_output_length = train_config.get("max_output_length", None)

        # CPU / GPU
        self.use_cuda = train_config["use_cuda"] and torch.cuda.is_available()
        self.n_gpu = torch.cuda.device_count() if self.use_cuda else 0
        self.device = torch.device("cuda" if self.use_cuda else "cpu")
        if self.use_cuda:
            self.model.to(self.device)

        # fp16 (requires NVIDIA apex; the import at module level may have
        # failed, which is only an error if fp16 is actually requested)
        self.fp16 = train_config.get("fp16", False)
        if self.fp16:
            if 'apex' not in sys.modules:
                raise ImportError("Please install apex from "
                                  "https://www.github.com/nvidia/apex "
                                  "to use fp16 training.") from no_apex
            self.model, self.optimizer = amp.initialize(self.model,
                                                        self.optimizer,
                                                        opt_level='O1')
            # opt level: one of {"O0", "O1", "O2", "O3"}
            # see https://nvidia.github.io/apex/amp.html#opt-levels

        # initialize training statistics
        self.stats = self.TrainStatistics(
            steps=0,
            stop=False,
            total_tokens=0,
            best_ckpt_iter=0,
            best_ckpt_score=np.inf if self.minimize_metric else -np.inf,
            minimize_metric=self.minimize_metric,
            max_steps=train_config["max_steps"])

        # model parameters: optionally warm-start from a checkpoint
        if "load_model" in train_config.keys():
            self.init_from_checkpoint(
                train_config["load_model"],
                reset_best_ckpt=train_config.get("reset_best_ckpt", False),
                reset_scheduler=train_config.get("reset_scheduler", False),
                reset_optimizer=train_config.get("reset_optimizer", False),
                reset_iter_state=train_config.get("reset_iter_state", False))

        # multi-gpu training (should be after apex fp16 initialization)
        if self.n_gpu > 1:
            self.model = _DataParallel(self.model)

    def _save_checkpoint(self, new_best: bool = True) -> None:
        """
        Save the model's current parameters and the training state to a
        checkpoint.

        The training state contains the total number of training steps,
        the total number of training tokens,
        the best checkpoint score and iteration so far,
        and optimizer and scheduler states.

        :param new_best: This boolean signals which symlink we will use for the
            new checkpoint. If it is true, we update best.ckpt, else latest.ckpt.
        """
        model_path = os.path.join(self.model_dir,
                                  "{}.ckpt".format(self.stats.steps))
        # unwrap DataParallel so the checkpoint loads on a single device too
        model_state_dict = self.model.module.state_dict() \
            if isinstance(self.model, torch.nn.DataParallel) \
            else self.model.state_dict()
        state = {
            "steps": self.stats.steps,
            "total_tokens": self.stats.total_tokens,
            "best_ckpt_score": self.stats.best_ckpt_score,
            "best_ckpt_iteration": self.stats.best_ckpt_iter,
            "model_state": model_state_dict,
            "optimizer_state": self.optimizer.state_dict(),
            "scheduler_state":
                self.scheduler.state_dict() if self.scheduler is not None
                else None,
            'amp_state': amp.state_dict() if self.fp16 else None,
            "train_iter_state": self.train_iter.state_dict()
        }
        torch.save(state, model_path)
        symlink_target = "{}.ckpt".format(self.stats.steps)

        if new_best:
            if len(self.ckpt_queue) == self.ckpt_queue.maxlen:
                to_delete = self.ckpt_queue.popleft()  # delete oldest ckpt
                try:
                    os.remove(to_delete)
                except FileNotFoundError:
                    logger.warning(
                        "Wanted to delete old checkpoint %s but "
                        "file does not exist.", to_delete)

            self.ckpt_queue.append(model_path)

            best_path = "{}/best.ckpt".format(self.model_dir)
            try:
                # create/modify symbolic link for best checkpoint
                symlink_update(symlink_target, best_path)
            except OSError:
                # overwrite best.ckpt
                torch.save(state, best_path)

        if self.save_latest_checkpoint:
            last_path = "{}/latest.ckpt".format(self.model_dir)
            previous_path = latest_checkpoint_update(symlink_target, last_path)
            # If the last ckpt is in the ckpt_queue, we don't want to delete it.
            can_delete = True
            for ckpt_path in self.ckpt_queue:
                if pathlib.Path(ckpt_path).resolve() == previous_path:
                    can_delete = False
                    break
            if can_delete and previous_path is not None:
                os.remove(previous_path)

    def init_from_checkpoint(self,
                             path: str,
                             reset_best_ckpt: bool = False,
                             reset_scheduler: bool = False,
                             reset_optimizer: bool = False,
                             reset_iter_state: bool = False) -> None:
        """
        Initialize the trainer from a given checkpoint file.

        This checkpoint file contains not only model parameters, but also
        scheduler and optimizer states, see `self._save_checkpoint`.

        :param path: path to checkpoint
        :param reset_best_ckpt: reset tracking of the best checkpoint,
            use for domain adaptation with a new dev
            set or when using a new metric for fine-tuning.
        :param reset_scheduler: reset the learning rate scheduler, and do not
            use the one stored in the checkpoint.
        :param reset_optimizer: reset the optimizer, and do not use the one
            stored in the checkpoint.
        :param reset_iter_state: reset the sampler's internal state and do not
            use the one stored in the checkpoint.
        """
        logger.info("Loading model from %s", path)
        model_checkpoint = load_checkpoint(path=path, use_cuda=self.use_cuda)

        # restore model and optimizer parameters
        self.model.load_state_dict(model_checkpoint["model_state"])

        if not reset_optimizer:
            self.optimizer.load_state_dict(model_checkpoint["optimizer_state"])
        else:
            logger.info("Reset optimizer.")

        if not reset_scheduler:
            if model_checkpoint["scheduler_state"] is not None and \
                    self.scheduler is not None:
                self.scheduler.load_state_dict(
                    model_checkpoint["scheduler_state"])
        else:
            logger.info("Reset scheduler.")

        # restore counts
        self.stats.steps = model_checkpoint["steps"]
        self.stats.total_tokens = model_checkpoint["total_tokens"]

        if not reset_best_ckpt:
            self.stats.best_ckpt_score = model_checkpoint["best_ckpt_score"]
            self.stats.best_ckpt_iter = model_checkpoint["best_ckpt_iteration"]
        else:
            logger.info("Reset tracking of the best checkpoint.")

        if (not reset_iter_state
                and model_checkpoint.get('train_iter_state', None) is not None):
            self.train_iter_state = model_checkpoint["train_iter_state"]

        # move parameters to cuda
        if self.use_cuda:
            self.model.to(self.device)

        # fp16
        if self.fp16 and model_checkpoint.get("amp_state", None) is not None:
            amp.load_state_dict(model_checkpoint['amp_state'])

    # pylint: disable=unnecessary-comprehension
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    def train_and_validate(self, train_data: Dataset, valid_data: Dataset) \
            -> None:
        """
        Train the model and validate it from time to time on the validation set.

        :param train_data: training data
        :param valid_data: validation data
        """
        self.train_iter = make_data_iter(train_data,
                                         batch_size=self.batch_size,
                                         batch_type=self.batch_type,
                                         train=True,
                                         shuffle=self.shuffle)

        if self.train_iter_state is not None:
            self.train_iter.load_state_dict(self.train_iter_state)

        #################################################################
        # simplify accumulation logic:
        #################################################################
        # for epoch in range(epochs):
        #     self.model.zero_grad()
        #     epoch_loss = 0.0
        #     batch_loss = 0.0
        #     for i, batch in enumerate(iter(self.train_iter)):
        #
        #         # gradient accumulation:
        #         # loss.backward() inside _train_step()
        #         batch_loss += self._train_step(inputs)
        #
        #         if (i + 1) % self.batch_multiplier == 0:
        #             self.optimizer.step()     # update!
        #             self.model.zero_grad()    # reset gradients
        #             self.steps += 1           # increment counter
        #
        #             epoch_loss += batch_loss  # accumulate batch loss
        #             batch_loss = 0            # reset batch loss
        #
        #     # leftovers are just ignored.
        #################################################################

        logger.info(
            "Train stats:\n"
            "\tdevice: %s\n"
            "\tn_gpu: %d\n"
            "\t16-bits training: %r\n"
            "\tgradient accumulation: %d\n"
            "\tbatch size per device: %d\n"
            "\ttotal batch size (w. parallel & accumulation): %d", self.device,
            self.n_gpu, self.fp16, self.batch_multiplier, self.batch_size //
            self.n_gpu if self.n_gpu > 1 else self.batch_size,
            self.batch_size * self.batch_multiplier)

        for epoch_no in range(self.epochs):
            logger.info("EPOCH %d", epoch_no + 1)

            if self.scheduler is not None and self.scheduler_step_at == "epoch":
                self.scheduler.step(epoch=epoch_no)

            self.model.train()

            # Reset statistics for each epoch.
            start = time.time()
            total_valid_duration = 0
            start_tokens = self.stats.total_tokens
            self.model.zero_grad()
            epoch_loss = 0
            batch_loss = 0

            for i, batch in enumerate(iter(self.train_iter)):
                # create a Batch object from torchtext batch
                batch = self.batch_class(batch, self.model.pad_index,
                                         use_cuda=self.use_cuda)

                # get batch loss (backward() already called inside)
                batch_loss += self._train_step(batch)

                # update! only every batch_multiplier-th batch
                if (i + 1) % self.batch_multiplier == 0:
                    # clip gradients (in-place)
                    if self.clip_grad_fun is not None:
                        if self.fp16:
                            self.clip_grad_fun(
                                params=amp.master_params(self.optimizer))
                        else:
                            self.clip_grad_fun(params=self.model.parameters())

                    # make gradient step
                    self.optimizer.step()

                    # decay lr
                    if self.scheduler is not None \
                            and self.scheduler_step_at == "step":
                        self.scheduler.step()

                    # reset gradients
                    self.model.zero_grad()

                    # increment step counter
                    self.stats.steps += 1
                    if self.stats.steps >= self.stats.max_steps:
                        self.stats.stop = True

                    # log learning progress
                    if self.stats.steps % self.logging_freq == 0:
                        self.tb_writer.add_scalar("train/train_batch_loss",
                                                  batch_loss, self.stats.steps)
                        elapsed = time.time() - start - total_valid_duration
                        elapsed_tokens = self.stats.total_tokens - start_tokens
                        logger.info(
                            "Epoch %3d, Step: %8d, Batch Loss: %12.6f, "
                            "Tokens per Sec: %8.0f, Lr: %.6f", epoch_no + 1,
                            self.stats.steps, batch_loss,
                            elapsed_tokens / elapsed,
                            self.optimizer.param_groups[0]["lr"])
                        start = time.time()
                        total_valid_duration = 0
                        start_tokens = self.stats.total_tokens

                    # Only add complete loss of full mini-batch to epoch_loss
                    epoch_loss += batch_loss  # accumulate epoch_loss
                    batch_loss = 0  # rest batch_loss

                    # validate on the entire dev set
                    if self.stats.steps % self.validation_freq == 0:
                        valid_duration = self._validate(valid_data, epoch_no)
                        total_valid_duration += valid_duration

                if self.stats.stop:
                    break

            if self.stats.stop:
                logger.info('Training ended since minimum lr %f was reached.',
                            self.learning_rate_min)
                break

            logger.info('Epoch %3d: total training loss %.2f', epoch_no + 1,
                        epoch_loss)
        else:
            # for-else: only reached when the epoch loop ran to completion
            logger.info('Training ended after %3d epochs.', epoch_no + 1)
        logger.info('Best validation result (greedy) at step %8d: %6.2f %s.',
                    self.stats.best_ckpt_iter, self.stats.best_ckpt_score,
                    self.early_stopping_metric)

        self.tb_writer.close()  # close Tensorboard writer

    def _train_step(self, batch: Batch) -> float:
        """
        Train the model on one batch: Compute the loss.

        Note: backward() is called here; the optimizer step happens in
        `train_and_validate` (gradient accumulation).

        :param batch: training batch
        :return: normalized loss for the batch, as a Python float
        """
        # reactivate training
        self.model.train()

        # get loss
        batch_loss, _, _, _ = self.model(return_type="loss", **vars(batch))

        # sum multi-gpu losses
        if self.n_gpu > 1:
            batch_loss = batch_loss.sum()

        # normalize batch loss
        if self.normalization == "batch":
            normalizer = batch.nseqs
        elif self.normalization == "tokens":
            normalizer = batch.ntokens
        elif self.normalization == "none":
            normalizer = 1
        else:
            raise NotImplementedError("Only normalize by 'batch' or 'tokens' "
                                      "or summation of loss 'none' implemented")

        norm_batch_loss = batch_loss / normalizer

        if self.n_gpu > 1:
            norm_batch_loss = norm_batch_loss / self.n_gpu

        if self.batch_multiplier > 1:
            # scale down so accumulated gradients match a single big batch
            norm_batch_loss = norm_batch_loss / self.batch_multiplier

        # accumulate gradients
        if self.fp16:
            with amp.scale_loss(norm_batch_loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            norm_batch_loss.backward()

        # increment token counter
        self.stats.total_tokens += batch.ntokens

        return norm_batch_loss.item()

    def _validate(self, valid_data, epoch_no):
        """
        Run validation on `valid_data`, track the best checkpoint, write
        logs/reports and store hypotheses and attention plots.

        :param valid_data: validation dataset
        :param epoch_no: current epoch number (zero-based; logged as +1)
        :return: time spent on validation, in seconds
        """
        valid_start_time = time.time()

        valid_score, valid_loss, valid_ppl, valid_sources, \
            valid_sources_raw, valid_references, valid_hypotheses, \
            valid_hypotheses_raw, valid_attention_scores = \
            validate_on_data(
                batch_size=self.eval_batch_size,
                batch_class=self.batch_class,
                data=valid_data,
                eval_metric=self.eval_metric,
                level=self.level, model=self.model,
                use_cuda=self.use_cuda,
                max_output_length=self.max_output_length,
                compute_loss=True,
                beam_size=1,  # greedy validations
                batch_type=self.eval_batch_type,
                postprocess=True,  # always remove BPE for validation
                bpe_type=self.bpe_type,  # "subword-nmt" or "sentencepiece"
                sacrebleu=self.sacrebleu,  # sacrebleu options
                n_gpu=self.n_gpu
            )

        self.tb_writer.add_scalar("valid/valid_loss", valid_loss,
                                  self.stats.steps)
        self.tb_writer.add_scalar("valid/valid_score", valid_score,
                                  self.stats.steps)
        self.tb_writer.add_scalar("valid/valid_ppl", valid_ppl,
                                  self.stats.steps)

        # pick the value that drives early stopping / model selection
        if self.early_stopping_metric == "loss":
            ckpt_score = valid_loss
        elif self.early_stopping_metric in ["ppl", "perplexity"]:
            ckpt_score = valid_ppl
        else:
            ckpt_score = valid_score

        if self.scheduler is not None \
                and self.scheduler_step_at == "validation":
            self.scheduler.step(ckpt_score)

        new_best = False
        if self.stats.is_best(ckpt_score):
            self.stats.best_ckpt_score = ckpt_score
            self.stats.best_ckpt_iter = self.stats.steps
            logger.info('Hooray! New best validation result [%s]!',
                        self.early_stopping_metric)
            if self.ckpt_queue.maxlen > 0:
                logger.info("Saving new checkpoint.")
                new_best = True
                self._save_checkpoint(new_best)
        elif self.save_latest_checkpoint:
            self._save_checkpoint(new_best)

        # append to validation report
        self._add_report(valid_score=valid_score,
                         valid_loss=valid_loss,
                         valid_ppl=valid_ppl,
                         eval_metric=self.eval_metric,
                         new_best=new_best)

        self._log_examples(sources_raw=[v for v in valid_sources_raw],
                           sources=valid_sources,
                           hypotheses_raw=valid_hypotheses_raw,
                           hypotheses=valid_hypotheses,
                           references=valid_references)

        valid_duration = time.time() - valid_start_time
        logger.info(
            'Validation result (greedy) at epoch %3d, '
            'step %8d: %s: %6.2f, loss: %8.4f, ppl: %8.4f, '
            'duration: %.4fs', epoch_no + 1, self.stats.steps, self.eval_metric,
            valid_score, valid_loss, valid_ppl, valid_duration)

        # store validation set outputs
        self._store_outputs(valid_hypotheses)

        # store attention plots for selected valid sentences
        if valid_attention_scores:
            store_attention_plots(attentions=valid_attention_scores,
                                  targets=valid_hypotheses_raw,
                                  sources=[s for s in valid_data.src],
                                  indices=self.log_valid_sents,
                                  output_prefix="{}/att.{}".format(
                                      self.model_dir, self.stats.steps),
                                  tb_writer=self.tb_writer,
                                  steps=self.stats.steps)

        return valid_duration

    def _add_report(self,
                    valid_score: float,
                    valid_ppl: float,
                    valid_loss: float,
                    eval_metric: str,
                    new_best: bool = False) -> None:
        """
        Append a one-line report to validation logging file.

        :param valid_score: validation evaluation score [eval_metric]
        :param valid_ppl: validation perplexity
        :param valid_loss: validation loss (sum over whole validation set)
        :param eval_metric: evaluation metric, e.g. "bleu"
        :param new_best: whether this is a new best model
        """
        current_lr = -1
        # ignores other param groups for now
        for param_group in self.optimizer.param_groups:
            current_lr = param_group['lr']

        # training stops once the lr has decayed below the configured minimum
        if current_lr < self.learning_rate_min:
            self.stats.stop = True

        with open(self.valid_report_file, 'a') as opened_file:
            opened_file.write(
                "Steps: {}\tLoss: {:.5f}\tPPL: {:.5f}\t{}: {:.5f}\t"
                "LR: {:.8f}\t{}\n".format(self.stats.steps, valid_loss,
                                          valid_ppl, eval_metric, valid_score,
                                          current_lr, "*" if new_best else ""))

    def _log_parameters_list(self) -> None:
        """
        Write all model parameters (name, shape) to the log.
        """
        model_parameters = filter(lambda p: p.requires_grad,
                                  self.model.parameters())
        n_params = sum([np.prod(p.size()) for p in model_parameters])
        logger.info("Total params: %d", n_params)
        trainable_params = [
            n for (n, p) in self.model.named_parameters() if p.requires_grad
        ]
        logger.debug("Trainable parameters: %s", sorted(trainable_params))
        assert trainable_params

    def _log_examples(self,
                      sources: List[str],
                      hypotheses: List[str],
                      references: List[str],
                      sources_raw: List[List[str]] = None,
                      hypotheses_raw: List[List[str]] = None,
                      references_raw: List[List[str]] = None) -> None:
        """
        Log a the first `self.log_valid_sents` sentences from given examples.

        :param sources: decoded sources (list of strings)
        :param hypotheses: decoded hypotheses (list of strings)
        :param references: decoded references (list of strings)
        :param sources_raw: raw sources (list of list of tokens)
        :param hypotheses_raw: raw hypotheses (list of list of tokens)
        :param references_raw: raw references (list of list of tokens)
        """
        for p in self.log_valid_sents:
            if p >= len(sources):
                continue
            logger.info("Example #%d", p)

            if sources_raw is not None:
                logger.debug("\tRaw source:     %s", sources_raw[p])
            if references_raw is not None:
                logger.debug("\tRaw reference:  %s", references_raw[p])
            if hypotheses_raw is not None:
                logger.debug("\tRaw hypothesis: %s", hypotheses_raw[p])

            logger.info("\tSource:     %s", sources[p])
            logger.info("\tReference:  %s", references[p])
            logger.info("\tHypothesis: %s", hypotheses[p])

    def _store_outputs(self, hypotheses: List[str]) -> None:
        """
        Write current validation outputs to file in `self.model_dir.`

        :param hypotheses: list of strings
        """
        current_valid_output_file = "{}/{}.hyps".format(self.model_dir,
                                                        self.stats.steps)
        with open(current_valid_output_file, 'w') as opened_file:
            for hyp in hypotheses:
                opened_file.write("{}\n".format(hyp))

    class TrainStatistics:
        """Mutable bundle of training progress counters and
        best-checkpoint bookkeeping (held as `TrainManager.stats`)."""

        def __init__(self,
                     steps: int = 0,
                     stop: bool = False,
                     total_tokens: int = 0,
                     best_ckpt_iter: int = 0,
                     best_ckpt_score: float = np.inf,
                     minimize_metric: bool = True,
                     max_steps: int = 200000) -> None:
            # global update step counter
            self.steps = steps
            # stop training if this flag is True
            # by reaching learning rate minimum
            self.stop = stop
            # number of total tokens seen so far
            self.total_tokens = total_tokens
            # store iteration point of best ckpt
            self.best_ckpt_iter = best_ckpt_iter
            # initial values for best scores
            self.best_ckpt_score = best_ckpt_score
            # minimize or maximize score
            self.minimize_metric = minimize_metric
            # training also stops once this many update steps are reached
            self.max_steps = max_steps

        def is_best(self, score):
            """Return True if `score` beats the best checkpoint score so far
            (direction depends on `minimize_metric`)."""
            if self.minimize_metric:
                is_best = score < self.best_ckpt_score
            else:
                is_best = score > self.best_ckpt_score
            return is_best
def train(cfg_file: str) -> None:
    """
    Main training function. After training, also test on test data if given.

    NOTE(review): this function itself only runs training/validation; no
    test-set evaluation is triggered below — presumably handled by a separate
    entry point. Confirm against the docstring claim.

    :param cfg_file: path to configuration yaml file
    """
    cfg = load_config(cfg_file)

    # make logger (model_dir is created here; overwrite guards old runs)
    model_dir = make_model_dir(cfg["training"]["model_dir"],
                               overwrite=cfg["training"].get(
                                   "overwrite", False))
    _ = make_logger(model_dir, mode="train")  # version string returned
    # TODO: save version number in model checkpoints

    # set the random seed (default 42) before any data/model randomness
    set_seed(seed=cfg["training"].get("random_seed", 42))

    # load the data
    train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(
        data_cfg=cfg["data"])

    # build an encoder-decoder model
    model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)

    # for training management, e.g. early stopping and model selection
    trainer = TrainManager(model=model, config=cfg)

    # store copy of original training config in model dir
    shutil.copy2(cfg_file, model_dir + "/config.yaml")

    # log all entries of config
    log_cfg(cfg)

    log_data_info(train_data=train_data,
                  valid_data=dev_data,
                  test_data=test_data,
                  src_vocab=src_vocab,
                  trg_vocab=trg_vocab)

    logger.info(str(model))

    # store the vocabs next to the checkpoints for later inference
    src_vocab_file = "{}/src_vocab.txt".format(cfg["training"]["model_dir"])
    src_vocab.to_file(src_vocab_file)
    trg_vocab_file = "{}/trg_vocab.txt".format(cfg["training"]["model_dir"])
    trg_vocab.to_file(trg_vocab_file)

    # train the model
    trainer.train_and_validate(train_data=train_data, valid_data=dev_data)
if __name__ == "__main__":
    # Command-line entry point: parse the config path and launch training.
    arg_parser = argparse.ArgumentParser('Joey-NMT')
    arg_parser.add_argument("config",
                            default="configs/default.yaml",
                            type=str,
                            help="Training configuration file (yaml).")
    cli_args = arg_parser.parse_args()
    train(cfg_file=cli_args.config)
KSTER | KSTER-main/joeynmt/model.py | # coding: utf-8
"""
Module to represents whole models
"""
from typing import Callable
import logging
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
from joeynmt.initialization import initialize_model
from joeynmt.embeddings import Embeddings
from joeynmt.encoders import Encoder, RecurrentEncoder, TransformerEncoder
from joeynmt.decoders import Decoder, RecurrentDecoder, TransformerDecoder
from joeynmt.constants import PAD_TOKEN, EOS_TOKEN, BOS_TOKEN
from joeynmt.vocabulary import Vocabulary
from joeynmt.helpers import ConfigurationError
logger = logging.getLogger(__name__)
class Model(nn.Module):
    """
    Base Model class: wraps encoder, decoder and the source/target embedding
    tables, and dispatches all computation through ``forward(return_type=...)``
    so that multi-GPU ``DataParallel`` hooks are always triggered.
    """

    def __init__(self,
                 encoder: Encoder,
                 decoder: Decoder,
                 src_embed: Embeddings,
                 trg_embed: Embeddings,
                 src_vocab: Vocabulary,
                 trg_vocab: Vocabulary) -> None:
        """
        Create a new encoder-decoder model

        :param encoder: encoder
        :param decoder: decoder
        :param src_embed: source embedding
        :param trg_embed: target embedding
        :param src_vocab: source vocabulary
        :param trg_vocab: target vocabulary
        """
        super().__init__()

        self.src_embed = src_embed
        self.trg_embed = trg_embed
        self.encoder = encoder
        self.decoder = decoder
        self.src_vocab = src_vocab
        self.trg_vocab = trg_vocab
        # special-token indices looked up once for cheap reuse
        self.bos_index = self.trg_vocab.stoi[BOS_TOKEN]
        self.pad_index = self.trg_vocab.stoi[PAD_TOKEN]
        self.eos_index = self.trg_vocab.stoi[EOS_TOKEN]
        self._loss_function = None  # set by the TrainManager

    @property
    def loss_function(self):
        """Loss callable assigned by the TrainManager (None until set)."""
        # BUG FIX: previously returned the nonexistent attribute `self._x`,
        # which raised AttributeError on every access (including the
        # `assert self.loss_function is not None` check in forward()).
        return self._loss_function

    @loss_function.setter
    def loss_function(self, loss_function: Callable):
        self._loss_function = loss_function

    def forward(self, return_type: str = None, **kwargs) \
            -> (Tensor, Tensor, Tensor, Tensor):
        """ Interface for multi-gpu

        For DataParallel, We need to encapsulate all model call: model.encode(),
        model.decode(), and model.encode_decode() by model.__call__().
        model.__call__() triggers model.forward() together with pre hooks
        and post hooks, which take care of multi-gpu distribution.

        :param return_type: one of {"loss", "combiner_loss", "encode",
            "decode"}
        :return: 4-tuple; slots not produced by the chosen mode are None
        """
        if return_type is None:
            raise ValueError("Please specify return_type: "
                             "{`loss`, `encode`, `decode`}.")

        return_tuple = (None, None, None, None)

        if return_type == "loss":
            assert self.loss_function is not None
            out, _, _, _ = self._encode_decode(**kwargs)

            # compute log probs
            log_probs = F.log_softmax(out, dim=-1)

            # compute batch loss
            batch_loss = self.loss_function(log_probs, kwargs["trg"])

            # return batch loss
            #     = sum over all elements in batch that are not pad
            return_tuple = (batch_loss, None, None, None)

        elif return_type == "combiner_loss":
            assert self.loss_function is not None
            # without a combiner module this degenerates to the plain loss
            if not hasattr(self, "combiner"):
                return self.forward(return_type="loss", **kwargs)
            logits, hidden, _, _ = self._encode_decode(**kwargs)

            # combiner mixes model logits with retrieval-based scores
            log_probs = self.combiner(hidden, logits)

            # compute batch loss
            batch_loss = self.loss_function(log_probs, kwargs["trg"])

            # return batch loss
            #     = sum over all elements in batch that are not pad
            return_tuple = (batch_loss, None, None, None)

        elif return_type == "encode":
            encoder_output, encoder_hidden = self._encode(**kwargs)

            # return encoder outputs
            return_tuple = (encoder_output, encoder_hidden, None, None)

        elif return_type == "decode":
            outputs, hidden, att_probs, att_vectors = self._decode(**kwargs)

            # return decoder outputs
            return_tuple = (outputs, hidden, att_probs, att_vectors)

        return return_tuple

    # pylint: disable=arguments-differ
    def _encode_decode(self, src: Tensor, trg_input: Tensor, src_mask: Tensor,
                       src_length: Tensor, trg_mask: Tensor = None, **kwargs) \
            -> (Tensor, Tensor, Tensor, Tensor):
        """
        First encodes the source sentence.
        Then produces the target one word at a time.

        :param src: source input
        :param trg_input: target input
        :param src_mask: source mask
        :param src_length: length of source inputs
        :param trg_mask: target mask
        :return: decoder outputs
        """
        encoder_output, encoder_hidden = self._encode(src=src,
                                                      src_length=src_length,
                                                      src_mask=src_mask,
                                                      **kwargs)

        unroll_steps = trg_input.size(1)

        assert "decoder_hidden" not in kwargs.keys()
        return self._decode(encoder_output=encoder_output,
                            encoder_hidden=encoder_hidden,
                            src_mask=src_mask, trg_input=trg_input,
                            unroll_steps=unroll_steps,
                            trg_mask=trg_mask, **kwargs)

    def _encode(self, src: Tensor, src_length: Tensor, src_mask: Tensor,
                **_kwargs) -> (Tensor, Tensor):
        """
        Encodes the source sentence.

        :param src: source token ids
        :param src_length: lengths of source sequences
        :param src_mask: source mask, 1 at valid positions
        :return: encoder outputs (output, hidden_concat)
        """
        return self.encoder(self.src_embed(src), src_length, src_mask,
                            **_kwargs)

    def _decode(self, encoder_output: Tensor, encoder_hidden: Tensor,
                src_mask: Tensor, trg_input: Tensor,
                unroll_steps: int, decoder_hidden: Tensor = None,
                att_vector: Tensor = None, trg_mask: Tensor = None, **_kwargs) \
            -> (Tensor, Tensor, Tensor, Tensor):
        """
        Decode, given an encoded source sentence.

        :param encoder_output: encoder states for attention computation
        :param encoder_hidden: last encoder state for decoder initialization
        :param src_mask: source mask, 1 at valid tokens
        :param trg_input: target inputs
        :param unroll_steps: number of steps to unrol the decoder for
        :param decoder_hidden: decoder hidden state (optional)
        :param att_vector: previous attention vector (optional)
        :param trg_mask: mask for target steps
        :return: decoder outputs (outputs, hidden, att_probs, att_vectors)
        """
        return self.decoder(trg_embed=self.trg_embed(trg_input),
                            encoder_output=encoder_output,
                            encoder_hidden=encoder_hidden,
                            src_mask=src_mask,
                            unroll_steps=unroll_steps,
                            hidden=decoder_hidden,
                            prev_att_vector=att_vector,
                            trg_mask=trg_mask,
                            **_kwargs)

    def __repr__(self) -> str:
        """
        String representation: a description of encoder, decoder and embeddings

        :return: string representation
        """
        return "%s(\n" \
               "\tencoder=%s,\n" \
               "\tdecoder=%s,\n" \
               "\tsrc_embed=%s,\n" \
               "\ttrg_embed=%s)" % (self.__class__.__name__, self.encoder,
                                    self.decoder, self.src_embed,
                                    self.trg_embed)
class _DataParallel(nn.DataParallel):
    """ DataParallel wrapper to pass through the model attributes """

    def __getattr__(self, name):
        # nn.DataParallel keeps the real model in `self.module`; attribute
        # lookups that fail on the wrapper (e.g. `loss_function`, `encoder`)
        # are forwarded to the wrapped model so callers need not unwrap it.
        try:
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.module, name)
def build_model(cfg: dict = None,
                src_vocab: Vocabulary = None,
                trg_vocab: Vocabulary = None) -> Model:
    """
    Build and initialize the model according to the configuration.

    :param cfg: dictionary configuration containing model specifications
    :param src_vocab: source vocabulary
    :param trg_vocab: target vocabulary
    :return: built and initialized model
    """
    logger.info("Building an encoder-decoder model...")
    src_padding_idx = src_vocab.stoi[PAD_TOKEN]
    trg_padding_idx = trg_vocab.stoi[PAD_TOKEN]

    src_embed = Embeddings(
        **cfg["encoder"]["embeddings"], vocab_size=len(src_vocab),
        padding_idx=src_padding_idx)

    # this ties source and target embeddings
    # for softmax layer tying, see further below
    if cfg.get("tied_embeddings", False):
        if src_vocab.itos == trg_vocab.itos:
            # share embeddings for src and trg
            trg_embed = src_embed
        else:
            raise ConfigurationError(
                "Embedding cannot be tied since vocabularies differ.")
    else:
        trg_embed = Embeddings(
            **cfg["decoder"]["embeddings"], vocab_size=len(trg_vocab),
            padding_idx=trg_padding_idx)

    # build encoder (embedding dropout falls back to the encoder dropout)
    enc_dropout = cfg["encoder"].get("dropout", 0.)
    enc_emb_dropout = cfg["encoder"]["embeddings"].get("dropout", enc_dropout)
    if cfg["encoder"].get("type", "recurrent") == "transformer":
        assert cfg["encoder"]["embeddings"]["embedding_dim"] == \
               cfg["encoder"]["hidden_size"], \
               "for transformer, emb_size must be hidden_size"

        encoder = TransformerEncoder(**cfg["encoder"],
                                     emb_size=src_embed.embedding_dim,
                                     emb_dropout=enc_emb_dropout)
    else:
        encoder = RecurrentEncoder(**cfg["encoder"],
                                   emb_size=src_embed.embedding_dim,
                                   emb_dropout=enc_emb_dropout)

    # build decoder
    dec_dropout = cfg["decoder"].get("dropout", 0.)
    dec_emb_dropout = cfg["decoder"]["embeddings"].get("dropout", dec_dropout)
    if cfg["decoder"].get("type", "recurrent") == "transformer":
        decoder = TransformerDecoder(
            **cfg["decoder"], encoder=encoder, vocab_size=len(trg_vocab),
            emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout)
    else:
        decoder = RecurrentDecoder(
            **cfg["decoder"], encoder=encoder, vocab_size=len(trg_vocab),
            emb_size=trg_embed.embedding_dim, emb_dropout=dec_emb_dropout)

    model = Model(encoder=encoder, decoder=decoder,
                  src_embed=src_embed, trg_embed=trg_embed,
                  src_vocab=src_vocab, trg_vocab=trg_vocab)

    # tie softmax layer with trg embeddings
    if cfg.get("tied_softmax", False):
        if trg_embed.lut.weight.shape == \
                model.decoder.output_layer.weight.shape:
            # (also) share trg embeddings and softmax layer:
            model.decoder.output_layer.weight = trg_embed.lut.weight
        else:
            raise ConfigurationError(
                "For tied_softmax, the decoder embedding_dim and decoder "
                "hidden_size must be the same."
                "The decoder must be a Transformer.")

    # custom initialization of model parameters
    initialize_model(model, cfg, src_padding_idx, trg_padding_idx)

    # initialize embeddings from file
    pretrained_enc_embed_path = cfg["encoder"]["embeddings"].get(
        "load_pretrained", None)
    pretrained_dec_embed_path = cfg["decoder"]["embeddings"].get(
        "load_pretrained", None)
    if pretrained_enc_embed_path:
        # BUG FIX: corrected "pretraind" typo in the log message
        logger.info("Loading pretrained src embeddings...")
        model.src_embed.load_from_file(pretrained_enc_embed_path, src_vocab)
    if pretrained_dec_embed_path and not cfg.get("tied_embeddings", False):
        # BUG FIX: corrected "pretraind" typo in the log message
        logger.info("Loading pretrained trg embeddings...")
        model.trg_embed.load_from_file(pretrained_dec_embed_path, trg_vocab)

    logger.info("Enc-dec model built.")
    return model
| 12,260 | 37.800633 | 80 | py |
KSTER | KSTER-main/joeynmt/data.py | # coding: utf-8
"""
Data module
"""
import sys
import random
import os
import os.path
from typing import Optional
import logging
from torchtext.datasets import TranslationDataset
from torchtext import data
from torchtext.data import Dataset, Iterator, Field
from joeynmt.constants import UNK_TOKEN, EOS_TOKEN, BOS_TOKEN, PAD_TOKEN
from joeynmt.vocabulary import build_vocab, Vocabulary
logger = logging.getLogger(__name__)
def load_data(data_cfg: dict, datasets: list = None)\
        -> (Dataset, Dataset, Optional[Dataset], Vocabulary, Vocabulary):
    """
    Load train, dev and optionally test data as specified in configuration.
    Vocabularies are created from the training set with a limit of `voc_limit`
    tokens and a minimum token frequency of `voc_min_freq`
    (specified in the configuration dictionary).

    The training data is filtered to include sentences up to `max_sent_length`
    on source and target side.

    If you set ``random_train_subset``, a random selection of this size is used
    from the training set instead of the full training set.

    :param data_cfg: configuration dictionary for data
        ("data" part of configuation file)
    :param datasets: list of dataset names to load
    :return:
        - train_data: training dataset
        - dev_data: development dataset
        - test_data: testdata set if given, otherwise None
        - src_vocab: source vocabulary extracted from training data
        - trg_vocab: target vocabulary extracted from training data
    """
    if datasets is None:
        datasets = ["train", "dev", "test"]

    # load data from files
    src_lang = data_cfg["src"]
    trg_lang = data_cfg["trg"]
    train_path = data_cfg.get("train", None)
    dev_path = data_cfg.get("dev", None)
    test_path = data_cfg.get("test", None)

    if train_path is None and dev_path is None and test_path is None:
        raise ValueError('Please specify at least one data source path.')

    level = data_cfg["level"]
    lowercase = data_cfg["lowercase"]
    max_sent_length = data_cfg["max_sent_length"]

    # char-level tokenizes into characters; word/bpe level splits on spaces
    tok_fun = lambda s: list(s) if level == "char" else s.split()

    # source has no BOS token; both fields return (batch, lengths) tuples
    src_field = data.Field(init_token=None, eos_token=EOS_TOKEN,
                           pad_token=PAD_TOKEN, tokenize=tok_fun,
                           batch_first=True, lower=lowercase,
                           unk_token=UNK_TOKEN,
                           include_lengths=True)

    trg_field = data.Field(init_token=BOS_TOKEN, eos_token=EOS_TOKEN,
                           pad_token=PAD_TOKEN, tokenize=tok_fun,
                           unk_token=UNK_TOKEN,
                           batch_first=True, lower=lowercase,
                           include_lengths=True)

    train_data = None
    if "train" in datasets and train_path is not None:
        logger.info("Loading training data...")
        # keep only sentence pairs within max_sent_length on BOTH sides
        train_data = TranslationDataset(path=train_path,
                                        exts=("." + src_lang, "." + trg_lang),
                                        fields=(src_field, trg_field),
                                        filter_pred=
                                        lambda x: len(vars(x)['src'])
                                        <= max_sent_length
                                        and len(vars(x)['trg'])
                                        <= max_sent_length)

        random_train_subset = data_cfg.get("random_train_subset", -1)
        if random_train_subset > -1:
            # select this many training examples randomly and discard the rest
            keep_ratio = random_train_subset / len(train_data)
            keep, _ = train_data.split(
                split_ratio=[keep_ratio, 1 - keep_ratio],
                random_state=random.getstate())
            train_data = keep

    # vocabulary limits/frequencies; unlimited if not configured
    src_max_size = data_cfg.get("src_voc_limit", sys.maxsize)
    src_min_freq = data_cfg.get("src_voc_min_freq", 1)
    trg_max_size = data_cfg.get("trg_voc_limit", sys.maxsize)
    trg_min_freq = data_cfg.get("trg_voc_min_freq", 1)

    src_vocab_file = data_cfg.get("src_vocab", None)
    trg_vocab_file = data_cfg.get("trg_vocab", None)

    # vocab must come from the training data or from a pre-built vocab file
    assert (train_data is not None) or (src_vocab_file is not None)
    assert (train_data is not None) or (trg_vocab_file is not None)

    logger.info("Building vocabulary...")
    src_vocab = build_vocab(field="src", min_freq=src_min_freq,
                            max_size=src_max_size,
                            dataset=train_data, vocab_file=src_vocab_file)
    trg_vocab = build_vocab(field="trg", min_freq=trg_min_freq,
                            max_size=trg_max_size,
                            dataset=train_data, vocab_file=trg_vocab_file)

    dev_data = None
    if "dev" in datasets and dev_path is not None:
        logger.info("Loading dev data...")
        dev_data = TranslationDataset(path=dev_path,
                                      exts=("." + src_lang, "." + trg_lang),
                                      fields=(src_field, trg_field))

    test_data = None
    if "test" in datasets and test_path is not None:
        logger.info("Loading test data...")
        # check if target exists
        if os.path.isfile(test_path + "." + trg_lang):
            test_data = TranslationDataset(
                path=test_path, exts=("." + src_lang, "." + trg_lang),
                fields=(src_field, trg_field))
        else:
            # no target is given -> create dataset from src only
            test_data = MonoDataset(path=test_path, ext="." + src_lang,
                                    field=src_field)

    # attach vocabs so Fields numericalize with the vocabulary built above
    src_field.vocab = src_vocab
    trg_field.vocab = trg_vocab
    logger.info("Data loaded.")
    return train_data, dev_data, test_data, src_vocab, trg_vocab
# pylint: disable=global-at-module-level
global max_src_in_batch, max_tgt_in_batch


# pylint: disable=unused-argument,global-variable-undefined
def token_batch_size_fn(new, count, sofar):
    """Compute batch size based on number of tokens (+padding)."""
    global max_src_in_batch, max_tgt_in_batch
    if count == 1:
        # first example of a fresh batch: reset the running maxima
        max_src_in_batch = 0
        max_tgt_in_batch = 0
    max_src_in_batch = max(max_src_in_batch, len(new.src))
    src_elements = count * max_src_in_batch
    tgt_elements = 0
    if hasattr(new, 'trg'):  # monolingual data ("translate" mode) has no trg
        # +2 accounts for the BOS and EOS tokens added to the target
        max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)
        tgt_elements = count * max_tgt_in_batch
    return max(src_elements, tgt_elements)
def make_data_iter(dataset: Dataset,
                   batch_size: int,
                   batch_type: str = "sentence",
                   train: bool = False,
                   shuffle: bool = False) -> Iterator:
    """
    Returns a torchtext iterator for a torchtext dataset.

    :param dataset: torchtext dataset containing src and optionally trg
    :param batch_size: size of the batches the iterator prepares
    :param batch_type: measure batch size by sentence count or by token count
    :param train: whether it's training time, when turned off,
        bucketing, sorting within batches and shuffling is disabled
    :param shuffle: whether to shuffle the data before each epoch
        (no effect if set to True for testing)
    :return: torchtext iterator
    """
    # token-based batching sizes batches by (padded) token count
    size_fn = token_batch_size_fn if batch_type == "token" else None

    shared = dict(repeat=False, dataset=dataset, batch_size=batch_size,
                  batch_size_fn=size_fn)

    if train:
        # optionally shuffle; bucket and sort by source length within batches
        return data.BucketIterator(train=True, sort=False,
                                   sort_within_batch=True,
                                   sort_key=lambda x: len(x.src),
                                   shuffle=shuffle, **shared)

    # validation/inference: keep the original order, no shuffling
    return data.BucketIterator(train=False, sort=False, **shared)
class MonoDataset(Dataset):
    """Defines a dataset for machine translation without targets."""

    @staticmethod
    def sort_key(ex):
        # examples are bucketed/sorted by source length
        return len(ex.src)

    def __init__(self, path: str, ext: str, field: Field, **kwargs) -> None:
        """
        Create a monolingual dataset (=only sources) given path and field.

        :param path: Prefix of path to the data file
        :param ext: Containing the extension to path for this language.
        :param field: Containing the fields that will be used for data.
        :param kwargs: Passed to the constructor of data.Dataset.
        """
        fields = [('src', field)]

        if hasattr(path, "readline"):  # special usage: stdin
            src_file = path
        else:
            src_file = open(os.path.expanduser(path + ext))

        # one example per non-empty line
        examples = [data.Example.fromlist([line.strip()], fields)
                    for line in src_file if line.strip() != '']

        src_file.close()

        super().__init__(examples, fields, **kwargs)
| 9,111 | 37.774468 | 79 | py |
KSTER | KSTER-main/joeynmt/transformer_layers.py | # -*- coding: utf-8 -*-
import math
import torch
import torch.nn as nn
from torch import Tensor
# pylint: disable=arguments-differ
class MultiHeadedAttention(nn.Module):
    """
    Multi-Head Attention module from "Attention is All You Need"

    Implementation modified from OpenNMT-py.
    https://github.com/OpenNMT/OpenNMT-py
    """

    def __init__(self, num_heads: int, size: int, dropout: float = 0.1):
        """
        Create a multi-headed attention layer.

        :param num_heads: the number of heads
        :param size: model size (must be divisible by num_heads)
        :param dropout: probability of dropping a unit
        """
        super().__init__()
        assert size % num_heads == 0

        self.head_size = size // num_heads
        self.model_size = size
        self.num_heads = num_heads

        # separate projections for keys, values and queries
        self.k_layer = nn.Linear(size, num_heads * self.head_size)
        self.v_layer = nn.Linear(size, num_heads * self.head_size)
        self.q_layer = nn.Linear(size, num_heads * self.head_size)

        self.output_layer = nn.Linear(size, size)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, k: Tensor, v: Tensor, q: Tensor, mask: Tensor = None):
        """
        Computes multi-headed attention.

        :param k: keys   [B, M, D] with M being the sentence length.
        :param v: values [B, M, D]
        :param q: query  [B, M, D]
        :param mask: optional mask [B, 1, M] (True at valid positions)
        :return: attention output [B, M, D]
        """
        batch = k.size(0)
        heads = self.num_heads
        dim = self.head_size

        def split_heads(t: Tensor) -> Tensor:
            # [B, M, H*D'] -> [B, H, M, D']
            return t.view(batch, -1, heads, dim).transpose(1, 2)

        keys = split_heads(self.k_layer(k))
        values = split_heads(self.v_layer(v))
        # scale queries by sqrt(head dim) before the dot product
        queries = split_heads(self.q_layer(q)) / math.sqrt(dim)

        # batch x num_heads x query_len x key_len
        scores = torch.matmul(queries, keys.transpose(2, 3))

        # broadcast the mask over heads: [B, 1, 1, M]
        if mask is not None:
            scores = scores.masked_fill(~mask.unsqueeze(1), float('-inf'))

        # attention dropout on the normalized weights
        weights = self.dropout(self.softmax(scores))

        # weighted sum over values, then merge heads back to [B, M, D]
        context = torch.matmul(weights, values)
        context = context.transpose(1, 2).contiguous().view(
            batch, -1, heads * dim)

        return self.output_layer(context)
# pylint: disable=arguments-differ
class PositionwiseFeedForward(nn.Module):
    """
    Position-wise Feed-forward layer
    Projects to ff_size and then back down to input_size.
    """

    def __init__(self, input_size, ff_size, dropout=0.1):
        """
        Initializes position-wise feed-forward layer.

        :param input_size: dimensionality of the input.
        :param ff_size: dimensionality of intermediate representation
        :param dropout: dropout probability applied after each projection
        """
        super().__init__()
        self.layer_norm = nn.LayerNorm(input_size, eps=1e-6)
        stack = [
            nn.Linear(input_size, ff_size),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(ff_size, input_size),
            nn.Dropout(dropout),
        ]
        self.pwff_layer = nn.Sequential(*stack)

    def forward(self, x):
        # pre-norm residual: LN -> FF, then add the raw input back
        normed = self.layer_norm(x)
        return x + self.pwff_layer(normed)
# pylint: disable=arguments-differ
class PositionalEncoding(nn.Module):
    """
    Pre-compute position encodings (PE).

    In forward pass, this adds the position-encodings to the
    input for as many time steps as necessary.

    Implementation based on OpenNMT-py.
    https://github.com/OpenNMT/OpenNMT-py
    """

    def __init__(self,
                 size: int = 0,
                 max_len: int = 5000):
        """
        Positional Encoding with maximum length max_len

        :param size: embedding dimensionality (must be even)
        :param max_len: longest supported sequence length
        """
        if size % 2 != 0:
            raise ValueError("Cannot use sin/cos positional encoding with "
                             "odd dim (got dim={:d})".format(size))
        super().__init__()

        positions = torch.arange(0, max_len).unsqueeze(1).float()
        # geometric frequency ladder over the even dimensions
        freqs = torch.exp((torch.arange(0, size, 2, dtype=torch.float) *
                           -(math.log(10000.0) / size)))
        table = torch.zeros(max_len, size)
        table[:, 0::2] = torch.sin(positions * freqs)
        table[:, 1::2] = torch.cos(positions * freqs)

        # buffer shape: [1, max_len, size]; leading dim broadcasts over batch
        self.register_buffer('pe', table.unsqueeze(0))
        self.dim = size

    def forward(self, emb):
        """Embed inputs.

        Args:
            emb (FloatTensor): Sequence of word vectors, time along dim 1
        """
        # add the position encodings for the first emb.size(1) steps
        return emb + self.pe[:, :emb.size(1)]
class TransformerEncoderLayer(nn.Module):
    """
    One Transformer encoder layer has a Multi-head attention layer plus
    a position-wise feed-forward layer.
    """

    def __init__(self,
                 size: int = 0,
                 ff_size: int = 0,
                 num_heads: int = 0,
                 dropout: float = 0.1):
        """
        A single Transformer layer.

        :param size: model dimensionality
        :param ff_size: feed-forward intermediate size
        :param num_heads: number of attention heads
        :param dropout: dropout probability
        """
        super().__init__()

        self.layer_norm = nn.LayerNorm(size, eps=1e-6)
        self.src_src_att = MultiHeadedAttention(num_heads, size,
                                                dropout=dropout)
        self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,
                                                    dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        self.size = size

    # pylint: disable=arguments-differ
    def forward(self, x: Tensor, mask: Tensor) -> Tensor:
        """
        Forward pass for a single transformer encoder layer.
        First applies layer norm, then self attention,
        then dropout with residual connection (adding the input to the result),
        and then a position-wise feed-forward layer.

        :param x: layer input
        :param mask: input mask
        :return: output tensor
        """
        normed = self.layer_norm(x)
        attended = self.src_src_att(normed, normed, normed, mask)
        residual = x + self.dropout(attended)
        return self.feed_forward(residual)
class TransformerDecoderLayer(nn.Module):
    """
    Transformer decoder layer.

    Consists of self-attention, source-attention, and feed-forward.
    """

    def __init__(self,
                 size: int = 0,
                 ff_size: int = 0,
                 num_heads: int = 0,
                 dropout: float = 0.1):
        """
        Represents a single Transformer decoder layer.

        It attends to the source representation and the previous decoder
        states.

        :param size: model dimensionality
        :param ff_size: size of the feed-forward intermediate layer
        :param num_heads: number of heads
        :param dropout: dropout to apply to input
        """
        super().__init__()
        self.size = size

        self.trg_trg_att = MultiHeadedAttention(num_heads, size,
                                                dropout=dropout)
        self.src_trg_att = MultiHeadedAttention(num_heads, size,
                                                dropout=dropout)

        self.feed_forward = PositionwiseFeedForward(size, ff_size=ff_size,
                                                    dropout=dropout)

        self.x_layer_norm = nn.LayerNorm(size, eps=1e-6)
        self.dec_layer_norm = nn.LayerNorm(size, eps=1e-6)

        self.dropout = nn.Dropout(dropout)

    def _attention_steps(self, x, memory, src_mask, trg_mask):
        """Run self-attention then cross-attention; return (h1, h2)."""
        # decoder/target self-attention (pre-norm + residual)
        queries = self.x_layer_norm(x)
        self_att = self.trg_trg_att(queries, queries, queries, mask=trg_mask)
        h1 = x + self.dropout(self_att)

        # source-target attention over the encoder memory
        cross_in = self.dec_layer_norm(h1)
        h2 = self.src_trg_att(memory, memory, cross_in, mask=src_mask)
        return h1, h2

    # pylint: disable=arguments-differ
    def forward(self,
                x: Tensor = None,
                memory: Tensor = None,
                src_mask: Tensor = None,
                trg_mask: Tensor = None) -> Tensor:
        """
        Forward pass of a single Transformer decoder layer.

        :param x: inputs
        :param memory: source representations
        :param src_mask: source mask
        :param trg_mask: target mask (so as to not condition on future steps)
        :return: output tensor
        """
        h1, h2 = self._attention_steps(x, memory, src_mask, trg_mask)
        # final position-wise feed-forward layer (with residual inside)
        return self.feed_forward(self.dropout(h2) + h1)

    def context_representations(self, x: Tensor = None,
                                memory: Tensor = None,
                                src_mask: Tensor = None,
                                trg_mask: Tensor = None) -> Tensor:
        """
        Compute context representations from last decoder layer in Transformer.

        Unlike ``forward``, this stops after the feed-forward block's layer
        norm, returning the normalized pre-FF representation.

        :param x: inputs
        :param memory: source representations
        :param src_mask: source mask
        :param trg_mask: target mask (so as to not condition on future steps)
        :return: output tensor
        """
        h1, h2 = self._attention_steps(x, memory, src_mask, trg_mask)
        return self.feed_forward.layer_norm(self.dropout(h2) + h1)
KSTER | KSTER-main/joeynmt/initialization.py | # coding: utf-8
"""
Implements custom initialization
"""
import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.init import _calculate_fan_in_and_fan_out
def orthogonal_rnn_init_(cell: nn.RNNBase, gain: float = 1.):
    """
    Orthogonal initialization of recurrent weights
    RNN parameters contain 3 or 4 matrices in one parameter, so we slice it.
    """
    with torch.no_grad():
        for layer_weights in cell.all_weights:
            # layout per layer: [w_ih, w_hh, b_ih, b_hh] -> take w_hh
            hh = layer_weights[1]
            row = 0
            while row < hh.size(0):
                # each hidden_size x hidden_size gate block gets its own
                # orthogonal matrix
                nn.init.orthogonal_(hh.data[row:row + cell.hidden_size],
                                    gain=gain)
                row += cell.hidden_size
def lstm_forget_gate_init_(cell: nn.RNNBase, value: float = 1.) -> None:
    """
    Initialize LSTM forget gates with `value`.

    :param cell: LSTM cell
    :param value: initial value, default: 1
    """
    with torch.no_grad():
        for layer_weights in cell.all_weights:
            # layout per layer: [w_ih, w_hh, b_ih, b_hh] -> take both biases
            for bias in layer_weights[2:]:
                n = len(bias)
                # gate order is [input, forget, cell, output]; the forget
                # gate occupies the second quarter of the bias vector
                bias.data[n // 4:n // 2].fill_(value)
def xavier_uniform_n_(w: Tensor, gain: float = 1., n: int = 4) -> None:
    """
    Xavier initializer for parameters that combine multiple matrices in one
    parameter for efficiency. This is e.g. used for GRU and LSTM parameters,
    where e.g. all gates are computed at the same time by 1 big matrix.

    :param w: parameter
    :param gain: default 1
    :param n: default 4
    """
    with torch.no_grad():
        fan_in, fan_out = _calculate_fan_in_and_fan_out(w)
        assert fan_out % n == 0, "fan_out should be divisible by n"
        # treat the stacked matrix as n independent ones of 1/n the fan-out
        std = gain * math.sqrt(2.0 / (fan_in + fan_out // n))
        bound = math.sqrt(3.0) * std
        nn.init.uniform_(w, -bound, bound)
# pylint: disable=too-many-branches
def initialize_model(model: nn.Module, cfg: dict, src_padding_idx: int,
                     trg_padding_idx: int) -> None:
    """
    This initializes a model based on the provided config.

    All initializer configuration is part of the `model` section of the
    configuration file.
    For an example, see e.g. `https://github.com/joeynmt/joeynmt/
    blob/master/configs/iwslt_envi_xnmt.yaml#L47`

    The main initializer is set using the `initializer` key.
    Possible values are `xavier`, `uniform`, `normal` or `zeros`.
    (`xavier` is the default).

    When an initializer is set to `uniform`, then `init_weight` sets the
    range for the values (-init_weight, init_weight).

    When an initializer is set to `normal`, then `init_weight` sets the
    standard deviation for the weights (with mean 0).

    The word embedding initializer is set using `embed_initializer` and takes
    the same values. The default is `normal` with `embed_init_weight = 0.01`.

    Biases are initialized separately using `bias_initializer`.
    The default is `zeros`, but you can use the same initializers as
    the main initializer.

    Set `init_rnn_orthogonal` to True if you want RNN orthogonal
    initialization (for recurrent matrices). Default is False.

    `lstm_forget_gate` controls how the LSTM forget gate is initialized.
    Default is `1`.

    :param model: model to initialize
    :param cfg: the model configuration
    :param src_padding_idx: index of source padding token
    :param trg_padding_idx: index of target padding token
    """
    # defaults: xavier, embeddings: normal 0.01, biases: zeros, no orthogonal
    gain = float(cfg.get("init_gain", 1.0))  # for xavier
    init = cfg.get("initializer", "xavier")
    init_weight = float(cfg.get("init_weight", 0.01))
    embed_init = cfg.get("embed_initializer", "normal")
    embed_init_weight = float(cfg.get("embed_init_weight", 0.01))
    embed_gain = float(cfg.get("embed_init_gain", 1.0))  # for xavier
    bias_init = cfg.get("bias_initializer", "zeros")
    bias_init_weight = float(cfg.get("bias_init_weight", 0.01))

    # pylint: disable=unnecessary-lambda, no-else-return
    def _parse_init(s, scale, _gain):
        # map an initializer name to an in-place init function over a tensor
        scale = float(scale)
        assert scale > 0., "incorrect init_weight"
        if s.lower() == "xavier":
            return lambda p: nn.init.xavier_uniform_(p, gain=_gain)
        elif s.lower() == "uniform":
            return lambda p: nn.init.uniform_(p, a=-scale, b=scale)
        elif s.lower() == "normal":
            return lambda p: nn.init.normal_(p, mean=0., std=scale)
        elif s.lower() == "zeros":
            return lambda p: nn.init.zeros_(p)
        else:
            raise ValueError("unknown initializer")

    init_fn_ = _parse_init(init, init_weight, gain)
    embed_init_fn_ = _parse_init(embed_init, embed_init_weight, embed_gain)
    bias_init_fn_ = _parse_init(bias_init, bias_init_weight, gain)

    with torch.no_grad():
        # dispatch each parameter by its name: embeddings, biases, matrices
        for name, p in model.named_parameters():
            if "embed" in name:
                embed_init_fn_(p)
            elif "bias" in name:
                bias_init_fn_(p)
            elif len(p.size()) > 1:
                # RNNs combine multiple matrices in one, which messes up
                # xavier initialization
                if init == "xavier" and "rnn" in name:
                    n = 1
                    if "encoder" in name:
                        n = 4 if isinstance(model.encoder.rnn, nn.LSTM) else 3
                    elif "decoder" in name:
                        n = 4 if isinstance(model.decoder.rnn, nn.LSTM) else 3
                    xavier_uniform_n_(p.data, gain=gain, n=n)
                else:
                    init_fn_(p)

        # zero out paddings so pad embeddings contribute nothing
        model.src_embed.lut.weight.data[src_padding_idx].zero_()
        model.trg_embed.lut.weight.data[trg_padding_idx].zero_()

        orthogonal = cfg.get("init_rnn_orthogonal", False)
        lstm_forget_gate = cfg.get("lstm_forget_gate", 1.)

        # encoder rnn orthogonal initialization & LSTM forget gate
        if hasattr(model.encoder, "rnn"):
            if orthogonal:
                orthogonal_rnn_init_(model.encoder.rnn)
            if isinstance(model.encoder.rnn, nn.LSTM):
                lstm_forget_gate_init_(model.encoder.rnn, lstm_forget_gate)

        # decoder rnn orthogonal initialization & LSTM forget gate
        if hasattr(model.decoder, "rnn"):
            if orthogonal:
                orthogonal_rnn_init_(model.decoder.rnn)
            if isinstance(model.decoder.rnn, nn.LSTM):
                lstm_forget_gate_init_(model.decoder.rnn, lstm_forget_gate)
| 6,419 | 35.271186 | 79 | py |
KSTER | KSTER-main/joeynmt/builders.py | # coding: utf-8
"""
Collection of builder functions
"""
from typing import Callable, Optional, Generator
import torch
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau, \
StepLR, ExponentialLR
from torch.optim import Optimizer
from joeynmt.helpers import ConfigurationError
def build_gradient_clipper(config: dict) -> Optional[Callable]:
    """
    Define the function for gradient clipping as specified in configuration.
    If not specified, returns None.
    Current options:
        - "clip_grad_val": clip the gradients if they exceed this value,
            see `torch.nn.utils.clip_grad_value_`
        - "clip_grad_norm": clip the gradients if their norm exceeds this value,
            see `torch.nn.utils.clip_grad_norm_`
    :param config: dictionary with training configurations
    :return: clipping function (in-place) or None if no gradient clipping
    """
    has_value_clip = "clip_grad_val" in config
    has_norm_clip = "clip_grad_norm" in config
    # the two clipping modes are mutually exclusive
    if has_value_clip and has_norm_clip:
        raise ConfigurationError(
            "You can only specify either clip_grad_val or clip_grad_norm.")
    if has_value_clip:
        clip_value = config["clip_grad_val"]
        return lambda params: nn.utils.clip_grad_value_(
            parameters=params, clip_value=clip_value)
    if has_norm_clip:
        max_norm = config["clip_grad_norm"]
        return lambda params: nn.utils.clip_grad_norm_(
            parameters=params, max_norm=max_norm)
    return None
def build_optimizer(config: dict, parameters: Generator) -> Optimizer:
    """
    Create an optimizer for the given parameters as specified in config.
    Except for the weight decay and initial learning rate,
    default optimizer settings are used.
    Currently supported configuration settings for "optimizer":
        - "sgd" (default): see `torch.optim.SGD`
        - "adam": see `torch.optim.adam`
        - "adagrad": see `torch.optim.adagrad`
        - "adadelta": see `torch.optim.adadelta`
        - "rmsprop": see `torch.optim.RMSprop`
    The initial learning rate is set according to "learning_rate" in the config.
    The weight decay is set according to "weight_decay" in the config.
    If they are not specified, the initial learning rate is set to 3.0e-4, the
    weight decay to 0.
    Note that the scheduler state is saved in the checkpoint, so if you load
    a model for further training you have to use the same type of scheduler.
    :param config: configuration dictionary
    :param parameters:
    :return: optimizer
    """
    name = config.get("optimizer", "sgd").lower()
    # settings shared by every optimizer
    kwargs = {
        "lr": config.get("learning_rate", 3.0e-4),
        "weight_decay": config.get("weight_decay", 0),
    }
    if name == "adam":
        kwargs["betas"] = config.get("adam_betas", (0.9, 0.999))
        return torch.optim.Adam(parameters, **kwargs)
    if name == "adagrad":
        return torch.optim.Adagrad(parameters, **kwargs)
    if name == "adadelta":
        return torch.optim.Adadelta(parameters, **kwargs)
    if name == "rmsprop":
        return torch.optim.RMSprop(parameters, **kwargs)
    if name == "sgd":
        # default
        return torch.optim.SGD(parameters, **kwargs)
    raise ConfigurationError("Invalid optimizer. Valid options: 'adam', "
                             "'adagrad', 'adadelta', 'rmsprop', 'sgd'.")
def build_scheduler(config: dict, optimizer: Optimizer, scheduler_mode: str,
                    hidden_size: int = 0) \
        -> (Optional[_LRScheduler], Optional[str]):
    """
    Create a learning rate scheduler if specified in config and
    determine when a scheduler step should be executed.
    Current options:
        - "plateau": see `torch.optim.lr_scheduler.ReduceLROnPlateau`
        - "decaying": see `torch.optim.lr_scheduler.StepLR`
        - "exponential": see `torch.optim.lr_scheduler.ExponentialLR`
        - "noam": see `joeynmt.builders.NoamScheduler`
        - "warmupexponentialdecay": see
          `joeynmt.builders.WarmupExponentialDecayScheduler`
    If no scheduler is specified, returns (None, None) which will result in
    a constant learning rate.
    :param config: training configuration
    :param optimizer: optimizer for the scheduler, determines the set of
        parameters which the scheduler sets the learning rate for
    :param scheduler_mode: "min" or "max", depending on whether the validation
        score should be minimized or maximized.
        Only relevant for "plateau".
    :param hidden_size: encoder hidden size (required for NoamScheduler)
    :return:
        - scheduler: scheduler object,
        - scheduler_step_at: either "validation", "epoch" or "step"
    """
    scheduling = config.get("scheduling", None)
    if not scheduling:
        # no (or empty) "scheduling" entry -> constant learning rate
        return None, None
    scheduler, scheduler_step_at = None, None
    name = scheduling.lower()
    if name == "plateau":
        # stepped after every validation
        scheduler = ReduceLROnPlateau(optimizer=optimizer,
                                      mode=scheduler_mode,
                                      verbose=False,
                                      threshold_mode='abs',
                                      factor=config.get("decrease_factor", 0.1),
                                      patience=config.get("patience", 10))
        scheduler_step_at = "validation"
    elif name == "decaying":
        # stepped after every epoch
        scheduler = StepLR(optimizer=optimizer,
                           step_size=config.get("decaying_step_size", 1))
        scheduler_step_at = "epoch"
    elif name == "exponential":
        # stepped after every epoch
        scheduler = ExponentialLR(optimizer=optimizer,
                                  gamma=config.get("decrease_factor", 0.99))
        scheduler_step_at = "epoch"
    elif name == "noam":
        # stepped after every update
        scheduler = NoamScheduler(
            hidden_size=hidden_size,
            factor=config.get("learning_rate_factor", 1),
            warmup=config.get("learning_rate_warmup", 4000),
            optimizer=optimizer)
        scheduler_step_at = "step"
    elif name == "warmupexponentialdecay":
        # stepped after every update
        scheduler = WarmupExponentialDecayScheduler(
            min_rate=config.get("learning_rate_min", 1.0e-5),
            decay_rate=config.get("learning_rate_decay", 0.1),
            warmup=config.get("learning_rate_warmup", 4000),
            optimizer=optimizer,
            peak_rate=config.get("learning_rate_peak", 1.0e-3),
            decay_length=config.get("learning_rate_decay_length", 10000))
        scheduler_step_at = "step"
    # unknown names silently fall through to (None, None), as before
    return scheduler, scheduler_step_at
class NoamScheduler:
    """
    The Noam learning rate scheduler used in "Attention is all you need"
    See Eq. 3 in https://arxiv.org/pdf/1706.03762.pdf
    """

    def __init__(self,
                 hidden_size: int,
                 optimizer: torch.optim.Optimizer,
                 factor: float = 1,
                 warmup: int = 4000):
        """
        Warm-up, followed by learning rate decay.

        :param hidden_size: model dimensionality, scales the overall rate
        :param optimizer: optimizer whose learning rate is updated
        :param factor: decay factor
        :param warmup: number of warmup steps
        """
        self.optimizer = optimizer
        self.hidden_size = hidden_size
        self.factor = factor
        self.warmup = warmup
        self._step = 0
        self._rate = 0

    def step(self):
        """Advance one step and set the new rate on all parameter groups."""
        self._step += 1
        new_rate = self._compute_rate()
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate

    def _compute_rate(self):
        """Noam rate: linear warmup, then inverse-square-root decay."""
        step = self._step
        warmup_term = step * self.warmup ** (-1.5)
        decay_term = step ** (-0.5)
        return self.factor * \
            (self.hidden_size ** (-0.5) * min(decay_term, warmup_term))

    def state_dict(self):
        """Returns dictionary of values necessary to reconstruct scheduler"""
        return {
            "step": self._step,
            "warmup": self.warmup,
            "factor": self.factor,
            "hidden_size": self.hidden_size,
            "rate": self._rate,
        }

    def load_state_dict(self, state_dict):
        """Given a state_dict, this function loads scheduler's state"""
        self._step = state_dict["step"]
        self.warmup = state_dict["warmup"]
        self.factor = state_dict["factor"]
        self.hidden_size = state_dict["hidden_size"]
        self._rate = state_dict["rate"]
class WarmupExponentialDecayScheduler:
    """
    A learning rate scheduler similar to Noam, but modified:
    Keep the warm up period but make it so that the decay rate can be tuneable.
    The decay is exponential up to a given minimum rate.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 peak_rate: float = 1.0e-3,
                 decay_length: int = 10000,
                 warmup: int = 4000,
                 decay_rate: float = 0.5,
                 min_rate: float = 1.0e-5):
        """
        Warm-up, followed by exponential learning rate decay.

        :param optimizer: optimizer whose learning rate is updated
        :param peak_rate: maximum learning rate at peak after warmup
        :param decay_length: decay length after warmup
        :param warmup: number of warmup steps
        :param decay_rate: decay rate after warmup
        :param min_rate: minimum learning rate
        """
        self.optimizer = optimizer
        self.peak_rate = peak_rate
        self.decay_length = decay_length
        self.warmup = warmup
        self.decay_rate = decay_rate
        self.min_rate = min_rate
        self._step = 0
        self._rate = 0

    def step(self):
        """Advance one step and set the new rate on all parameter groups."""
        self._step += 1
        new_rate = self._compute_rate()
        for group in self.optimizer.param_groups:
            group['lr'] = new_rate
        self._rate = new_rate

    def _compute_rate(self):
        """Linear warmup to peak_rate, then exponential decay, floored at min_rate."""
        step = self._step
        warmup = self.warmup
        if step < warmup:
            rate = step * self.peak_rate / warmup
        else:
            exponent = (step - warmup) / self.decay_length
            rate = self.peak_rate * (self.decay_rate ** exponent)
        return max(rate, self.min_rate)

    def state_dict(self):
        """Returns dictionary of values necessary to reconstruct scheduler"""
        return {
            "warmup": self.warmup,
            "step": self._step,
            "decay_length": self.decay_length,
            "peak_rate": self.peak_rate,
            "rate": self._rate,
            "decay_rate": self.decay_rate,
            "min_rate": self.min_rate,
        }

    def load_state_dict(self, state_dict):
        """Given a state_dict, this function loads scheduler's state"""
        self.warmup = state_dict['warmup']
        self._step = state_dict['step']
        self.decay_length = state_dict['decay_length']
        self.peak_rate = state_dict['peak_rate']
        self._rate = state_dict['rate']
        self.decay_rate = state_dict['decay_rate']
        self.min_rate = state_dict['min_rate']
| 12,622 | 38.446875 | 80 | py |
KSTER | KSTER-main/joeynmt/combiners.py | import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import init
import numpy as np
import math
from typing import Tuple
from joeynmt.database import Database, EnhancedDatabase
from joeynmt.kernel import Kernel, GaussianKernel, LaplacianKernel
class Combiner(nn.Module):
    """
    Abstract base class for combiners, which turn decoder hidden states and
    output logits into log-probabilities over the target vocabulary.
    Subclasses must override :meth:`forward` (and optionally
    :meth:`detailed_forward`).
    """

    def __init__(self) -> None:
        super(Combiner, self).__init__()

    def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
        """
        :param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size)
        :param logits: output scores FloatTensor (batch_size, seq_len, vocab_size)
        :return log_probs: FloatTensor (batch_size, seq_len, vocab_size)
        """
        raise NotImplementedError("The forward method is not implemented in the Combiner class.")

    def detailed_forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> Tuple[torch.Tensor,
        torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Like :meth:`forward`, but also returns the intermediate quantities.

        :param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size)
        :param logits: output scores FloatTensor (batch_size, seq_len, vocab_size)
        :return mixed_distribution: FloatTensor (batch_size, seq_len, vocab_size)
        :return model_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
        :return example_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
        :return mixing_weight: FloatTensor (batch_size, seq_len)
        :return bandwidth: FloatTensor (batch_size, seq_len)
        """
        # bug fix: the message previously named "forward" instead of
        # "detailed_forward" (copy-paste error)
        raise NotImplementedError("The detailed_forward method is not implemented in the Combiner class.")
class NoCombiner(Combiner):
    """Pass-through combiner: a plain log-softmax over the model logits."""

    def __init__(self) -> None:
        super(NoCombiner, self).__init__()

    def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
        """
        :param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size),
            unused here
        :param logits: output scores FloatTensor (batch_size, seq_len, vocab_size)
        :return log_probs: FloatTensor (batch_size, seq_len, vocab_size)
        """
        return F.log_softmax(logits, dim=-1)
class StaticCombiner(Combiner):
    """
    Combiner that interpolates the model distribution with an example-based
    distribution using a fixed mixing weight and a fixed kernel bandwidth.
    """

    def __init__(self, database: Database, top_k: int, mixing_weight: float,
                 kernel: Kernel, bandwidth: float) -> None:
        """
        :param database: datastore used to retrieve examples
        :param top_k: number of examples retrieved per query state
        :param mixing_weight: constant interpolation weight for the
            example-based distribution
        :param kernel: kernel turning retrieval distances into probabilities
        :param bandwidth: constant kernel bandwidth
        """
        super(StaticCombiner, self).__init__()
        self.database = database
        self.top_k = top_k
        self.mixing_weight = mixing_weight
        self.kernel = kernel
        self.bandwidth = bandwidth

    def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
        """
        :param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size)
        :param logits: output scores FloatTensor (batch_size, seq_len, vocab_size)
        :return log_probs: FloatTensor (batch_size, seq_len, vocab_size)
        """
        batch_size, seq_len, hidden_size = hidden.size()
        vocab_size = logits.size(-1)
        # flatten batch and time dimensions for retrieval
        flat_hidden = hidden.view(batch_size * seq_len, hidden_size)
        flat_logits = logits.view(batch_size * seq_len, vocab_size)
        model_based_distribution = F.softmax(flat_logits, dim=-1)
        # retrieve the top_k examples for every query state
        distances, token_indices = self.database.search(
            flat_hidden.cpu().numpy(), top_k=self.top_k)
        distances = torch.FloatTensor(distances).to(flat_hidden.device)
        token_indices = torch.LongTensor(token_indices).to(flat_hidden.device)
        example_based_distribution, _ = \
            self.kernel.compute_example_based_distribution(
                distances, self.bandwidth, token_indices,
                model_based_distribution.size(-1))
        # fixed-weight interpolation of the two distributions
        mixed_distribution = \
            (1 - self.mixing_weight) * model_based_distribution \
            + self.mixing_weight * example_based_distribution
        log_probs = torch.log(mixed_distribution)
        return log_probs.view(batch_size, seq_len, vocab_size).contiguous()
class DynamicCombiner(Combiner):
    """
    Combiner with *learned* interpolation: both the kernel bandwidth and the
    mixing weight are predicted from the query hidden state together with the
    retrieved neighbour representations.
    """
    def __init__(self, database: EnhancedDatabase, top_k: int, kernel: Kernel) -> None:
        super(DynamicCombiner, self).__init__()
        self.database = database
        self.top_k = top_k
        self.kernel = kernel
        # dimensionality of the vectors stored in the retrieval index
        dimension = database.index.index.d
        self.bandwidth_estimator = nn.Linear(2 * dimension, 1)
        # the estimator output is exponentiated in compute_bandwidth, so these
        # biases initialize the bandwidth to ~100 (Gaussian) resp. ~10 (other)
        if isinstance(kernel, GaussianKernel):
            self.bandwidth_estimator.bias.data[0] = math.log(100)
        else:
            self.bandwidth_estimator.bias.data[0] = math.log(10)
        self.mixing_weight_estimator = nn.Sequential(
            nn.Linear(2 * dimension, dimension),
            nn.ReLU(),
            nn.Linear(dimension, 1)
        )
    def compute_bandwidth(self, hidden: torch.Tensor, searched_hidden: torch.Tensor) -> torch.Tensor:
        """
        Predict a per-position kernel bandwidth.

        :param hidden: torch.FloatTensor (batch_size * seq_len, hidden_size)
        :param searched_hidden: torch.FloatTensor (batch_size * seq_len, top_k, hidden_size)
        :return bandwidth: torch.FloatTensor (batch_size * seq_len,)
        """
        # average over the top_k retrieved representations
        mean_hidden = searched_hidden.mean(dim=1)
        # exp keeps the predicted bandwidth strictly positive
        bandwidth = torch.exp(self.bandwidth_estimator(torch.cat([hidden, mean_hidden], dim=-1)))
        return bandwidth
    def compute_mixing_weight(self, hidden: torch.Tensor, searched_hidden: torch.Tensor, sparse_probs: torch.Tensor) -> torch.Tensor:
        """
        Predict a per-position interpolation weight.

        :param hidden: torch.FloatTensor (batch_size * seq_len, hidden_size)
        :param searched_hidden: torch.FloatTensor (batch_size * seq_len, top_k, hidden_size)
        :param sparse_probs: torch.FloatTensor (batch_size * seq_len, top_k)
        :return mixing_weight: torch.FloatTensor (batch_size * seq_len,)
        """
        # weighted average of the retrieved representations under sparse_probs
        merged_hidden = searched_hidden.transpose(1, 2).matmul(sparse_probs.unsqueeze(-1)).squeeze(-1)
        # sigmoid keeps the mixing weight in (0, 1)
        mixing_weight = torch.sigmoid(self.mixing_weight_estimator(torch.cat([hidden, merged_hidden], dim=-1)))
        return mixing_weight
    def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
        """
        :param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size)
        :param logits: output scores FloatTensor (batch_size, seq_len, vocab_size)
        :return log_probs: FloatTensor (batch_size, seq_len, vocab_size)
        """
        # reshape hidden and logits for database retrieval
        batch_size, seq_len, hidden_size = hidden.size()
        vocab_size = logits.size(-1)
        hidden = hidden.view(batch_size * seq_len, hidden_size)
        logits = logits.view(batch_size * seq_len, vocab_size)
        # retrieve examples from database
        # (retrieval dropout only during training)
        if self.training:
            distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
                top_k=self.top_k, retrieval_dropout=True)
        else:
            distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
                top_k=self.top_k, retrieval_dropout=False)
        distances = torch.FloatTensor(distances).to(hidden.device)
        token_indices = torch.LongTensor(token_indices).to(hidden.device)
        searched_hidden = torch.FloatTensor(searched_hidden).to(hidden.device)
        # compute dynamic database bandwidth
        bandwidth = self.compute_bandwidth(hidden, searched_hidden)
        model_based_distribution = F.softmax(logits, dim=-1)
        vocab_size = model_based_distribution.size(-1)
        example_based_distribution, sparse_example_based_distribution = self.kernel.compute_example_based_distribution(distances,
            bandwidth, token_indices, vocab_size)
        mixing_weight = self.compute_mixing_weight(hidden, searched_hidden, sparse_example_based_distribution)
        # compute prediction distribution by interpolating between model distribution and database distribution
        mixed_distribution = (1 - mixing_weight) * model_based_distribution + mixing_weight * example_based_distribution
        log_probs = torch.log(mixed_distribution)
        log_probs = log_probs.view(batch_size, seq_len, vocab_size).contiguous()
        return log_probs
    def detailed_forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> Tuple[torch.Tensor,
        torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Like :meth:`forward`, but also returns the intermediate quantities.

        :param hidden: hidden states FloatTensor (batch_size, seq_len, hidden_size)
        :param logits: output scores FloatTensor (batch_size, seq_len, vocab_size)
        :return mixed_distribution: FloatTensor (batch_size, seq_len, vocab_size)
        :return model_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
        :return example_based_distribution: FloatTensor (batch_size, seq_len, vocab_size)
        :return mixing_weight: FloatTensor (batch_size, seq_len)
        :return bandwidth: FloatTensor (batch_size, seq_len)
        """
        # reshape hidden and logits for knn retrieval
        batch_size, seq_len, hidden_size = hidden.size()
        vocab_size = logits.size(-1)
        hidden = hidden.view(batch_size * seq_len, hidden_size)
        logits = logits.view(batch_size * seq_len, vocab_size)
        # retrieve examples from database
        if self.training:
            distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
                top_k=self.top_k, retrieval_dropout=True)
        else:
            distances, token_indices, searched_hidden = self.database.enhanced_search(hidden.cpu().numpy(),
                top_k=self.top_k, retrieval_dropout=False)
        distances = torch.FloatTensor(distances).to(hidden.device)
        token_indices = torch.LongTensor(token_indices).to(hidden.device)
        searched_hidden = torch.FloatTensor(searched_hidden).to(hidden.device)
        # compute dynamic database bandwidth
        bandwidth = self.compute_bandwidth(hidden, searched_hidden)
        model_based_distribution = F.softmax(logits, dim=-1)
        vocab_size = model_based_distribution.size(-1)
        example_based_distribution, sparse_example_based_distribution = self.kernel.compute_example_based_distribution(distances,
            bandwidth, token_indices, vocab_size)
        mixing_weight = self.compute_mixing_weight(hidden, searched_hidden, sparse_example_based_distribution)
        # compute prediction distribution by interpolating between model distribution and database distribution
        mixed_distribution = (1 - mixing_weight) * model_based_distribution + mixing_weight * example_based_distribution
        # restore the (batch, seq_len, ...) layout for every returned tensor
        mixed_distribution = mixed_distribution.view(batch_size, seq_len, vocab_size).contiguous()
        model_based_distribution = model_based_distribution.view(batch_size, seq_len, vocab_size).contiguous()
        example_based_distribution = example_based_distribution.view(batch_size, seq_len, vocab_size).contiguous()
        mixing_weight = mixing_weight.squeeze(-1).view(batch_size, seq_len).contiguous()
        bandwidth = bandwidth.squeeze(-1).view(batch_size, seq_len).contiguous()
        return mixed_distribution, model_based_distribution, example_based_distribution, mixing_weight, bandwidth
def build_combiner(cfg: dict) -> Combiner:
    """
    Build a combiner as specified in the "combiner" section of the config.

    Supported "type" values:
        - "no_combiner": plain log-softmax over the model logits
        - "static_combiner": fixed mixing weight and bandwidth
        - any type containing "dynamic": learned mixing weight and bandwidth

    :param cfg: configuration dictionary containing a "combiner" sub-dict
    :return: the constructed Combiner instance
    :raises ValueError: if the combiner type is not supported
    """
    # bug fix: the original return line was corrupted by table metadata
    # ("return combiner | 11,897 | ..."), which is a syntax error.
    combiner_cfg = cfg["combiner"]
    combiner_type = combiner_cfg["type"]
    if combiner_type == "no_combiner":
        combiner = NoCombiner()
    elif combiner_type == "static_combiner":
        database = Database(
            index_path=combiner_cfg["index_path"],
            token_path=combiner_cfg["token_map_path"]
        )
        combiner = StaticCombiner(
            database=database,
            top_k=combiner_cfg["top_k"],
            mixing_weight=combiner_cfg["mixing_weight"],
            bandwidth=combiner_cfg["bandwidth"],
            kernel=GaussianKernel() if combiner_cfg["kernel"] == "gaussian" else LaplacianKernel()
        )
    elif "dynamic" in combiner_type:
        # dynamic combiners need the stored embeddings as well
        database = EnhancedDatabase(
            index_path=combiner_cfg["index_path"],
            token_path=combiner_cfg["token_map_path"],
            embedding_path=combiner_cfg["embedding_path"],
            in_memory=combiner_cfg["in_memory"]
        )
        combiner = DynamicCombiner(
            database=database,
            top_k=combiner_cfg["top_k"],
            kernel=GaussianKernel() if combiner_cfg["kernel"] == "gaussian" else LaplacianKernel()
        )
    else:
        raise ValueError("The %s is not supported currently." % combiner_type)
    return combiner
KSTER | KSTER-main/joeynmt/search.py | # coding: utf-8
import torch
import torch.nn.functional as F
from torch import Tensor
import numpy as np
from joeynmt.decoders import TransformerDecoder
from joeynmt.model import Model
from joeynmt.batch import Batch
from joeynmt.helpers import tile
__all__ = ["greedy", "transformer_greedy", "beam_search", "run_batch"]
def greedy(src_mask: Tensor, max_output_length: int, model: Model,
           encoder_output: Tensor, encoder_hidden: Tensor)\
        -> (np.array, np.array):
    """
    Greedy decoding. Select the token word highest probability at each time
    step. This function is a wrapper that calls recurrent_greedy for
    recurrent decoders and transformer_greedy for transformer decoders.

    :param src_mask: mask for source inputs, 0 for positions after </s>
    :param max_output_length: maximum length for the hypotheses
    :param model: model to use for greedy decoding
    :param encoder_output: encoder hidden states for attention
    :param encoder_hidden: encoder last state for decoder initialization
    :return: stacked output hypotheses and attention scores
    """
    # pick the implementation matching the decoder architecture
    is_transformer = isinstance(model.decoder, TransformerDecoder)
    decode_fn = transformer_greedy if is_transformer else recurrent_greedy
    return decode_fn(src_mask, max_output_length, model, encoder_output,
                     encoder_hidden)
def recurrent_greedy(
        src_mask: Tensor, max_output_length: int, model: Model,
        encoder_output: Tensor, encoder_hidden: Tensor) -> (np.array, np.array):
    """
    Greedy decoding for recurrent decoders: at every time step, feed the
    previous prediction back in and pick the highest-scoring word.

    :param src_mask: mask for source inputs, 0 for positions after </s>
    :param max_output_length: maximum length for the hypotheses
    :param model: model to use for greedy decoding
    :param encoder_output: encoder hidden states for attention
    :param encoder_hidden: encoder last state for decoder initialization
    :return:
        - stacked_output: output hypotheses (2d array of indices),
        - stacked_attention_scores: attention scores (3d array)
    """
    bos_index = model.bos_index
    eos_index = model.eos_index
    batch_size = src_mask.size(0)
    # initial decoder input: one <bos> per batch entry
    prev_y = src_mask.new_full(size=[batch_size, 1], fill_value=bos_index,
                               dtype=torch.long)
    predicted_ids = []
    attn_history = []
    hidden = None
    prev_att_vector = None
    # counts how often each hypothesis has produced <eos>
    finished = src_mask.new_zeros((batch_size, 1)).byte()
    for _ in range(max_output_length):
        # decode one single step
        with torch.no_grad():
            logits, hidden, att_probs, prev_att_vector = model(
                return_type="decode",
                trg_input=prev_y,
                encoder_output=encoder_output,
                encoder_hidden=encoder_hidden,
                src_mask=src_mask,
                unroll_steps=1,
                decoder_hidden=hidden,
                att_vector=prev_att_vector)
        # logits: batch x time=1 x vocab -> greedy arg max over the vocabulary
        next_word = torch.argmax(logits, dim=-1)  # batch x time=1
        predicted_ids.append(next_word.squeeze(1).detach().cpu().numpy())
        prev_y = next_word
        attn_history.append(att_probs.squeeze(1).detach().cpu().numpy())
        # stop once every hypothesis has emitted <eos> at least once
        finished += torch.eq(next_word, eos_index)
        if (finished >= 1).sum() == batch_size:
            break
    stacked_output = np.stack(predicted_ids, axis=1)  # batch x time
    stacked_attention_scores = np.stack(attn_history, axis=1)
    return stacked_output, stacked_attention_scores
# pylint: disable=unused-argument
def transformer_greedy(
        src_mask: Tensor, max_output_length: int, model: Model,
        encoder_output: Tensor, encoder_hidden: Tensor) -> (np.array, None):
    """
    Special greedy function for transformer decoders: the full prefix is fed
    back in at every step and the decoder attends to all previous states.

    :param src_mask: mask for source inputs, 0 for positions after </s>
    :param max_output_length: maximum length for the hypotheses
    :param model: model to use for greedy decoding
    :param encoder_output: encoder hidden states for attention
    :param encoder_hidden: encoder final state (unused in Transformer)
    :return:
        - stacked_output: output hypotheses (2d array of indices),
        - stacked_attention_scores: always None for the Transformer
    """
    bos_index = model.bos_index
    eos_index = model.eos_index
    batch_size = src_mask.size(0)
    # every hypothesis starts with the <bos> symbol
    ys = encoder_output.new_full([batch_size, 1], bos_index, dtype=torch.long)
    # the decoder intersects a subsequent mask with this one
    trg_mask = src_mask.new_ones([1, 1, 1])
    if isinstance(model, torch.nn.DataParallel):
        trg_mask = torch.stack(
            [src_mask.new_ones([1, 1]) for _ in model.device_ids])
    # counts how often each hypothesis has produced <eos>
    finished = src_mask.new_zeros(batch_size).byte()
    for _ in range(max_output_length):
        # pylint: disable=unused-variable
        with torch.no_grad():
            logits, hidden, _, _ = model(
                return_type="decode",
                trg_input=ys,
                encoder_output=encoder_output,
                encoder_hidden=None,
                src_mask=src_mask,
                unroll_steps=None,
                decoder_hidden=None,
                trg_mask=trg_mask
            )
        # predictions exist for every prefix position; keep only the last one
        last_logits = logits[:, -1:]
        last_hidden = hidden[:, -1:]
        log_probs = model.combiner(last_hidden, last_logits).squeeze(1)
        next_word = torch.max(log_probs, dim=1)[1].data
        ys = torch.cat([ys, next_word.unsqueeze(-1)], dim=1)
        # stop once every hypothesis has emitted <eos> at least once
        finished += torch.eq(next_word, eos_index)
        if (finished >= 1).sum() == batch_size:
            break
    # strip the leading <bos> symbol before returning
    return ys[:, 1:].detach().cpu().numpy(), None
# pylint: disable=too-many-statements,too-many-branches
def beam_search(model: Model, size: int,
                encoder_output: Tensor, encoder_hidden: Tensor,
                src_mask: Tensor, max_output_length: int,
                alpha: float, n_best: int = 1) -> (np.array, np.array):
    """
    Beam search with size k.
    Inspired by OpenNMT-py, adapted for Transformer.
    In each decoding step, find the k most likely partial hypotheses.
    :param model:
    :param size: size of the beam
    :param encoder_output:
    :param encoder_hidden:
    :param src_mask:
    :param max_output_length:
    :param alpha: `alpha` factor for length penalty (disabled if <= -1)
    :param n_best: return this many hypotheses, <= beam (currently only 1)
    :return:
        - stacked_output: output hypotheses (2d array of indices),
        - stacked_attention_scores: attention scores (3d array)
    """
    assert size > 0, 'Beam size must be >0.'
    assert n_best <= size, 'Can only return {} best hypotheses.'.format(size)
    # init
    bos_index = model.bos_index
    eos_index = model.eos_index
    pad_index = model.pad_index
    trg_vocab_size = model.decoder.output_size
    device = encoder_output.device
    transformer = isinstance(model.decoder, TransformerDecoder)
    batch_size = src_mask.size(0)
    att_vectors = None # not used for Transformer
    hidden = None # not used for Transformer
    trg_mask = None # not used for RNN
    # Recurrent models only: initialize RNN hidden state
    # pylint: disable=protected-access
    if not transformer:
        # tile encoder states and decoder initial states beam_size times
        hidden = model.decoder._init_hidden(encoder_hidden)
        hidden = tile(hidden, size, dim=1) # layers x batch*k x dec_hidden_size
        # DataParallel splits batch along the 0th dim.
        # Place back the batch_size to the 1st dim here.
        if isinstance(hidden, tuple):
            h, c = hidden
            hidden = (h.permute(1, 0, 2), c.permute(1, 0, 2))
        else:
            hidden = hidden.permute(1, 0, 2)
        # batch*k x layers x dec_hidden_size
    encoder_output = tile(encoder_output.contiguous(), size,
                          dim=0) # batch*k x src_len x enc_hidden_size
    src_mask = tile(src_mask, size, dim=0) # batch*k x 1 x src_len
    # Transformer only: create target mask
    if transformer:
        trg_mask = src_mask.new_ones([1, 1, 1]) # transformer only
        if isinstance(model, torch.nn.DataParallel):
            trg_mask = torch.stack(
                [src_mask.new_ones([1, 1]) for _ in model.device_ids])
    # numbering elements in the batch
    batch_offset = torch.arange(batch_size, dtype=torch.long, device=device)
    # numbering elements in the extended batch, i.e. beam size copies of each
    # batch element
    beam_offset = torch.arange(
        0,
        batch_size * size,
        step=size,
        dtype=torch.long,
        device=device)
    # keeps track of the top beam size hypotheses to expand for each element
    # in the batch to be further decoded (that are still "alive");
    # shape: (batch_size * size, 1), seeded with <bos>
    alive_seq = torch.full(
        [batch_size * size, 1],
        bos_index,
        dtype=torch.long,
        device=device)
    # Give full probability to the first beam on the first step.
    topk_log_probs = torch.zeros(batch_size, size, device=device)
    topk_log_probs[:, 1:] = float("-inf")
    # Structure that holds finished hypotheses.
    hypotheses = [[] for _ in range(batch_size)]
    results = {
        "predictions": [[] for _ in range(batch_size)],
        "scores": [[] for _ in range(batch_size)],
        "gold_score": [0] * batch_size,
    }
    for step in range(max_output_length):
        # This decides which part of the predicted sentence we feed to the
        # decoder to make the next prediction.
        # For Transformer, we feed the complete predicted sentence so far.
        # For Recurrent models, only feed the previous target word prediction
        if transformer:  # Transformer
            decoder_input = alive_seq  # complete prediction so far
        else:  # Recurrent
            decoder_input = alive_seq[:, -1].view(-1, 1)  # only the last word
        # expand current hypotheses
        # decode one single step
        # logits: logits for final softmax
        # pylint: disable=unused-variable
        with torch.no_grad():
            logits, hidden, att_scores, att_vectors = model(
                return_type="decode",
                encoder_output=encoder_output,
                encoder_hidden=None,  # used to initialize decoder_hidden only
                src_mask=src_mask,
                trg_input=decoder_input, #trg_embed = embed(decoder_input)
                decoder_hidden=hidden,
                att_vector=att_vectors,
                unroll_steps=1,
                trg_mask=trg_mask  # subsequent mask for Transformer only
            )
        # For the Transformer we made predictions for all time steps up to
        # this point, so we only want to know about the last time step.
        if transformer:
            logits = logits[:, -1:]  # keep only the last time step
            hidden = hidden[:, -1:]  # we don't need to keep it for transformer
            log_probs = model.combiner(hidden, logits).squeeze(1)
        else:  # RNN
            log_probs = F.log_softmax(logits, dim=-1).squeeze(1)
        # multiply probs by the beam probability (=add logprobs)
        log_probs += topk_log_probs.view(-1).unsqueeze(1)
        curr_scores = log_probs.clone()
        # compute length penalty: ((5 + len) / 6) ** alpha
        if alpha > -1:
            length_penalty = ((5.0 + (step + 1)) / 6.0) ** alpha
            curr_scores /= length_penalty
        # flatten log_probs into a list of possibilities
        curr_scores = curr_scores.reshape(-1, size * trg_vocab_size)
        # pick currently best top k hypotheses (flattened order)
        topk_scores, topk_ids = curr_scores.topk(size, dim=-1)
        if alpha > -1:
            # recover original log probs
            topk_log_probs = topk_scores * length_penalty
        else:
            topk_log_probs = topk_scores.clone()
        # reconstruct beam origin and true word ids from flattened order
        topk_beam_index = topk_ids.floor_divide(trg_vocab_size)
        topk_ids = topk_ids.fmod(trg_vocab_size)
        # map beam_index to batch_index in the flat representation
        batch_index = (
            topk_beam_index
            + beam_offset[:topk_beam_index.size(0)].unsqueeze(1))
        select_indices = batch_index.view(-1)
        # append latest prediction
        alive_seq = torch.cat(
            [alive_seq.index_select(0, select_indices),
             topk_ids.view(-1, 1)], -1)  # batch_size*k x hyp_len
        is_finished = topk_ids.eq(eos_index)
        if step + 1 == max_output_length:
            # force-finish everything on the last allowed step
            is_finished.fill_(True)
        # end condition is whether the top beam is finished
        end_condition = is_finished[:, 0].eq(True)
        # save finished hypotheses
        if is_finished.any():
            predictions = alive_seq.view(-1, size, alive_seq.size(-1))
            for i in range(is_finished.size(0)):
                b = batch_offset[i]
                if end_condition[i]:
                    is_finished[i].fill_(1)
                finished_hyp = is_finished[i].nonzero(as_tuple=False).view(-1)
                # store finished hypotheses for this batch
                for j in finished_hyp:
                    # Check if the prediction has more than one EOS.
                    # If it has more than one EOS, it means that the
                    # prediction should have already been added to
                    # the hypotheses, so you don't have to add them again.
                    if (predictions[i, j, 1:] == eos_index).nonzero(
                            as_tuple=False).numel() < 2:
                        # ignore start_token
                        hypotheses[b].append(
                            (topk_scores[i, j], predictions[i, j, 1:])
                        )
                # if the batch reached the end, save the n_best hypotheses
                if end_condition[i]:
                    best_hyp = sorted(
                        hypotheses[b], key=lambda x: x[0], reverse=True)
                    for n, (score, pred) in enumerate(best_hyp):
                        if n >= n_best:
                            break
                        results["scores"][b].append(score)
                        results["predictions"][b].append(pred)
            non_finished = end_condition.eq(False).nonzero(
                as_tuple=False).view(-1)
            # if all sentences are translated, no need to go further
            # pylint: disable=len-as-condition
            if len(non_finished) == 0:
                break
            # remove finished batches for the next step
            topk_log_probs = topk_log_probs.index_select(0, non_finished)
            batch_index = batch_index.index_select(0, non_finished)
            batch_offset = batch_offset.index_select(0, non_finished)
            alive_seq = predictions.index_select(0, non_finished) \
                .view(-1, alive_seq.size(-1))
        # reorder indices, outputs and masks
        select_indices = batch_index.view(-1)
        encoder_output = encoder_output.index_select(0, select_indices)
        src_mask = src_mask.index_select(0, select_indices)
        if hidden is not None and not transformer:
            if isinstance(hidden, tuple):
                # for LSTMs, states are tuples of tensors
                h, c = hidden
                h = h.index_select(0, select_indices)
                c = c.index_select(0, select_indices)
                hidden = (h, c)
            else:
                # for GRUs, states are single tensors
                hidden = hidden.index_select(0, select_indices)
        if att_vectors is not None:
            att_vectors = att_vectors.index_select(0, select_indices)
    def pad_and_stack_hyps(hyps, pad_value):
        """Pad hypotheses to the same length and stack them into one array."""
        filled = np.ones((len(hyps), max([h.shape[0] for h in hyps])),
                         dtype=int) * pad_value
        for j, h in enumerate(hyps):
            for k, i in enumerate(h):
                filled[j, k] = i
        return filled
    # from results to stacked outputs
    assert n_best == 1
    # only works for n_best=1 for now
    final_outputs = pad_and_stack_hyps([r[0].cpu().numpy() for r in
                                        results["predictions"]],
                                       pad_value=pad_index)
    return final_outputs, None
def run_batch(model: Model, batch: Batch, max_output_length: int,
              beam_size: int, beam_alpha: float) -> (np.array, np.array):
    """
    Get outputs and attentions scores for a given batch.

    :param model: Model class
    :param batch: batch to generate hypotheses for
    :param max_output_length: maximum length of hypotheses
    :param beam_size: size of the beam for beam search, if 0 use greedy
    :param beam_alpha: alpha value for beam search
    :return: stacked_output: hypotheses for batch,
        stacked_attention_scores: attention scores for batch
    """
    # Encode once; both decoding strategies reuse the encoder states.
    with torch.no_grad():
        encoder_output, encoder_hidden, _, _ = model(
            return_type="encode", **vars(batch))

    # if maximum output length is not globally specified, adapt to src len
    # (heuristic: allow up to 1.5x the longest source length in the batch)
    if max_output_length is None:
        max_output_length = int(max(batch.src_length.cpu().numpy()) * 1.5)

    # greedy decoding
    if beam_size < 2:
        stacked_output, stacked_attention_scores = greedy(
            src_mask=batch.src_mask,
            max_output_length=max_output_length,
            model=model,
            encoder_output=encoder_output,
            encoder_hidden=encoder_hidden
        )
        # batch, time, max_src_length
    else:  # beam search
        stacked_output, stacked_attention_scores = beam_search(
            model=model,
            size=beam_size,
            encoder_output=encoder_output,
            encoder_hidden=encoder_hidden,
            src_mask=batch.src_mask,
            max_output_length=max_output_length,
            alpha=beam_alpha
        )
    return stacked_output, stacked_attention_scores
| 18,708 | 39.321121 | 88 | py |
KSTER | KSTER-main/joeynmt/attention.py | # coding: utf-8
"""
Attention modules
"""
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
class AttentionMechanism(nn.Module):
    """Common interface shared by all attention mechanisms."""

    def forward(self, *inputs):
        """Subclasses must implement the actual attention computation."""
        raise NotImplementedError("Implement this.")
class BahdanauAttention(AttentionMechanism):
    """
    Bahdanau (additive / MLP) attention.

    Section A.1.2 in https://arxiv.org/pdf/1409.0473.pdf.
    """

    def __init__(self, hidden_size=1, key_size=1, query_size=1):
        """
        Create the attention sub-layers.

        :param hidden_size: size of the projection for query and key
        :param key_size: size of the attention input keys
        :param query_size: size of the query
        """
        super().__init__()

        self.key_layer = nn.Linear(key_size, hidden_size, bias=False)
        self.query_layer = nn.Linear(query_size, hidden_size, bias=False)
        self.energy_layer = nn.Linear(hidden_size, 1, bias=False)

        self.proj_keys = None   # cache for the projected keys
        self.proj_query = None  # cache for the projected query

    #pylint: disable=arguments-differ
    def forward(self, query: Tensor = None,
                mask: Tensor = None,
                values: Tensor = None):
        """
        Bahdanau MLP attention forward pass.

        :param query: the item (decoder state) to compare with the keys/memory,
            shape (batch_size, 1, decoder.hidden_size)
        :param mask: mask out keys position (0 in invalid positions, 1 else),
            shape (batch_size, 1, src_length)
        :param values: values (encoder states),
            shape (batch_size, src_length, encoder.hidden_size)
        :return: context vector of shape (batch_size, 1, value_size),
            attention probabilities of shape (batch_size, 1, src_length)
        """
        self._check_input_shapes_forward(query=query, mask=mask, values=values)

        assert mask is not None, "mask is required"
        assert self.proj_keys is not None, \
            "projection keys have to get pre-computed"

        # Project the query (decoder state); the keys (encoder states)
        # were already projected via compute_proj_keys.
        self.compute_proj_query(query)

        # Additive scoring: energy(tanh(W_q q + W_k k)).
        # proj_keys: batch x src_len x hidden_size
        # proj_query: batch x 1 x hidden_size
        energies = self.energy_layer(
            torch.tanh(self.proj_query + self.proj_keys))
        # energies: batch x src_len x 1 -> batch x 1 x src_len
        energies = energies.squeeze(2).unsqueeze(1)

        # Invalid positions receive -inf so softmax gives them zero mass.
        energies = torch.where(
            mask, energies, energies.new_full([1], float('-inf')))

        # Attention distribution over the source positions.
        probs = F.softmax(energies, dim=-1)  # batch x 1 x time

        # Context vector: attention-weighted sum of the values.
        context = probs @ values  # batch x 1 x value_size

        return context, probs

    def compute_proj_keys(self, keys: Tensor):
        """
        Compute the projection of the keys.
        Is efficient if pre-computed before receiving individual queries.

        :param keys:
        :return:
        """
        self.proj_keys = self.key_layer(keys)

    def compute_proj_query(self, query: Tensor):
        """
        Compute the projection of the query.

        :param query:
        :return:
        """
        self.proj_query = self.query_layer(query)

    def _check_input_shapes_forward(self, query: torch.Tensor,
                                    mask: torch.Tensor,
                                    values: torch.Tensor):
        """
        Make sure that inputs to `self.forward` are of correct shape.
        Same input semantics as for `self.forward`.

        :param query:
        :param mask:
        :param values:
        :return:
        """
        assert query.shape[0] == values.shape[0] == mask.shape[0]
        assert query.shape[1] == 1 == mask.shape[1]
        assert query.shape[2] == self.query_layer.in_features
        assert values.shape[2] == self.key_layer.in_features
        assert mask.shape[2] == values.shape[1]

    def __repr__(self):
        return "BahdanauAttention"
class LuongAttention(AttentionMechanism):
    """
    Luong (bilinear / multiplicative) attention.

    Eq. 8 ("general") in http://aclweb.org/anthology/D15-1166.
    """

    def __init__(self, hidden_size: int = 1, key_size: int = 1):
        """
        Create the key projection layer.

        :param hidden_size: size of the key projection layer, has to be equal
                            to decoder hidden size
        :param key_size: size of the attention input keys
        """
        super().__init__()
        self.key_layer = nn.Linear(in_features=key_size,
                                   out_features=hidden_size,
                                   bias=False)
        self.proj_keys = None  # cache for the projected keys

    # pylint: disable=arguments-differ
    def forward(self, query: torch.Tensor = None,
                mask: torch.Tensor = None,
                values: torch.Tensor = None):
        """
        Luong (multiplicative / bilinear) attention forward pass.

        Computes context vectors and attention scores for a given query and
        all masked values and returns them.

        :param query: the item (decoder state) to compare with the keys/memory,
            shape (batch_size, 1, decoder.hidden_size)
        :param mask: mask out keys position (0 in invalid positions, 1 else),
            shape (batch_size, 1, src_length)
        :param values: values (encoder states),
            shape (batch_size, src_length, encoder.hidden_size)
        :return: context vector of shape (batch_size, 1, value_size),
            attention probabilities of shape (batch_size, 1, src_length)
        """
        self._check_input_shapes_forward(query=query, mask=mask, values=values)

        assert self.proj_keys is not None, \
            "projection keys have to get pre-computed"
        assert mask is not None, "mask is required"

        # Bilinear scoring against the pre-projected keys.
        # scores: batch_size x 1 x src_length
        scores = query @ self.proj_keys.transpose(1, 2)

        # Invalid positions receive -inf so softmax gives them zero mass.
        scores = torch.where(mask, scores, scores.new_full([1], float('-inf')))

        # Attention distribution over the source positions.
        probs = F.softmax(scores, dim=-1)  # batch x 1 x src_len

        # Context vector: attention-weighted sum of the values.
        context = probs @ values  # batch x 1 x values_size

        return context, probs

    def compute_proj_keys(self, keys: Tensor):
        """
        Compute the projection of the keys and assign them to `self.proj_keys`.
        This pre-computation is efficiently done for all keys
        before receiving individual queries.

        :param keys: shape (batch_size, src_length, encoder.hidden_size)
        """
        # proj_keys: batch x src_len x hidden_size
        self.proj_keys = self.key_layer(keys)

    def _check_input_shapes_forward(self, query: torch.Tensor,
                                    mask: torch.Tensor,
                                    values: torch.Tensor):
        """
        Make sure that inputs to `self.forward` are of correct shape.
        Same input semantics as for `self.forward`.

        :param query:
        :param mask:
        :param values:
        :return:
        """
        assert query.shape[0] == values.shape[0] == mask.shape[0]
        assert query.shape[1] == 1 == mask.shape[1]
        assert query.shape[2] == self.key_layer.out_features
        assert values.shape[2] == self.key_layer.in_features
        assert mask.shape[2] == values.shape[1]

    def __repr__(self):
        return "LuongAttention"
| 7,824 | 33.933036 | 80 | py |
KSTER | KSTER-main/joeynmt/helpers.py | # coding: utf-8
"""
Collection of helper functions
"""
import copy
import glob
import os
import os.path
import errno
import shutil
import random
import logging
from typing import Optional, List
import pathlib
import numpy as np
import pkg_resources
import torch
from torch import nn, Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
import yaml
from joeynmt.vocabulary import Vocabulary
from joeynmt.plotting import plot_heatmap
from sacremoses import MosesDetokenizer
class ConfigurationError(Exception):
    """Raised when the configuration is misspecified or inconsistent."""
def make_model_dir(model_dir: str, overwrite=False) -> str:
    """
    Create a new directory for the model.

    :param model_dir: path to model directory
    :param overwrite: whether to overwrite an existing directory
    :return: path to model directory
    :raises FileExistsError: if the directory exists and overwrite is False
    """
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
        return model_dir
    if not overwrite:
        raise FileExistsError(
            "Model directory exists and overwriting is disabled.")
    # delete previous directory to start with empty dir again
    shutil.rmtree(model_dir)
    os.makedirs(model_dir)
    return model_dir
def make_logger(log_dir: str = None, mode: str = "train") -> str:
    """
    Create a logger for logging the training/testing process.

    Configures the *root* logger: a DEBUG-level file handler (only if
    ``log_dir`` exists) and an INFO-level stream handler. Handlers are
    attached at most once across repeated calls.

    :param log_dir: path to file where log is stored as well
    :param mode: log file name. 'train', 'test' or 'translate'
    :return: joeynmt version number
    """
    logger = logging.getLogger("")  # root logger
    # NOTE(review): pkg_resources is deprecated in modern setuptools and
    # requires joeynmt to be installed as a distribution -- confirm before
    # running from a plain source checkout.
    version = pkg_resources.require("joeynmt")[0].version

    # add handlers only once.
    if len(logger.handlers) == 0:
        logger.setLevel(level=logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(name)s - %(message)s')

        if log_dir is not None:
            # silently skip file logging if the directory does not exist
            if os.path.exists(log_dir):
                log_file = f'{log_dir}/{mode}.log'

                fh = logging.FileHandler(log_file)
                fh.setLevel(level=logging.DEBUG)
                logger.addHandler(fh)
                fh.setFormatter(formatter)

        sh = logging.StreamHandler()
        sh.setLevel(logging.INFO)
        sh.setFormatter(formatter)
        logger.addHandler(sh)
        logger.info("Hello! This is Joey-NMT (version %s).", version)
    return version
def log_cfg(cfg: dict, prefix: str = "cfg") -> None:
    """
    Write configuration to log, one line per (possibly nested) key.

    :param cfg: configuration to log
    :param prefix: prefix for logging
    """
    logger = logging.getLogger(__name__)
    for key, value in cfg.items():
        full_key = '.'.join([prefix, key])
        if isinstance(value, dict):
            # descend into nested sections with the extended prefix
            log_cfg(value, prefix=full_key)
        else:
            logger.info("{:34s} : {}".format(full_key, value))
def clones(module: nn.Module, n: int) -> nn.ModuleList:
    """
    Produce N identical layers. Transformer helper function.

    :param module: the module to clone
    :param n: clone this many times
    :return: cloned modules
    """
    copies = [copy.deepcopy(module) for _ in range(n)]
    return nn.ModuleList(copies)
def subsequent_mask(size: int) -> Tensor:
    """
    Mask out subsequent positions (to prevent attending to future positions).
    Transformer helper function.

    :param size: size of mask (2nd and 3rd dim)
    :return: Tensor with 0s and 1s of shape (1, size, size)
    """
    # True on and below the diagonal, False strictly above it.
    upper = torch.triu(torch.ones(1, size, size, dtype=torch.uint8),
                       diagonal=1)
    return upper == 0
def set_seed(seed: int) -> None:
    """
    Seed the random number generators of the random, numpy and torch modules.

    :param seed: random seed
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # make CUDA runs reproducible as well (when a GPU is present)
    if torch.cuda.is_available() and torch.cuda.device_count() > 0:
        torch.backends.cudnn.deterministic = True
        torch.cuda.manual_seed_all(seed)
def log_data_info(train_data: Dataset, valid_data: Dataset, test_data: Dataset,
                  src_vocab: Vocabulary, trg_vocab: Vocabulary) -> None:
    """
    Log statistics of data and vocabulary.

    :param train_data:
    :param valid_data:
    :param test_data:
    :param src_vocab:
    :param trg_vocab:
    """
    logger = logging.getLogger(__name__)

    n_test = len(test_data) if test_data is not None else 0
    logger.info("Data set sizes: \n\ttrain %d,\n\tvalid %d,\n\ttest %d",
                len(train_data), len(valid_data), n_test)

    first_example = vars(train_data[0])
    logger.info("First training example:\n\t[SRC] %s\n\t[TRG] %s",
                " ".join(first_example['src']),
                " ".join(first_example['trg']))

    logger.info(
        "First 10 words (src): %s",
        " ".join('(%d) %s' % (i, t)
                 for i, t in enumerate(src_vocab.itos[:10])))
    logger.info(
        "First 10 words (trg): %s",
        " ".join('(%d) %s' % (i, t)
                 for i, t in enumerate(trg_vocab.itos[:10])))

    logger.info("Number of Src words (types): %d", len(src_vocab))
    logger.info("Number of Trg words (types): %d", len(trg_vocab))
def load_config(path="configs/default.yaml") -> dict:
    """
    Load and parse a YAML configuration file.

    :param path: path to YAML configuration file
    :return: configuration dictionary
    """
    with open(path, 'r') as ymlfile:
        return yaml.safe_load(ymlfile)
def bpe_postprocess(string, bpe_type="subword-nmt") -> str:
    """
    Post-processor for BPE output. Recombines BPE-split tokens.

    :param string:
    :param bpe_type: one of {"sentencepiece", "subword-nmt"}
    :return: post-processed string
    """
    if bpe_type == "sentencepiece":
        # drop in-word spaces, then turn the word-boundary marker into spaces
        return string.replace(" ", "").replace("▁", " ").strip()
    if bpe_type == "subword-nmt":
        # join the "@@ " continuation markers back together
        return string.replace("@@ ", "").strip()
    # unknown type: no recombination, just trim surrounding whitespace
    return string.strip()
def store_attention_plots(attentions: np.array,
                          targets: List[List[str]],
                          sources: List[List[str]],
                          output_prefix: str,
                          indices: List[int],
                          tb_writer: Optional[SummaryWriter] = None,
                          steps: int = 0) -> None:
    """
    Saves attention plots.

    Plotting is best-effort: a failure for one example is reported and the
    remaining examples are still plotted.

    :param attentions: attention scores
    :param targets: list of tokenized targets
    :param sources: list of tokenized sources
    :param output_prefix: prefix for attention plots
    :param indices: indices selected for plotting
    :param tb_writer: Tensorboard summary writer (optional)
    :param steps: current training steps, needed for tb_writer
    """
    for i in indices:
        if i >= len(sources):
            continue
        plot_file = "{}.{}.pdf".format(output_prefix, i)
        src = sources[i]
        trg = targets[i]
        attention_scores = attentions[i].T
        try:
            fig = plot_heatmap(scores=attention_scores,
                               column_labels=trg,
                               row_labels=src,
                               output_path=plot_file,
                               dpi=100)
            if tb_writer is not None:
                # lower resolution for tensorboard
                fig = plot_heatmap(scores=attention_scores,
                                   column_labels=trg,
                                   row_labels=src,
                                   output_path=None,
                                   dpi=50)
                tb_writer.add_figure("attention/{}.".format(i),
                                     fig,
                                     global_step=steps)
        except Exception:
            # Was a bare `except:` before, which would also swallow
            # KeyboardInterrupt/SystemExit; catch Exception only and
            # keep going with the next example.
            print("Couldn't plot example {}: src len {}, trg len {}, "
                  "attention scores shape {}".format(i, len(src), len(trg),
                                                     attention_scores.shape))
            continue
def get_latest_checkpoint(ckpt_dir: str) -> Optional[str]:
    """
    Returns the latest checkpoint (by creation/change time) from the given
    directory.

    The previous docstring claimed to return None when the directory holds
    no checkpoint, but the code has always raised instead; the docstring is
    now aligned with the actual (caller-visible) behavior.

    :param ckpt_dir: directory to search for ``*.ckpt`` files
    :return: path of the latest checkpoint file
    :raises FileNotFoundError: if no checkpoint is found in ``ckpt_dir``
    """
    list_of_files = glob.glob("{}/*.ckpt".format(ckpt_dir))
    latest_checkpoint = None
    if list_of_files:
        latest_checkpoint = max(list_of_files, key=os.path.getctime)

    # check existence
    if latest_checkpoint is None:
        raise FileNotFoundError(
            "No checkpoint found in directory {}.".format(ckpt_dir))
    return latest_checkpoint
def load_checkpoint(path: str, use_cuda: bool = True) -> dict:
    """
    Load model from saved checkpoint.

    :param path: path to checkpoint
    :param use_cuda: using cuda or not
    :return: checkpoint (dict)
    """
    assert os.path.isfile(path), "Checkpoint %s not found" % path
    device = 'cuda' if use_cuda else 'cpu'
    return torch.load(path, map_location=device)
# from onmt
def tile(x: Tensor, count: int, dim=0) -> Tensor:
    """
    Tiles x on dimension dim count times. From OpenNMT. Used for beam search.

    Every slice along ``dim`` is repeated ``count`` times consecutively,
    i.e. [a, b] with count=2 becomes [a, a, b, b]. LSTM state tuples are
    tiled element-wise.

    :param x: tensor to tile
    :param count: number of tiles
    :param dim: dimension along which the tensor is tiled
    :return: tiled tensor
    """
    if isinstance(x, tuple):
        h, c = x
        return tile(h, count, dim=dim), tile(c, count, dim=dim)
    # equivalent to the classic permute/transpose/repeat dance, in one call
    return x.repeat_interleave(count, dim=dim)
def freeze_params(module: nn.Module) -> None:
    """
    Freeze the parameters of this module,
    i.e. do not update them during training.

    :param module: freeze parameters of this module
    """
    for param in module.parameters():
        param.requires_grad = False
def symlink_update(target, link_name):
    """
    Create a symlink named ``link_name`` pointing at ``target``, replacing
    an existing link of that name. Any other OS error is re-raised.
    """
    try:
        os.symlink(target, link_name)
    except FileExistsError as e:
        if e.errno != errno.EEXIST:
            raise e
        # link already exists: replace it
        os.remove(link_name)
        os.symlink(target, link_name)
def latest_checkpoint_update(target: pathlib.Path,
                             link_name: str) -> Optional[pathlib.Path]:
    """
    This function finds the file that the symlink currently points to, sets it
    to the new target, and returns the previous target if it exists.

    :param target: A path to a file that we want the symlink to point to.
    :param link_name: This is the name of the symlink that we want to update.
    :return:
        - current_last: This is the previous target of the symlink, before it is
          updated in this function. If the symlink did not exist before or did
          not have a target, None is returned instead.
    """
    link = pathlib.Path(link_name)
    previous_target = None
    if link.is_symlink():
        # remember where the link pointed before replacing it
        previous_target = link.resolve()
        link.unlink()
    link.symlink_to(target)
    return previous_target
def get_sacrebleu_description(cfg: dict) -> dict:
    """
    This function extracts sacrebleu description dict from the configuration
    dict.

    :param cfg: the configuration dict
    :return: dict with keys "remove_whitespace" and "tokenize"; when a
        "sacrebleu" section is configured, also "use_detokenization" and,
        if that flag is set, the callables "batch_src_detokenize" /
        "batch_trg_detokenize"
    """
    data_config = cfg["data"]
    test_config = cfg["testing"]
    # defaults used when no "sacrebleu" section is configured
    sacrebleu_description = {"remove_whitespace": True, "tokenize": "13a"}
    if "sacrebleu" in test_config.keys():
        sacrebleu_description["remove_whitespace"] = test_config["sacrebleu"] \
            .get("remove_whitespace", True)
        sacrebleu_description["tokenize"] = test_config["sacrebleu"] \
            .get("tokenize", "13a")
        sacrebleu_description["use_detokenization"] = test_config["sacrebleu"] \
            .get("use_detokenization", False)
        # NOTE(review): "use_detokenization" is only present in the result
        # when a "sacrebleu" section exists -- confirm that consumers guard
        # for the missing key.
        if sacrebleu_description["use_detokenization"]:
            # build the Moses detokenizers once; the batch helpers below
            # close over them
            src_detokenizer = MosesDetokenizer(lang=data_config["src"])
            trg_detokenizer = MosesDetokenizer(lang=data_config["trg"])

            def batch_src_detokenize(sentences):
                # detokenize each whitespace-tokenized source sentence
                results = []
                for sentence in sentences:
                    results.append(src_detokenizer.detokenize(sentence.split()))
                return results

            def batch_trg_detokenize(sentences):
                # detokenize each whitespace-tokenized target sentence
                results = []
                for sentence in sentences:
                    results.append(trg_detokenizer.detokenize(sentence.split()))
                return results

            sacrebleu_description["batch_src_detokenize"] = batch_src_detokenize
            sacrebleu_description["batch_trg_detokenize"] = batch_trg_detokenize
    return sacrebleu_description
def check_combiner_cfg(combiner_cfg: dict) -> None:
    """
    This function is used to validate that the merged combiner config is valid.

    :param combiner_cfg: the merged combiner config dict
    :raises AssertionError: if a required option is missing or invalid
    """
    ctype = combiner_cfg["type"]
    assert ctype in ["no_combiner", "static_combiner", "dynamic_combiner"], \
        "combiner type %s is not supported currently." % ctype

    if ctype not in ["static_combiner", "dynamic_combiner"]:
        # "no_combiner" needs no further options
        return

    # options shared by both combiner variants
    for key in ["top_k", "kernel"]:
        assert combiner_cfg[key] is not None, "%s is needed in %s" % (key, ctype)
    for key in ["index_path", "token_map_path"]:
        assert combiner_cfg[key] is not None, "%s is needed in %s" % (key, ctype)
        path = combiner_cfg[key]
        assert os.path.exists(path), "%s does not exist" % path

    if ctype == "static_combiner":
        for key in ["mixing_weight", "bandwidth"]:
            assert combiner_cfg[key] is not None, \
                "%s is needed in %s" % (key, ctype)

    if ctype == "dynamic_combiner":
        assert combiner_cfg["embedding_path"] is not None, \
            "%s is needed in %s" % ("embedding_path", ctype)
        path = combiner_cfg["embedding_path"]
        assert os.path.exists(path), "%s does not exist" % path
KSTER | KSTER-main/joeynmt/combiner_training.py | # coding: utf-8
"""
Training module
"""
import argparse
import time
import shutil
from typing import List
import logging
import os
import sys
import collections
import pathlib
import numpy as np
import torch
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
from joeynmt.model import build_model
from joeynmt.batch import Batch
from joeynmt.helpers import log_data_info, load_config, log_cfg, \
store_attention_plots, load_checkpoint, make_model_dir, \
make_logger, set_seed, symlink_update, latest_checkpoint_update, \
ConfigurationError, get_sacrebleu_description, check_combiner_cfg
from joeynmt.model import Model, _DataParallel
from joeynmt.prediction import validate_on_data
from joeynmt.loss import XentLoss
from joeynmt.data import load_data, make_data_iter
from joeynmt.builders import build_optimizer, build_scheduler, \
build_gradient_clipper
from joeynmt.prediction import test
from joeynmt.combiners import build_combiner
# for fp16 training
# `no_apex` must survive this try/except: Python 3 deletes the name bound
# by `except ... as name:` when the except block exits, so the previous
# `except ImportError as no_apex: pass` left `no_apex` undefined and the
# later `raise ImportError(...) from no_apex` would crash with NameError.
no_apex = None  # holds the ImportError if apex is unavailable, else None
try:
    from apex import amp
    amp.register_half_function(torch, "einsum")
except ImportError as err:
    # error handling in CombinerTrainManager object construction
    no_apex = err
logger = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class CombinerTrainManager:
""" Manages training loop, validations, learning rate scheduling
and early stopping."""
    def __init__(self, model: Model, config: dict,
                 batch_class: Batch = Batch) -> None:
        """
        Creates a new CombinerTrainManager for a model, specified as in
        configuration. Only the combiner's parameters are optimized; the
        rest of the model stays fixed.

        :param model: torch module defining the model
        :param config: dictionary containing the training configurations
        :param batch_class: batch class to encapsulate the torch class
        """
        combiner_train_config = config["combiner_training"]
        self.batch_class = batch_class

        # files for logging and storing
        self.model_dir = combiner_train_config["model_dir"]
        # the model directory must already exist; it is not created here
        assert os.path.exists(self.model_dir)
        self.logging_freq = combiner_train_config.get("logging_freq", 100)
        self.valid_report_file = "{}/validations.txt".format(self.model_dir)
        self.tb_writer = SummaryWriter(log_dir=self.model_dir + "/tensorboard/")
        self.save_latest_checkpoint = combiner_train_config.get("save_latest_ckpt", True)

        # model
        self.model = model
        self._log_parameters_list()

        # objective
        self.label_smoothing = combiner_train_config.get("label_smoothing", 0.0)
        self.model.loss_function = XentLoss(pad_index=self.model.pad_index,
                                            smoothing=self.label_smoothing)
        self.normalization = combiner_train_config.get("normalization", "batch")
        if self.normalization not in ["batch", "tokens", "none"]:
            raise ConfigurationError("Invalid normalization option."
                                     "Valid options: "
                                     "'batch', 'tokens', 'none'.")

        # optimization
        self.learning_rate_min = combiner_train_config.get("learning_rate_min", 1.0e-8)
        self.clip_grad_fun = build_gradient_clipper(config=combiner_train_config)
        # only the combiner parameters are handed to the optimizer, so the
        # base model is effectively frozen during combiner training
        self.optimizer = build_optimizer(config=combiner_train_config,
                                         parameters=self.model.combiner.parameters())

        # validation & early stopping
        self.validation_freq = combiner_train_config.get("validation_freq", 1000)
        self.log_valid_sents = combiner_train_config.get("print_valid_sents", [0, 1, 2])
        # bounded queue of best-checkpoint file paths; oldest is deleted
        self.ckpt_queue = collections.deque(
            maxlen=combiner_train_config.get("keep_last_ckpts", 5))
        self.eval_metric = combiner_train_config.get("eval_metric", "bleu")
        if self.eval_metric not in [
                'bleu', 'chrf', 'token_accuracy', 'sequence_accuracy'
        ]:
            raise ConfigurationError("Invalid setting for 'eval_metric', "
                                     "valid options: 'bleu', 'chrf', "
                                     "'token_accuracy', 'sequence_accuracy'.")
        self.early_stopping_metric = combiner_train_config.get("early_stopping_metric",
                                                               "eval_metric")

        # early_stopping_metric decides on how to find the early stopping point:
        # ckpts are written when there's a new high/low score for this metric.
        # If we schedule after BLEU/chrf/accuracy, we want to maximize the
        # score, else we want to minimize it.
        if self.early_stopping_metric in ["ppl", "loss"]:
            self.minimize_metric = True
        elif self.early_stopping_metric == "eval_metric":
            if self.eval_metric in [
                    "bleu", "chrf", "token_accuracy", "sequence_accuracy"
            ]:
                self.minimize_metric = False
            # eval metric that has to get minimized (not yet implemented)
            else:
                self.minimize_metric = True
        else:
            raise ConfigurationError(
                "Invalid setting for 'early_stopping_metric', "
                "valid options: 'loss', 'ppl', 'eval_metric'.")

        # eval options
        test_config = config["testing"]
        self.bpe_type = test_config.get("bpe_type", "subword-nmt")
        self.sacrebleu = get_sacrebleu_description(config)

        # learning rate scheduling
        self.scheduler, self.scheduler_step_at = build_scheduler(
            config=combiner_train_config,
            scheduler_mode="min" if self.minimize_metric else "max",
            optimizer=self.optimizer,
            hidden_size=config["model"]["encoder"]["hidden_size"])

        # data & batch handling
        self.level = config["data"]["level"]
        if self.level not in ["word", "bpe", "char"]:
            raise ConfigurationError("Invalid segmentation level. "
                                     "Valid options: 'word', 'bpe', 'char'.")

        self.shuffle = combiner_train_config.get("shuffle", True)
        self.epochs = combiner_train_config["epochs"]
        self.batch_size = combiner_train_config["batch_size"]
        # Placeholder so that we can use the train_iter in other functions.
        self.train_iter = None
        self.train_iter_state = None
        # per-device batch_size = self.batch_size // self.n_gpu
        self.batch_type = combiner_train_config.get("batch_type", "sentence")
        self.eval_batch_size = combiner_train_config.get("eval_batch_size",
                                                         self.batch_size)
        # per-device eval_batch_size = self.eval_batch_size // self.n_gpu
        self.eval_batch_type = combiner_train_config.get("eval_batch_type",
                                                         self.batch_type)

        self.batch_multiplier = combiner_train_config.get("batch_multiplier", 1)

        # generation
        self.max_output_length = combiner_train_config.get("max_output_length", None)

        # CPU / GPU
        self.use_cuda = combiner_train_config["use_cuda"] and torch.cuda.is_available()
        self.n_gpu = torch.cuda.device_count() if self.use_cuda else 0
        self.device = torch.device("cuda" if self.use_cuda else "cpu")
        if self.use_cuda:
            self.model.to(self.device)

        # fp16
        self.fp16 = combiner_train_config.get("fp16", False)
        if self.fp16:
            if 'apex' not in sys.modules:
                # NOTE(review): `no_apex` is the name bound by the module-level
                # import guard; in Python 3 an `except ... as name:` binding is
                # deleted when the except block exits, so confirm the guard
                # keeps `no_apex` alive or this raise itself NameErrors.
                raise ImportError("Please install apex from "
                                  "https://www.github.com/nvidia/apex "
                                  "to use fp16 training.") from no_apex
            self.model, self.optimizer = amp.initialize(self.model,
                                                        self.optimizer,
                                                        opt_level='O1')
            # opt level: one of {"O0", "O1", "O2", "O3"}
            # see https://nvidia.github.io/apex/amp.html#opt-levels

        # initialize training statistics
        self.stats = self.TrainStatistics(
            steps=0,
            stop=False,
            total_tokens=0,
            best_ckpt_iter=0,
            best_ckpt_score=np.inf if self.minimize_metric else -np.inf,
            minimize_metric=self.minimize_metric,
            max_steps=combiner_train_config["max_steps"])

        # model parameters
        if "load_model" in combiner_train_config.keys():
            self.init_from_checkpoint(
                combiner_train_config["load_model"],
                reset_best_ckpt=combiner_train_config.get("reset_best_ckpt", False),
                reset_scheduler=combiner_train_config.get("reset_scheduler", False),
                reset_optimizer=combiner_train_config.get("reset_optimizer", False),
                reset_iter_state=combiner_train_config.get("reset_iter_state", False))

        # multi-gpu training (should be after apex fp16 initialization)
        if self.n_gpu > 1:
            self.model = _DataParallel(self.model)
    def _save_checkpoint(self, new_best: bool = True) -> None:
        """
        Save the model's current parameters and the training state to a
        checkpoint.

        The training state contains the total number of training steps,
        the total number of training tokens,
        the best checkpoint score and iteration so far,
        and optimizer and scheduler states.

        Note: only the combiner's weights are stored under "model_state",
        not the full model.

        :param new_best: This boolean signals which symlink we will use for the
            new checkpoint. If it is true, we update best.ckpt, else latest.ckpt.
        """
        combiner_path = os.path.join(self.model_dir,
                                     "{}.ckpt".format(self.stats.steps))
        # unwrap DataParallel so the stored keys carry no "module." prefix
        combiner_state_dict = self.model.combiner.module.state_dict() \
            if isinstance(self.model.combiner, torch.nn.DataParallel) \
            else self.model.combiner.state_dict()
        state = {
            "steps":
                self.stats.steps,
            "total_tokens":
                self.stats.total_tokens,
            "best_ckpt_score":
                self.stats.best_ckpt_score,
            "best_ckpt_iteration":
                self.stats.best_ckpt_iter,
            "model_state":
                combiner_state_dict,
            "optimizer_state":
                self.optimizer.state_dict(),
            "scheduler_state":
                self.scheduler.state_dict() if self.scheduler is not None else None,
            'amp_state':
                amp.state_dict() if self.fp16 else None,
            "train_iter_state":
                self.train_iter.state_dict()
        }
        torch.save(state, combiner_path)

        symlink_target = "{}.ckpt".format(self.stats.steps)
        if new_best:
            # keep at most `keep_last_ckpts` best checkpoints on disk
            if len(self.ckpt_queue) == self.ckpt_queue.maxlen:
                to_delete = self.ckpt_queue.popleft()  # delete oldest ckpt
                try:
                    os.remove(to_delete)
                except FileNotFoundError:
                    logger.warning(
                        "Wanted to delete old checkpoint %s but "
                        "file does not exist.", to_delete)

            self.ckpt_queue.append(combiner_path)

            best_path = "{}/best.ckpt".format(self.model_dir)
            try:
                # create/modify symbolic link for best checkpoint
                symlink_update(symlink_target, best_path)
            except OSError:
                # overwrite best.ckpt
                torch.save(state, best_path)

        if self.save_latest_checkpoint:
            last_path = "{}/latest.ckpt".format(self.model_dir)
            previous_path = latest_checkpoint_update(symlink_target, last_path)
            # If the last ckpt is in the ckpt_queue, we don't want to delete it.
            can_delete = True
            for ckpt_path in self.ckpt_queue:
                if pathlib.Path(ckpt_path).resolve() == previous_path:
                    can_delete = False
                    break
            if can_delete and previous_path is not None:
                os.remove(previous_path)
    def init_from_checkpoint(self,
                             path: str,
                             reset_best_ckpt: bool = False,
                             reset_scheduler: bool = False,
                             reset_optimizer: bool = False,
                             reset_iter_state: bool = False) -> None:
        """
        Initialize the trainer from a given checkpoint file.

        This checkpoint file contains not only model parameters, but also
        scheduler and optimizer states, see `self._save_checkpoint`.

        :param path: path to checkpoint
        :param reset_best_ckpt: reset tracking of the best checkpoint,
            use for domain adaptation with a new dev
            set or when using a new metric for fine-tuning.
        :param reset_scheduler: reset the learning rate scheduler, and do not
            use the one stored in the checkpoint.
        :param reset_optimizer: reset the optimizer, and do not use the one
            stored in the checkpoint.
        :param reset_iter_state: reset the sampler's internal state and do not
            use the one stored in the checkpoint.
        """
        logger.info("Loading model from %s", path)
        model_checkpoint = load_checkpoint(path=path, use_cuda=self.use_cuda)

        # restore model and optimizer parameters
        # NOTE(review): this loads "model_state" into the full model, while
        # _save_checkpoint stores only the combiner's state dict under that
        # key -- confirm the expected checkpoint format for this code path.
        self.model.load_state_dict(model_checkpoint["model_state"])

        if not reset_optimizer:
            self.optimizer.load_state_dict(model_checkpoint["optimizer_state"])
        else:
            logger.info("Reset optimizer.")

        if not reset_scheduler:
            # only restore when both a stored state and a scheduler exist
            if model_checkpoint["scheduler_state"] is not None and \
                    self.scheduler is not None:
                self.scheduler.load_state_dict(
                    model_checkpoint["scheduler_state"])
        else:
            logger.info("Reset scheduler.")

        # restore counts
        self.stats.steps = model_checkpoint["steps"]
        self.stats.total_tokens = model_checkpoint["total_tokens"]

        if not reset_best_ckpt:
            self.stats.best_ckpt_score = model_checkpoint["best_ckpt_score"]
            self.stats.best_ckpt_iter = model_checkpoint["best_ckpt_iteration"]
        else:
            logger.info("Reset tracking of the best checkpoint.")

        if (not reset_iter_state
                and model_checkpoint.get('train_iter_state', None) is not None):
            self.train_iter_state = model_checkpoint["train_iter_state"]

        # move parameters to cuda
        if self.use_cuda:
            self.model.to(self.device)

        # fp16
        if self.fp16 and model_checkpoint.get("amp_state", None) is not None:
            amp.load_state_dict(model_checkpoint['amp_state'])
# pylint: disable=unnecessary-comprehension
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def train_and_validate(self, train_data: Dataset, valid_data: Dataset) \
        -> None:
    """
    Train the model and validate it from time to time on the validation set.

    Note: only the combiner is trained here -- the full model is put into
    eval mode before the epoch loop and only ``self.model.combiner`` is
    switched to train mode / has its gradients zeroed and clipped.

    :param train_data: training data
    :param valid_data: validation data
    """
    self.train_iter = make_data_iter(train_data,
                                     batch_size=self.batch_size,
                                     batch_type=self.batch_type,
                                     train=True,
                                     shuffle=self.shuffle)

    # resume the data iterator where a loaded checkpoint left off
    if self.train_iter_state is not None:
        self.train_iter.load_state_dict(self.train_iter_state)

    #################################################################
    # simplify accumulation logic:
    #################################################################
    # for epoch in range(epochs):
    #     self.model.zero_grad()
    #     epoch_loss = 0.0
    #     batch_loss = 0.0
    #     for i, batch in enumerate(iter(self.train_iter)):
    #
    #         # gradient accumulation:
    #         # loss.backward() inside _train_step()
    #         batch_loss += self._train_step(inputs)
    #
    #         if (i + 1) % self.batch_multiplier == 0:
    #             self.optimizer.step()     # update!
    #             self.model.zero_grad()    # reset gradients
    #             self.steps += 1           # increment counter
    #
    #             epoch_loss += batch_loss  # accumulate batch loss
    #             batch_loss = 0            # reset batch loss
    #
    #     # leftovers are just ignored.
    #################################################################

    logger.info(
        "Train stats:\n"
        "\tdevice: %s\n"
        "\tn_gpu: %d\n"
        "\t16-bits training: %r\n"
        "\tgradient accumulation: %d\n"
        "\tbatch size per device: %d\n"
        "\ttotal batch size (w. parallel & accumulation): %d", self.device,
        self.n_gpu, self.fp16, self.batch_multiplier, self.batch_size //
        self.n_gpu if self.n_gpu > 1 else self.batch_size,
        self.batch_size * self.batch_multiplier)

    # the base model stays frozen in eval mode; only the combiner trains
    self.model.eval()

    for epoch_no in range(self.epochs):
        logger.info("EPOCH %d", epoch_no + 1)

        if self.scheduler is not None and self.scheduler_step_at == "epoch":
            self.scheduler.step(epoch=epoch_no)

        self.model.combiner.train()

        # Reset statistics for each epoch.
        start = time.time()
        total_valid_duration = 0
        start_tokens = self.stats.total_tokens
        self.model.combiner.zero_grad()
        epoch_loss = 0
        batch_loss = 0

        for i, batch in enumerate(iter(self.train_iter)):
            # create a Batch object from torchtext batch
            batch = self.batch_class(batch, self.model.pad_index,
                                     use_cuda=self.use_cuda)

            # get batch loss (gradients are accumulated inside _train_step)
            batch_loss += self._train_step(batch)

            # update!
            if (i + 1) % self.batch_multiplier == 0:
                # clip gradients (in-place)
                if self.clip_grad_fun is not None:
                    if self.fp16:
                        self.clip_grad_fun(
                            params=amp.master_params(self.optimizer))
                    else:
                        self.clip_grad_fun(
                            params=self.model.combiner.parameters())

                # make gradient step
                self.optimizer.step()

                # decay lr
                if self.scheduler is not None \
                        and self.scheduler_step_at == "step":
                    self.scheduler.step()

                # reset gradients
                self.model.combiner.zero_grad()

                # increment step counter
                self.stats.steps += 1
                if self.stats.steps >= self.stats.max_steps:
                    self.stats.stop = True

                # log learning progress
                if self.stats.steps % self.logging_freq == 0:
                    self.tb_writer.add_scalar("train/train_batch_loss",
                                              batch_loss, self.stats.steps)
                    elapsed = time.time() - start - total_valid_duration
                    elapsed_tokens = self.stats.total_tokens - start_tokens
                    logger.info(
                        "Epoch %3d, Step: %8d, Batch Loss: %12.6f, "
                        "Tokens per Sec: %8.0f, Lr: %.6f", epoch_no + 1,
                        self.stats.steps, batch_loss,
                        elapsed_tokens / elapsed,
                        self.optimizer.param_groups[0]["lr"])
                    start = time.time()
                    total_valid_duration = 0
                    start_tokens = self.stats.total_tokens

                # Only add complete loss of full mini-batch to epoch_loss
                epoch_loss += batch_loss  # accumulate epoch_loss
                batch_loss = 0  # reset batch_loss

                # validate on the entire dev set
                if self.stats.steps % self.validation_freq == 0:
                    valid_duration = self._validate(valid_data, epoch_no)
                    total_valid_duration += valid_duration

            if self.stats.stop:
                break
        if self.stats.stop:
            logger.info('Training ended since minimum lr %f was reached.',
                        self.learning_rate_min)
            break

        logger.info('Epoch %3d: total training loss %.2f', epoch_no + 1,
                    epoch_loss)
    else:
        # for-else: only runs when the epoch loop was not broken out of
        logger.info('Training ended after %3d epochs.', epoch_no + 1)
    logger.info('Best validation result (greedy) at step %8d: %6.2f %s.',
                self.stats.best_ckpt_iter, self.stats.best_ckpt_score,
                self.early_stopping_metric)

    self.tb_writer.close()  # close Tensorboard writer
def _train_step(self, batch: Batch) -> float:
    """
    Train the model on one batch: compute the loss and accumulate
    gradients via ``backward()``. The optimizer step itself happens in
    ``train_and_validate``.

    :param batch: training batch
    :return: normalized loss for the batch as a plain Python float
        (``.item()`` is called on the loss tensor before returning --
        the previous ``Tensor`` annotation was incorrect)
    """
    # reactivate training (validation may have switched to eval mode)
    self.model.combiner.train()

    # get loss
    batch_loss, _, _, _ = self.model(return_type="combiner_loss", **vars(batch))

    # sum multi-gpu losses
    if self.n_gpu > 1:
        batch_loss = batch_loss.sum()

    # normalize batch loss
    if self.normalization == "batch":
        normalizer = batch.nseqs
    elif self.normalization == "tokens":
        normalizer = batch.ntokens
    elif self.normalization == "none":
        normalizer = 1
    else:
        raise NotImplementedError("Only normalize by 'batch' or 'tokens' "
                                  "or summation of loss 'none' implemented")

    norm_batch_loss = batch_loss / normalizer

    # average over GPUs so the effective loss matches single-GPU training
    if self.n_gpu > 1:
        norm_batch_loss = norm_batch_loss / self.n_gpu
    # spread over accumulation steps so gradients match one large batch
    if self.batch_multiplier > 1:
        norm_batch_loss = norm_batch_loss / self.batch_multiplier

    # accumulate gradients
    if self.fp16:
        with amp.scale_loss(norm_batch_loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        norm_batch_loss.backward()

    # increment token counter
    self.stats.total_tokens += batch.ntokens
    return norm_batch_loss.item()
def _validate(self, valid_data, epoch_no):
    """
    Run greedy (beam_size=1) validation on the dev set, log and track the
    results, save a checkpoint if the early-stopping metric improved, and
    store validation outputs / attention plots.

    :param valid_data: validation dataset
    :param epoch_no: current epoch number (0-based; logged as 1-based)
    :return: wall-clock duration of this validation run in seconds
    """
    valid_start_time = time.time()

    valid_score, valid_loss, valid_ppl, valid_sources, \
        valid_sources_raw, valid_references, valid_hypotheses, \
        valid_hypotheses_raw, valid_attention_scores = \
        validate_on_data(
            batch_size=self.eval_batch_size,
            batch_class=self.batch_class,
            data=valid_data,
            eval_metric=self.eval_metric,
            level=self.level, model=self.model,
            use_cuda=self.use_cuda,
            max_output_length=self.max_output_length,
            compute_loss=True,
            beam_size=1,  # greedy validations
            batch_type=self.eval_batch_type,
            postprocess=True,  # always remove BPE for validation
            bpe_type=self.bpe_type,  # "subword-nmt" or "sentencepiece"
            sacrebleu=self.sacrebleu,  # sacrebleu options
            n_gpu=self.n_gpu
        )

    self.tb_writer.add_scalar("valid/valid_loss", valid_loss,
                              self.stats.steps)
    self.tb_writer.add_scalar("valid/valid_score", valid_score,
                              self.stats.steps)
    self.tb_writer.add_scalar("valid/valid_ppl", valid_ppl,
                              self.stats.steps)

    # pick the value that the early-stopping criterion tracks
    if self.early_stopping_metric == "loss":
        ckpt_score = valid_loss
    elif self.early_stopping_metric in ["ppl", "perplexity"]:
        ckpt_score = valid_ppl
    else:
        ckpt_score = valid_score

    if self.scheduler is not None \
            and self.scheduler_step_at == "validation":
        self.scheduler.step(ckpt_score)

    new_best = False
    if self.stats.is_best(ckpt_score):
        self.stats.best_ckpt_score = ckpt_score
        self.stats.best_ckpt_iter = self.stats.steps
        logger.info('Hooray! New best validation result [%s]!',
                    self.early_stopping_metric)
        if self.ckpt_queue.maxlen > 0:
            logger.info("Saving new checkpoint.")
            new_best = True
            self._save_checkpoint(new_best)
    elif self.save_latest_checkpoint:
        # overwrite the "latest" checkpoint even when not a new best
        self._save_checkpoint(new_best)

    # append to validation report
    self._add_report(valid_score=valid_score,
                     valid_loss=valid_loss,
                     valid_ppl=valid_ppl,
                     eval_metric=self.eval_metric,
                     new_best=new_best)

    self._log_examples(sources_raw=[v for v in valid_sources_raw],
                       sources=valid_sources,
                       hypotheses_raw=valid_hypotheses_raw,
                       hypotheses=valid_hypotheses,
                       references=valid_references)

    valid_duration = time.time() - valid_start_time
    logger.info(
        'Validation result (greedy) at epoch %3d, '
        'step %8d: %s: %6.2f, loss: %8.4f, ppl: %8.4f, '
        'duration: %.4fs', epoch_no + 1, self.stats.steps, self.eval_metric,
        valid_score, valid_loss, valid_ppl, valid_duration)

    # store validation set outputs
    self._store_outputs(valid_hypotheses)

    # store attention plots for selected valid sentences
    if valid_attention_scores:
        store_attention_plots(attentions=valid_attention_scores,
                              targets=valid_hypotheses_raw,
                              sources=[s for s in valid_data.src],
                              indices=self.log_valid_sents,
                              output_prefix="{}/att.{}".format(
                                  self.model_dir, self.stats.steps),
                              tb_writer=self.tb_writer,
                              steps=self.stats.steps)
    return valid_duration
def _add_report(self,
                valid_score: float,
                valid_ppl: float,
                valid_loss: float,
                eval_metric: str,
                new_best: bool = False) -> None:
    """
    Append a one-line report to validation logging file.

    Side effect: sets ``self.stats.stop`` when the current learning rate
    has dropped below ``self.learning_rate_min``.

    :param valid_score: validation evaluation score [eval_metric]
    :param valid_ppl: validation perplexity
    :param valid_loss: validation loss (sum over whole validation set)
    :param eval_metric: evaluation metric, e.g. "bleu"
    :param new_best: whether this is a new best model
    """
    current_lr = -1
    # ignores other param groups for now -- the last group's lr wins
    for param_group in self.optimizer.param_groups:
        current_lr = param_group['lr']

    if current_lr < self.learning_rate_min:
        self.stats.stop = True

    # explicit encoding: the report may contain non-ASCII metric names or
    # tokens, and the platform default encoding is not guaranteed UTF-8
    with open(self.valid_report_file, 'a', encoding="utf-8") as opened_file:
        opened_file.write(
            "Steps: {}\tLoss: {:.5f}\tPPL: {:.5f}\t{}: {:.5f}\t"
            "LR: {:.8f}\t{}\n".format(self.stats.steps, valid_loss,
                                      valid_ppl, eval_metric, valid_score,
                                      current_lr, "*" if new_best else ""))
def _log_parameters_list(self) -> None:
    """
    Write all trainable combiner parameters (count and names) to the log.
    """
    n_params = sum(
        np.prod(p.size())
        for p in self.model.combiner.parameters() if p.requires_grad)
    logger.info("Total params: %d", n_params)

    trainable_params = [
        name for name, param in self.model.combiner.named_parameters()
        if param.requires_grad
    ]
    logger.debug("Trainable parameters: %s", sorted(trainable_params))
    # the combiner is the only thing trained here -- it must have
    # at least one trainable parameter
    assert trainable_params
def _log_examples(self,
                  sources: List[str],
                  hypotheses: List[str],
                  references: List[str],
                  sources_raw: List[List[str]] = None,
                  hypotheses_raw: List[List[str]] = None,
                  references_raw: List[List[str]] = None) -> None:
    """
    Log the first `self.log_valid_sents` sentences of the given examples.

    Raw (tokenized) versions are logged at DEBUG level, the decoded
    strings at INFO level.

    :param sources: decoded sources (list of strings)
    :param hypotheses: decoded hypotheses (list of strings)
    :param references: decoded references (list of strings)
    :param sources_raw: raw sources (list of list of tokens)
    :param hypotheses_raw: raw hypotheses (list of list of tokens)
    :param references_raw: raw references (list of list of tokens)
    """
    num_examples = len(sources)
    for idx in self.log_valid_sents:
        # skip indices beyond the number of available examples
        if idx >= num_examples:
            continue
        logger.info("Example #%d", idx)

        if sources_raw is not None:
            logger.debug("\tRaw source: %s", sources_raw[idx])
        if references_raw is not None:
            logger.debug("\tRaw reference: %s", references_raw[idx])
        if hypotheses_raw is not None:
            logger.debug("\tRaw hypothesis: %s", hypotheses_raw[idx])

        logger.info("\tSource: %s", sources[idx])
        logger.info("\tReference: %s", references[idx])
        logger.info("\tHypothesis: %s", hypotheses[idx])
def _store_outputs(self, hypotheses: List[str]) -> None:
    """
    Write current validation outputs to file in `self.model_dir.`

    :param hypotheses: list of strings
    """
    current_valid_output_file = "{}/{}.hyps".format(self.model_dir,
                                                    self.stats.steps)
    # explicit encoding: hypotheses can contain non-ASCII tokens and the
    # platform default encoding (e.g. cp1252) may raise on them
    with open(current_valid_output_file, 'w',
              encoding="utf-8") as opened_file:
        for hyp in hypotheses:
            opened_file.write("{}\n".format(hyp))
class TrainStatistics:
    """Mutable bundle of counters tracked across a training run."""

    def __init__(self,
                 steps: int = 0,
                 stop: bool = False,
                 total_tokens: int = 0,
                 best_ckpt_iter: int = 0,
                 best_ckpt_score: float = np.inf,
                 minimize_metric: bool = True,
                 max_steps: int = 200000) -> None:
        """
        :param steps: global update step counter
        :param stop: set to True to terminate training
            (e.g. learning rate fell below its minimum)
        :param total_tokens: number of tokens seen so far
        :param best_ckpt_iter: step at which the best checkpoint was saved
        :param best_ckpt_score: best validation score so far
        :param minimize_metric: whether lower scores are better
        :param max_steps: hard cap on the number of update steps
        """
        self.steps = steps
        self.stop = stop
        self.total_tokens = total_tokens
        self.best_ckpt_iter = best_ckpt_iter
        self.best_ckpt_score = best_ckpt_score
        self.minimize_metric = minimize_metric
        self.max_steps = max_steps

    def is_best(self, score):
        """Return True iff `score` strictly improves on the best so far."""
        if self.minimize_metric:
            return score < self.best_ckpt_score
        return score > self.best_ckpt_score
def combiner_train(cfg_file: str, ckpt: str, combiner_cfg: dict) -> None:
    """
    Main combiner training function.

    Loads a pre-trained translation model from `ckpt`, freezes all of its
    parameters, attaches a freshly built combiner, and trains only the
    combiner via `CombinerTrainManager`.

    :param cfg_file: path to configuration yaml file
    :param ckpt: path to the pre-trained model checkpoint
    :param combiner_cfg: configuration dict for the combiner
    """
    cfg = load_config(cfg_file)

    # make logger
    model_dir = make_model_dir(cfg["combiner_training"]["model_dir"],
                               overwrite=cfg["combiner_training"].get(
                                   "overwrite", False))
    _ = make_logger(model_dir, mode="train")  # version string returned
    # TODO: save version number in model checkpoints

    check_combiner_cfg(combiner_cfg)
    cfg["combiner"] = combiner_cfg

    # set the random seed
    set_seed(seed=cfg["combiner_training"].get("random_seed", 42))

    # load the data
    train_data, dev_data, test_data, src_vocab, trg_vocab = load_data(
        data_cfg=cfg["data"])

    # load model state from disk
    use_cuda = cfg["combiner_training"]["use_cuda"]
    model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)

    # build model and load parameters into it
    model = build_model(cfg["model"], src_vocab=src_vocab, trg_vocab=trg_vocab)
    model.load_state_dict(model_checkpoint["model_state"])
    # freeze the translation model: only the combiner will be trained
    for p in model.parameters():
        p.requires_grad = False

    combiner = build_combiner(cfg)
    # load combiner from checkpoint for dynamic combiners
    model.combiner = combiner

    # for training management, e.g. early stopping and model selection
    trainer = CombinerTrainManager(model=model, config=cfg)

    # store copy of original training config in model dir
    shutil.copy2(cfg_file, model_dir + "/config.yaml")

    # log all entries of config
    log_cfg(cfg)

    log_data_info(train_data=train_data,
                  valid_data=dev_data,
                  test_data=test_data,
                  src_vocab=src_vocab,
                  trg_vocab=trg_vocab)

    logger.info(str(model))
    logger.info(str(combiner))

    # store the vocabs
    src_vocab_file = "{}/src_vocab.txt".format(cfg["combiner_training"]["model_dir"])
    src_vocab.to_file(src_vocab_file)
    trg_vocab_file = "{}/trg_vocab.txt".format(cfg["combiner_training"]["model_dir"])
    trg_vocab.to_file(trg_vocab_file)

    # train the model
    trainer.train_and_validate(train_data=train_data, valid_data=dev_data)
KSTER | KSTER-main/joeynmt/decoders.py | # coding: utf-8
"""
Various decoders
"""
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from joeynmt.attention import BahdanauAttention, LuongAttention
from joeynmt.encoders import Encoder
from joeynmt.helpers import freeze_params, ConfigurationError, subsequent_mask
from joeynmt.transformer_layers import PositionalEncoding, \
TransformerDecoderLayer
# pylint: disable=abstract-method
class Decoder(nn.Module):
    """
    Base decoder class.

    Concrete subclasses are expected to set ``self._output_size`` (the
    size of the target vocabulary); this base class only exposes it
    read-only through the `output_size` property.
    """

    @property
    def output_size(self):
        """
        Return the output size (size of the target vocabulary)

        :return:
        """
        return self._output_size
# pylint: disable=arguments-differ,too-many-arguments
# pylint: disable=too-many-instance-attributes, unused-argument
class RecurrentDecoder(Decoder):
    """A conditional RNN decoder with attention."""

    def __init__(self,
                 rnn_type: str = "gru",
                 emb_size: int = 0,
                 hidden_size: int = 0,
                 encoder: Encoder = None,
                 attention: str = "bahdanau",
                 num_layers: int = 1,
                 vocab_size: int = 0,
                 dropout: float = 0.,
                 emb_dropout: float = 0.,
                 hidden_dropout: float = 0.,
                 init_hidden: str = "bridge",
                 input_feeding: bool = True,
                 freeze: bool = False,
                 **kwargs) -> None:
        """
        Create a recurrent decoder with attention.

        :param rnn_type: rnn type, valid options: "lstm", "gru"
        :param emb_size: target embedding size
        :param hidden_size: size of the RNN
        :param encoder: encoder connected to this decoder
        :param attention: type of attention, valid options: "bahdanau", "luong"
        :param num_layers: number of recurrent layers
        :param vocab_size: target vocabulary size
        :param hidden_dropout: Is applied to the input to the attentional layer.
        :param dropout: Is applied between RNN layers.
        :param emb_dropout: Is applied to the RNN input (word embeddings).
        :param init_hidden: If "bridge" (default), the decoder hidden states are
            initialized from a projection of the last encoder state,
            if "zeros" they are initialized with zeros,
            if "last" they are identical to the last encoder state
            (only if they have the same size)
        :param input_feeding: Use Luong's input feeding.
        :param freeze: Freeze the parameters of the decoder during training.
        :param kwargs:
        """
        super().__init__()

        self.emb_dropout = torch.nn.Dropout(p=emb_dropout, inplace=False)
        self.type = rnn_type
        self.hidden_dropout = torch.nn.Dropout(p=hidden_dropout, inplace=False)
        self.hidden_size = hidden_size
        self.emb_size = emb_size

        rnn = nn.GRU if rnn_type == "gru" else nn.LSTM

        self.input_feeding = input_feeding
        if self.input_feeding:  # Luong-style
            # combine embedded prev word +attention vector before feeding to rnn
            self.rnn_input_size = emb_size + hidden_size
        else:
            # just feed prev word embedding
            self.rnn_input_size = emb_size

        # the decoder RNN
        self.rnn = rnn(self.rnn_input_size, hidden_size, num_layers,
                       batch_first=True,
                       dropout=dropout if num_layers > 1 else 0.)

        # combine output with context vector before output layer (Luong-style)
        self.att_vector_layer = nn.Linear(
            hidden_size + encoder.output_size, hidden_size, bias=True)

        self.output_layer = nn.Linear(hidden_size, vocab_size, bias=False)
        self._output_size = vocab_size

        if attention == "bahdanau":
            self.attention = BahdanauAttention(hidden_size=hidden_size,
                                               key_size=encoder.output_size,
                                               query_size=hidden_size)
        elif attention == "luong":
            self.attention = LuongAttention(hidden_size=hidden_size,
                                            key_size=encoder.output_size)
        else:
            raise ConfigurationError("Unknown attention mechanism: %s. "
                                     "Valid options: 'bahdanau', 'luong'."
                                     % attention)

        self.num_layers = num_layers
        # NOTE: re-assignment of self.hidden_size; same value as above
        self.hidden_size = hidden_size

        # to initialize from the final encoder state of last layer
        self.init_hidden_option = init_hidden
        if self.init_hidden_option == "bridge":
            self.bridge_layer = nn.Linear(
                encoder.output_size, hidden_size, bias=True)
        elif self.init_hidden_option == "last":
            if encoder.output_size != self.hidden_size:
                if encoder.output_size != 2*self.hidden_size:  # bidirectional
                    raise ConfigurationError(
                        "For initializing the decoder state with the "
                        "last encoder state, their sizes have to match "
                        "(encoder: {} vs. decoder: {})".format(
                            encoder.output_size, self.hidden_size))
        if freeze:
            freeze_params(self)

    def _check_shapes_input_forward_step(self,
                                         prev_embed: Tensor,
                                         prev_att_vector: Tensor,
                                         encoder_output: Tensor,
                                         src_mask: Tensor,
                                         hidden: Tensor) -> None:
        """
        Make sure the input shapes to `self._forward_step` are correct.
        Same inputs as `self._forward_step`.

        :param prev_embed:
        :param prev_att_vector:
        :param encoder_output:
        :param src_mask:
        :param hidden:
        """
        assert prev_embed.shape[1:] == torch.Size([1, self.emb_size])
        assert prev_att_vector.shape[1:] == torch.Size(
            [1, self.hidden_size])
        assert prev_att_vector.shape[0] == prev_embed.shape[0]
        assert encoder_output.shape[0] == prev_embed.shape[0]
        assert len(encoder_output.shape) == 3
        assert src_mask.shape[0] == prev_embed.shape[0]
        assert src_mask.shape[1] == 1
        assert src_mask.shape[2] == encoder_output.shape[1]
        if isinstance(hidden, tuple):  # for lstm
            hidden = hidden[0]
        assert hidden.shape[0] == self.num_layers
        assert hidden.shape[1] == prev_embed.shape[0]
        assert hidden.shape[2] == self.hidden_size

    def _check_shapes_input_forward(self,
                                    trg_embed: Tensor,
                                    encoder_output: Tensor,
                                    encoder_hidden: Tensor,
                                    src_mask: Tensor,
                                    hidden: Tensor = None,
                                    prev_att_vector: Tensor = None) -> None:
        """
        Make sure that inputs to `self.forward` are of correct shape.
        Same input semantics as for `self.forward`.

        :param trg_embed:
        :param encoder_output:
        :param encoder_hidden:
        :param src_mask:
        :param hidden:
        :param prev_att_vector:
        """
        assert len(encoder_output.shape) == 3
        if encoder_hidden is not None:
            assert len(encoder_hidden.shape) == 2
            assert encoder_hidden.shape[-1] == encoder_output.shape[-1]
        assert src_mask.shape[1] == 1
        assert src_mask.shape[0] == encoder_output.shape[0]
        assert src_mask.shape[2] == encoder_output.shape[1]
        assert trg_embed.shape[0] == encoder_output.shape[0]
        assert trg_embed.shape[2] == self.emb_size
        if hidden is not None:
            if isinstance(hidden, tuple):  # for lstm
                hidden = hidden[0]
            assert hidden.shape[1] == encoder_output.shape[0]
            assert hidden.shape[2] == self.hidden_size
        if prev_att_vector is not None:
            assert prev_att_vector.shape[0] == encoder_output.shape[0]
            assert prev_att_vector.shape[2] == self.hidden_size
            assert prev_att_vector.shape[1] == 1

    def _forward_step(self,
                      prev_embed: Tensor,
                      prev_att_vector: Tensor,  # context or att vector
                      encoder_output: Tensor,
                      src_mask: Tensor,
                      hidden: Tensor) -> (Tensor, Tensor, Tensor):
        """
        Perform a single decoder step (1 token).

        1. `rnn_input`: concat(prev_embed, prev_att_vector [possibly empty])
        2. update RNN with `rnn_input`
        3. calculate attention and context/attention vector

        :param prev_embed: embedded previous token,
            shape (batch_size, 1, embed_size)
        :param prev_att_vector: previous attention vector,
            shape (batch_size, 1, hidden_size)
        :param encoder_output: encoder hidden states for attention context,
            shape (batch_size, src_length, encoder.output_size)
        :param src_mask: src mask, 1s for area before <eos>, 0s elsewhere
            shape (batch_size, 1, src_length)
        :param hidden: previous hidden state,
            shape (num_layers, batch_size, hidden_size)
        :return:
            - att_vector: new attention vector (batch_size, 1, hidden_size),
            - hidden: new hidden state with shape (batch_size, 1, hidden_size),
            - att_probs: attention probabilities (batch_size, 1, src_len)
        """
        # shape checks
        self._check_shapes_input_forward_step(prev_embed=prev_embed,
                                              prev_att_vector=prev_att_vector,
                                              encoder_output=encoder_output,
                                              src_mask=src_mask,
                                              hidden=hidden)

        if self.input_feeding:
            # concatenate the input with the previous attention vector
            rnn_input = torch.cat([prev_embed, prev_att_vector], dim=2)
        else:
            rnn_input = prev_embed

        rnn_input = self.emb_dropout(rnn_input)

        # rnn_input: batch x 1 x emb+2*enc_size
        _, hidden = self.rnn(rnn_input, hidden)

        # use new (top) decoder layer as attention query
        if isinstance(hidden, tuple):
            query = hidden[0][-1].unsqueeze(1)
        else:
            query = hidden[-1].unsqueeze(1)  # [#layers, B, D] -> [B, 1, D]

        # compute context vector using attention mechanism
        # only use last layer for attention mechanism
        # key projections are pre-computed
        context, att_probs = self.attention(
            query=query, values=encoder_output, mask=src_mask)

        # return attention vector (Luong)
        # combine context with decoder hidden state before prediction
        att_vector_input = torch.cat([query, context], dim=2)
        # batch x 1 x 2*enc_size+hidden_size
        att_vector_input = self.hidden_dropout(att_vector_input)

        att_vector = torch.tanh(self.att_vector_layer(att_vector_input))

        # output: batch x 1 x hidden_size
        return att_vector, hidden, att_probs

    def forward(self,
                trg_embed: Tensor,
                encoder_output: Tensor,
                encoder_hidden: Tensor,
                src_mask: Tensor,
                unroll_steps: int,
                hidden: Tensor = None,
                prev_att_vector: Tensor = None,
                **kwargs) \
            -> (Tensor, Tensor, Tensor, Tensor):
        """
        Unroll the decoder one step at a time for `unroll_steps` steps.
        For every step, the `_forward_step` function is called internally.

        During training, the target inputs (`trg_embed') are already known for
        the full sequence, so the full unrol is done.
        In this case, `hidden` and `prev_att_vector` are None.

        For inference, this function is called with one step at a time since
        embedded targets are the predictions from the previous time step.
        In this case, `hidden` and `prev_att_vector` are fed from the output
        of the previous call of this function (from the 2nd step on).

        `src_mask` is needed to mask out the areas of the encoder states that
        should not receive any attention,
        which is everything after the first <eos>.

        The `encoder_output` are the hidden states from the encoder and are
        used as context for the attention.

        The `encoder_hidden` is the last encoder hidden state that is used to
        initialize the first hidden decoder state
        (when `self.init_hidden_option` is "bridge" or "last").

        :param trg_embed: embedded target inputs,
            shape (batch_size, trg_length, embed_size)
        :param encoder_output: hidden states from the encoder,
            shape (batch_size, src_length, encoder.output_size)
        :param encoder_hidden: last state from the encoder,
            shape (batch_size, encoder.output_size)
        :param src_mask: mask for src states: 0s for padded areas,
            1s for the rest, shape (batch_size, 1, src_length)
        :param unroll_steps: number of steps to unroll the decoder RNN
        :param hidden: previous decoder hidden state,
            if not given it's initialized as in `self.init_hidden`,
            shape (batch_size, num_layers, hidden_size)
        :param prev_att_vector: previous attentional vector,
            if not given it's initialized with zeros,
            shape (batch_size, 1, hidden_size)
        :return:
            - outputs: shape (batch_size, unroll_steps, vocab_size),
            - hidden: last hidden state (batch_size, num_layers, hidden_size),
            - att_probs: attention probabilities
                with shape (batch_size, unroll_steps, src_length),
            - att_vectors: attentional vectors
                with shape (batch_size, unroll_steps, hidden_size)
        """
        # initialize decoder hidden state from final encoder hidden state
        if hidden is None and encoder_hidden is not None:
            hidden = self._init_hidden(encoder_hidden)
        else:
            # DataParallel splits batch along the 0th dim.
            # Place back the batch_size to the 1st dim here.
            if isinstance(hidden, tuple):
                h, c = hidden
                hidden = (h.permute(1, 0, 2).contiguous(),
                          c.permute(1, 0, 2).contiguous())
            else:
                hidden = hidden.permute(1, 0, 2).contiguous()
            # shape (num_layers, batch_size, hidden_size)

        # shape checks
        self._check_shapes_input_forward(
            trg_embed=trg_embed,
            encoder_output=encoder_output,
            encoder_hidden=encoder_hidden,
            src_mask=src_mask,
            hidden=hidden,
            prev_att_vector=prev_att_vector)

        # pre-compute projected encoder outputs
        # (the "keys" for the attention mechanism)
        # this is only done for efficiency
        if hasattr(self.attention, "compute_proj_keys"):
            self.attention.compute_proj_keys(keys=encoder_output)

        # here we store all intermediate attention vectors (used for prediction)
        att_vectors = []
        att_probs = []

        batch_size = encoder_output.size(0)

        if prev_att_vector is None:
            with torch.no_grad():
                prev_att_vector = encoder_output.new_zeros(
                    [batch_size, 1, self.hidden_size])

        # unroll the decoder RNN for `unroll_steps` steps
        for i in range(unroll_steps):
            prev_embed = trg_embed[:, i].unsqueeze(1)  # batch, 1, emb
            prev_att_vector, hidden, att_prob = self._forward_step(
                prev_embed=prev_embed,
                prev_att_vector=prev_att_vector,
                encoder_output=encoder_output,
                src_mask=src_mask,
                hidden=hidden)
            att_vectors.append(prev_att_vector)
            att_probs.append(att_prob)

        att_vectors = torch.cat(att_vectors, dim=1)
        # att_vectors: batch, unroll_steps, hidden_size
        att_probs = torch.cat(att_probs, dim=1)
        # att_probs: batch, unroll_steps, src_length
        outputs = self.output_layer(att_vectors)
        # outputs: batch, unroll_steps, vocab_size

        # DataParallel gathers batches along the 0th dim.
        # Put batch_size dim to the 0th position.
        if isinstance(hidden, tuple):
            h, c = hidden
            hidden = (h.permute(1, 0, 2).contiguous(),
                      c.permute(1, 0, 2).contiguous())
            assert hidden[0].size(0) == batch_size
        else:
            hidden = hidden.permute(1, 0, 2).contiguous()
            assert hidden.size(0) == batch_size
        # shape (batch_size, num_layers, hidden_size)

        return outputs, hidden, att_probs, att_vectors

    def _init_hidden(self, encoder_final: Tensor = None) \
            -> (Tensor, Optional[Tensor]):
        """
        Returns the initial decoder state,
        conditioned on the final encoder state of the last encoder layer.

        In case of `self.init_hidden_option == "bridge"`
        and a given `encoder_final`, this is a projection of the encoder state.

        In case of `self.init_hidden_option == "last"`
        and a size-matching `encoder_final`, this is set to the encoder state.
        If the encoder is twice as large as the decoder state (e.g. when
        bi-directional), just use the forward hidden state.

        In case of `self.init_hidden_option == "zero"`, it is initialized with
        zeros.

        For LSTMs we initialize both the hidden state and the memory cell
        with the same projection/copy of the encoder hidden state.

        All decoder layers are initialized with the same initial values.

        :param encoder_final: final state from the last layer of the encoder,
            shape (batch_size, encoder_hidden_size)
        :return: hidden state if GRU, (hidden state, memory cell) if LSTM,
            shape (batch_size, hidden_size)
        """
        batch_size = encoder_final.size(0)

        # for multiple layers: is the same for all layers
        if self.init_hidden_option == "bridge" and encoder_final is not None:
            # num_layers x batch_size x hidden_size
            hidden = torch.tanh(
                self.bridge_layer(encoder_final)).unsqueeze(0).repeat(
                    self.num_layers, 1, 1)
        elif self.init_hidden_option == "last" and encoder_final is not None:
            # special case: encoder is bidirectional: use only forward state
            if encoder_final.shape[1] == 2*self.hidden_size:  # bidirectional
                encoder_final = encoder_final[:, :self.hidden_size]
            hidden = encoder_final.unsqueeze(0).repeat(self.num_layers, 1, 1)
        else:  # initialize with zeros
            with torch.no_grad():
                hidden = encoder_final.new_zeros(
                    self.num_layers, batch_size, self.hidden_size)

        return (hidden, hidden) if isinstance(self.rnn, nn.LSTM) else hidden

    def __repr__(self):
        return "RecurrentDecoder(rnn=%r, attention=%r)" % (
            self.rnn, self.attention)
# pylint: disable=arguments-differ,too-many-arguments
# pylint: disable=too-many-instance-attributes, unused-argument
class TransformerDecoder(Decoder):
    """
    A transformer decoder with N masked layers.
    Decoder layers are masked so that an attention head cannot see the future.
    """

    def __init__(self,
                 num_layers: int = 4,
                 num_heads: int = 8,
                 hidden_size: int = 512,
                 ff_size: int = 2048,
                 dropout: float = 0.1,
                 emb_dropout: float = 0.1,
                 vocab_size: int = 1,
                 freeze: bool = False,
                 **kwargs):
        """
        Initialize a Transformer decoder.

        :param num_layers: number of Transformer layers
        :param num_heads: number of heads for each layer
        :param hidden_size: hidden size
        :param ff_size: position-wise feed-forward size
        :param dropout: dropout probability (1-keep)
        :param emb_dropout: dropout probability for embeddings
        :param vocab_size: size of the output vocabulary
        :param freeze: set to True keep all decoder parameters fixed
        :param kwargs:
        """
        super().__init__()

        self._hidden_size = hidden_size
        self._output_size = vocab_size

        # create num_layers decoder layers and put them in a list
        self.layers = nn.ModuleList([TransformerDecoderLayer(
            size=hidden_size, ff_size=ff_size, num_heads=num_heads,
            dropout=dropout) for _ in range(num_layers)])

        self.pe = PositionalEncoding(hidden_size)
        self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-6)

        self.emb_dropout = nn.Dropout(p=emb_dropout)
        self.output_layer = nn.Linear(hidden_size, vocab_size, bias=False)

        if freeze:
            freeze_params(self)

    def forward(self,
                trg_embed: Tensor = None,
                encoder_output: Tensor = None,
                encoder_hidden: Tensor = None,
                src_mask: Tensor = None,
                unroll_steps: int = None,
                hidden: Tensor = None,
                trg_mask: Tensor = None,
                **kwargs):
        """
        Transformer decoder forward pass.

        :param trg_embed: embedded targets
        :param encoder_output: source representations
        :param encoder_hidden: unused
        :param src_mask:
        :param unroll_steps: unused
        :param hidden: unused
        :param trg_mask: to mask out target paddings
            Note that a subsequent mask is applied here.
        :param kwargs:
        :return: tuple of (output logits, context representations `c`,
            None, None). NOTE(review): the second return slot is `c`, not
            the decoder states -- callers must expect this; presumably a
            KSTER-specific hook for retrieval. Confirm against the callers
            and `TransformerDecoderLayer.context_representations`.
        """
        assert trg_mask is not None, "trg_mask required for Transformer"

        x = self.pe(trg_embed)  # add position encoding to word embedding
        x = self.emb_dropout(x)

        # combine the padding mask with a causal (subsequent) mask
        trg_mask = trg_mask & subsequent_mask(
            trg_embed.size(1)).type_as(trg_mask)

        # stl_x tracks the layer *input*; after the loop it holds the
        # input that was fed to the last ("second-to-last output") layer
        stl_x = None
        for layer in self.layers:
            stl_x = x
            x = layer(x=x, memory=encoder_output,
                      src_mask=src_mask, trg_mask=trg_mask)

        # re-run the last layer's context computation on its own input to
        # obtain context representations `c` (shape/semantics defined in
        # transformer_layers -- not visible here)
        c = self.layers[-1].context_representations(stl_x,
            memory=encoder_output, src_mask=src_mask, trg_mask=trg_mask)

        x = self.layer_norm(x)
        output = self.output_layer(x)

        return output, c, None, None

    def __repr__(self):
        return "%s(num_layers=%r, num_heads=%r)" % (
            self.__class__.__name__, len(self.layers),
            self.layers[0].trg_trg_att.num_heads)
| 23,155 | 40.647482 | 80 | py |
KSTER | KSTER-main/joeynmt/encoders.py | # coding: utf-8
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from joeynmt.helpers import freeze_params
from joeynmt.transformer_layers import \
TransformerEncoderLayer, PositionalEncoding
#pylint: disable=abstract-method
class Encoder(nn.Module):
    """
    Base encoder class.

    Concrete subclasses are expected to set ``self._output_size`` (the
    dimensionality of the produced representations); this base class only
    exposes it read-only through the `output_size` property.
    """

    @property
    def output_size(self):
        """
        Return the output size

        :return:
        """
        return self._output_size
class RecurrentEncoder(Encoder):
    """Encodes a sequence of word embeddings"""
    #pylint: disable=unused-argument
    def __init__(self,
                 rnn_type: str = "gru",
                 hidden_size: int = 1,
                 emb_size: int = 1,
                 num_layers: int = 1,
                 dropout: float = 0.,
                 emb_dropout: float = 0.,
                 bidirectional: bool = True,
                 freeze: bool = False,
                 **kwargs) -> None:
        """
        Create a new recurrent encoder.
        :param rnn_type: RNN type: `gru` or `lstm`.
        :param hidden_size: Size of each RNN.
        :param emb_size: Size of the word embeddings.
        :param num_layers: Number of encoder RNN layers.
        :param dropout: Is applied between RNN layers.
        :param emb_dropout: Is applied to the RNN input (word embeddings).
        :param bidirectional: Use a bi-directional RNN.
        :param freeze: freeze the parameters of the encoder during training
        :param kwargs: ignored (kept so configs with extra keys still work)
        """
        super().__init__()
        self.emb_dropout = torch.nn.Dropout(p=emb_dropout, inplace=False)
        self.type = rnn_type
        self.emb_size = emb_size
        # select the recurrent cell class; anything other than "gru" means LSTM
        rnn = nn.GRU if rnn_type == "gru" else nn.LSTM
        self.rnn = rnn(
            emb_size, hidden_size, num_layers, batch_first=True,
            bidirectional=bidirectional,
            # inter-layer dropout is only meaningful with >1 layers
            dropout=dropout if num_layers > 1 else 0.)
        # bidirectional RNNs concatenate forward and backward states
        self._output_size = 2 * hidden_size if bidirectional else hidden_size
        if freeze:
            freeze_params(self)
    #pylint: disable=invalid-name, unused-argument
    def _check_shapes_input_forward(self, embed_src: Tensor, src_length: Tensor,
                                    mask: Tensor) -> None:
        """
        Make sure the shape of the inputs to `self.forward` are correct.
        Same input semantics as `self.forward`.
        :param embed_src: embedded source tokens
        :param src_length: source length
        :param mask: source mask
        """
        assert embed_src.shape[0] == src_length.shape[0]
        assert embed_src.shape[2] == self.emb_size
        # assert mask.shape == embed_src.shape
        assert len(src_length.shape) == 1
    #pylint: disable=arguments-differ
    def forward(self, embed_src: Tensor, src_length: Tensor, mask: Tensor,
                **kwargs) -> (Tensor, Tensor):
        """
        Applies a bidirectional RNN to sequence of embeddings x.
        The input mini-batch x needs to be sorted by src length.
        x and mask should have the same dimensions [batch, time, dim].
        :param embed_src: embedded src inputs,
            shape (batch_size, src_len, embed_size)
        :param src_length: length of src inputs
            (counting tokens before padding), shape (batch_size)
        :param mask: indicates padding areas (zeros where padding), shape
            (batch_size, src_len, embed_size)
        :return:
            - output: hidden states with
                shape (batch_size, max_length, directions*hidden),
            - hidden_concat: last hidden state with
                shape (batch_size, directions*hidden)
        """
        self._check_shapes_input_forward(embed_src=embed_src,
                                         src_length=src_length,
                                         mask=mask)
        total_length = embed_src.size(1)
        # apply dropout to the rnn input
        embed_src = self.emb_dropout(embed_src)
        # pack so the RNN skips padded positions; requires lengths on CPU
        packed = pack_padded_sequence(embed_src, src_length.cpu(),
                                      batch_first=True)
        output, hidden = self.rnn(packed)
        #pylint: disable=unused-variable
        # LSTMs return (hidden, cell); keep only the hidden state
        if isinstance(hidden, tuple):
            hidden, memory_cell = hidden
        # unpack back to a padded tensor of the original time dimension
        output, _ = pad_packed_sequence(output, batch_first=True,
                                        total_length=total_length)
        # hidden: dir*layers x batch x hidden
        # output: batch x max_length x directions*hidden
        batch_size = hidden.size()[1]
        # separate final hidden states by layer and direction
        hidden_layerwise = hidden.view(self.rnn.num_layers,
                                       2 if self.rnn.bidirectional else 1,
                                       batch_size, self.rnn.hidden_size)
        # final_layers: layers x directions x batch x hidden
        # concatenate the final states of the last layer for each directions
        # thanks to pack_padded_sequence final states don't include padding
        # NOTE(review): indexing direction 1 assumes bidirectional=True;
        # a unidirectional encoder would fail here — confirm with callers
        fwd_hidden_last = hidden_layerwise[-1:, 0]
        bwd_hidden_last = hidden_layerwise[-1:, 1]
        # only feed the final state of the top-most layer to the decoder
        #pylint: disable=no-member
        hidden_concat = torch.cat(
            [fwd_hidden_last, bwd_hidden_last], dim=2).squeeze(0)
        # final: batch x directions*hidden
        return output, hidden_concat
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.rnn)
class TransformerEncoder(Encoder):
    """
    A stack of self-attention Transformer encoder layers.
    """

    #pylint: disable=unused-argument
    def __init__(self,
                 hidden_size: int = 512,
                 ff_size: int = 2048,
                 num_layers: int = 8,
                 num_heads: int = 4,
                 dropout: float = 0.1,
                 emb_dropout: float = 0.1,
                 freeze: bool = False,
                 **kwargs):
        """
        Build the Transformer encoder.

        :param hidden_size: hidden size and size of embeddings
        :param ff_size: position-wise feed-forward layer size
            (typically 2*hidden_size)
        :param num_layers: number of stacked encoder layers
        :param num_heads: number of attention heads per layer
        :param dropout: dropout probability inside each layer
        :param emb_dropout: dropout applied to the input embeddings
        :param freeze: freeze the parameters of the encoder during training
        :param kwargs: ignored
        """
        super().__init__()
        # the stack of identical encoder layers
        self.layers = nn.ModuleList([
            TransformerEncoderLayer(size=hidden_size, ff_size=ff_size,
                                    num_heads=num_heads, dropout=dropout)
            for _ in range(num_layers)])
        self.pe = PositionalEncoding(hidden_size)
        self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-6)
        self.emb_dropout = nn.Dropout(p=emb_dropout)
        self._output_size = hidden_size
        if freeze:
            freeze_params(self)

    #pylint: disable=arguments-differ
    def forward(self,
                embed_src: Tensor,
                src_length: Tensor,
                mask: Tensor, **kwargs) -> (Tensor, Tensor):
        """
        Encode a batch of embedded source sequences.

        :param embed_src: embedded src inputs,
            shape (batch_size, src_len, embed_size)
        :param src_length: lengths of the src inputs (unused here, kept for
            interface compatibility with the recurrent encoder)
        :param mask: indicates padding areas (zeros where padding), shape
            (batch_size, 1, src_len)
        :return: final layer-normed hidden states of shape
            (batch_size, src_len, hidden_size), and ``None`` in place of a
            last hidden state (Transformers have none)
        """
        # position encoding + dropout on the word embeddings
        states = self.emb_dropout(self.pe(embed_src))
        for enc_layer in self.layers:
            states = enc_layer(states, mask)
        return self.layer_norm(states), None

    def __repr__(self):
        return (f"{self.__class__.__name__}"
                f"(num_layers={len(self.layers)!r}, "
                f"num_heads={self.layers[0].src_src_att.num_heads!r})")
| 8,571 | 36.432314 | 80 | py |
KSTER | KSTER-main/joeynmt/kernel.py | import torch
from typing import Tuple, Union
class Kernel(object):
    """Abstract kernel that converts neighbour distances into a
    probability distribution over the vocabulary."""

    def __init__(self) -> None:
        super(Kernel, self).__init__()

    def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:
        """Map distances to (unnormalized) similarity scores.

        Subclasses must override this.
        """
        raise NotImplementedError

    def compute_example_based_distribution(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor], token_indices: torch.Tensor,
                                           vocab_size: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (full-vocab distribution, distribution over neighbours).

        The neighbour softmax probabilities are scattered into a
        zero-initialized full-vocabulary tensor at ``token_indices``;
        repeated token indices accumulate their probability mass.
        """
        scores = self.similarity(distances, bandwidth)
        # normalize over the retrieved neighbours only
        neighbour_probs = scores.softmax(dim=-1)
        # scatter neighbour mass into a full-vocabulary distribution
        base = torch.zeros((neighbour_probs.size(0), vocab_size),
                           device=neighbour_probs.device,
                           dtype=neighbour_probs.dtype)
        full = base.scatter_add(-1, token_indices, neighbour_probs)
        return full, neighbour_probs
class GaussianKernel(Kernel):
    """Gaussian kernel: score is the negated distance scaled by bandwidth."""

    def __init__(self) -> None:
        super(GaussianKernel, self).__init__()

    def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:
        # identical to `- distances / bandwidth`
        return distances.neg() / bandwidth
class LaplacianKernel(Kernel):
    """Laplacian kernel: score is the negated square root of the distance
    scaled by bandwidth."""

    def __init__(self) -> None:
        super(LaplacianKernel, self).__init__()

    def similarity(self, distances: torch.Tensor, bandwidth: Union[float, torch.Tensor]) -> torch.Tensor:
        # sqrt suggests the inputs are squared distances — confirm with caller
        return distances.sqrt().neg() / bandwidth
AT-on-AD | AT-on-AD-main/test_fmnist.py | import argparse
import logging
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
class MyLR(nn.Module):
    """Multinomial logistic regression: a single linear layer over the input."""

    def __init__(self, input_size, num_classes):
        super(MyLR, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        """Return the raw class logits for a batch of flattened inputs."""
        return self.linear(x)
class MnistDataset(torch.utils.data.Dataset):
    """Binary (F)MNIST dataset wrapper.

    Labels are remapped on access: original label 1 becomes class 0,
    every other label becomes class 1.
    """

    def __init__(self, X, y):
        self.X = torch.FloatTensor(X)
        self.y = torch.LongTensor(y)

    def __len__(self):
        return self.y.size(0)

    def __getitem__(self, index):
        # collapse to a binary task: label 1 -> class 0, otherwise class 1
        binary_label = 0 if self.y[index] == 1 else 1
        return self.X[index], binary_label
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Configure a named logger to write both to `log_file` and to the console.

    The log file is truncated (mode 'w') on every call. Note that repeated
    calls with the same name keep appending handlers to the same logger.
    """
    logger = logging.getLogger(logger_name)
    fmt = logging.Formatter('%(asctime)s : %(message)s')

    file_handler = logging.FileHandler(log_file, mode='w')
    file_handler.setFormatter(fmt)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)

    logger.setLevel(level)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
def parse_args():
    """Parse command-line options for the FMNIST evaluation run."""
    p = argparse.ArgumentParser()
    p.add_argument('--seed', type=int, default=42)
    p.add_argument('--R', type=int, default=1)
    p.add_argument('--lr', type=float, default=0.001)
    p.add_argument('--bs', type=int, default=64)
    p.add_argument('--n-epochs', type=int, default=20)
    # evaluate an adversarially-trained checkpoint when set
    p.add_argument('--adv', action='store_true', default=False)
    p.add_argument('--norm-type', type=str, default='linf',
                   choices=['linf', 'l2'])
    p.add_argument('--norm-scale', type=float)
    return p.parse_args()
# ---- experiment setup: derive the record folder from the CLI options ----
args = parse_args()
folder_main = 'record_fmnist'
# folder name encodes the training configuration of the checkpoint to load
if args.adv:
    folder_sub = osp.join(folder_main, f'train-adv_R-{args.R}_lr-{args.lr}_normtype-{args.norm_type}_normscale-{args.norm_scale}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
else:
    folder_sub = osp.join(folder_main, f'train-std_R-{args.R}_lr-{args.lr}_epoch-{args.n_epochs}_bs-{args.bs}_seed-{args.seed}')
if not osp.exists(folder_sub):
    os.makedirs(folder_sub)
# logging goes to <folder_sub>/test.log and to the console
task_name = 'test'
setup_logger(task_name, os.path.join(folder_sub, f'test.log'))
logger = logging.getLogger(task_name)
logger.info(f'args = {args}')
# seed both torch and numpy for reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# 2. data loading
# NOTE(review): always loads data_R_1.pth regardless of args.R — confirm intended
data = torch.load(osp.join('data_fmnist', f'data_R_1.pth'))
X_test, y_test = data['X_test'], data['y_test']
test_set = MnistDataset(X_test, y_test)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.bs, shuffle=False)
batch_size = 64
input_size = 784
num_classes = 2
# restore the trained logistic-regression checkpoint and move it to GPU
model = MyLR(input_size, num_classes)
model.load_state_dict(torch.load(osp.join(folder_sub, 'model.pth')))
model.cuda()
model.eval()
criterion = nn.CrossEntropyLoss()
def test(data_loader):
    """Evaluate the global `model` on `data_loader` with the global `criterion`.

    :param data_loader: yields (images, binary labels) batches
    :return: (total correct predictions,
              per-class correct counts [class 0, class 1],
              number of evaluated examples,
              mean batch loss)
    """
    eval_losses = []
    correct = 0
    correct_class = [0, 0]
    total = 0
    # fix: evaluation previously tracked gradients; disable autograd to
    # save memory and compute — the returned values are unchanged
    with torch.no_grad():
        for X, y in data_loader:
            X = X.view(-1, 784).cuda()
            y = y.cuda()
            output = model(X)
            loss = criterion(output, y)
            # fix: use the tensor directly instead of the deprecated `.data`
            _, predicted = torch.max(output, 1)
            eval_losses.append(loss.item())
            total += y.size(0)
            correct += (predicted == y).sum().item()
            correct_class[0] += torch.logical_and(predicted == y, y == 0).sum().item()
            correct_class[1] += torch.logical_and(predicted == y, y == 1).sum().item()
    return correct, correct_class, total, np.mean(eval_losses)
# run the evaluation over the held-out test split and log the summary
test_correct, test_correct_class, test_total, test_loss = test(test_loader)
logger.info(f'test_correct = {test_correct}, test_correct_class = {test_correct_class}, test_total = {test_total}, test_loss = {test_loss}')
| 3,685 | 28.023622 | 182 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.