| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
3D_STEP_Classification | 3D_STEP_Classification-main/MultiView_Classification/models/MVCNN.py | import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models as models
from .Model import Model
mean = Variable(torch.FloatTensor([0.485, 0.456, 0.406]), requires_grad=False).cuda()
std = Variable(torch.FloatTensor([0.229, 0.224, 0.225]), requires_grad=False).cuda()
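# flip(x, dim) reverses a tensor along dimension `dim` by indexing with a
# descending arange; an index-based alternative to torch.flip.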
def flip(x, dim):
xsize = x.size()
dim = x.dim() + dim if dim < 0 else dim
x = x.view(-1, *xsize[dim:])
x = x.view(x.size(0), x.size(1), -1)[:, getattr(torch.arange(x.size(1)-1,
-1, -1), ('cpu','cuda')[x.is_cuda])().long(), :]
return x.view(xsize)
class SVCNN(Model):
def __init__(self, name, nclasses=40, pretraining=True, cnn_name='vgg11'):
super(SVCNN, self).__init__(name)
# self.classnames=['airplane','bathtub','bed','bench','bookshelf','bottle','bowl','car','chair',
# 'cone','cup','curtain','desk','door','dresser','flower_pot','glass_box',
# 'guitar','keyboard','lamp','laptop','mantel','monitor','night_stand',
# 'person','piano','plant','radio','range_hood','sink','sofa','stairs',
# 'stool','table','tent','toilet','tv_stand','vase','wardrobe','xbox']
self.classnames = ['0', '1', '2', '3', '4', '5']
self.nclasses = nclasses
self.pretraining = pretraining
self.cnn_name = cnn_name
self.use_resnet = cnn_name.startswith('resnet')
self.mean = Variable(torch.FloatTensor([0.485, 0.456, 0.406]), requires_grad=False).cuda()
self.std = Variable(torch.FloatTensor([0.229, 0.224, 0.225]), requires_grad=False).cuda()
if self.use_resnet:
if self.cnn_name == 'resnet18':
self.net = models.resnet18(pretrained=self.pretraining)
self.net.fc = nn.Linear(512,nclasses)
elif self.cnn_name == 'resnet34':
self.net = models.resnet34(pretrained=self.pretraining)
self.net.fc = nn.Linear(512,nclasses)
elif self.cnn_name == 'resnet50':
self.net = models.resnet50(pretrained=self.pretraining)
self.net.fc = nn.Linear(2048,nclasses)
else:
if self.cnn_name == 'alexnet':
self.net_1 = models.alexnet(pretrained=self.pretraining).features
self.net_2 = models.alexnet(pretrained=self.pretraining).classifier
elif self.cnn_name == 'vgg11':
self.net_1 = models.vgg11(pretrained=self.pretraining).features
self.net_2 = models.vgg11(pretrained=self.pretraining).classifier
elif self.cnn_name == 'vgg16':
self.net_1 = models.vgg16(pretrained=self.pretraining).features
self.net_2 = models.vgg16(pretrained=self.pretraining).classifier
self.net_2._modules['6'] = nn.Linear(4096,nclasses)
def forward(self, x):
if self.use_resnet:
return self.net(x)
else:
y = self.net_1(x)
return self.net_2(y.view(y.shape[0],-1))
class MVCNN(Model):
def __init__(self, name, model, nclasses=40, cnn_name='vgg11', num_views=12):
super(MVCNN, self).__init__(name)
self.classnames=['airplane','bathtub','bed','bench','bookshelf','bottle','bowl','car','chair',
'cone','cup','curtain','desk','door','dresser','flower_pot','glass_box',
'guitar','keyboard','lamp','laptop','mantel','monitor','night_stand',
'person','piano','plant','radio','range_hood','sink','sofa','stairs',
'stool','table','tent','toilet','tv_stand','vase','wardrobe','xbox']
self.nclasses = nclasses
self.num_views = num_views
self.mean = Variable(torch.FloatTensor([0.485, 0.456, 0.406]), requires_grad=False).cuda()
self.std = Variable(torch.FloatTensor([0.229, 0.224, 0.225]), requires_grad=False).cuda()
self.use_resnet = cnn_name.startswith('resnet')
if self.use_resnet:
self.net_1 = nn.Sequential(*list(model.net.children())[:-1])
self.net_2 = model.net.fc
else:
self.net_1 = model.net_1
self.net_2 = model.net_2
def forward(self, x):
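# x stacks every view of the mini-batch: (batch_size * num_views, C, H, W).
# Per-view CNN features are reshaped to (batch, views, ...) and max-pooled over
# the view dimension before the fully connected classifier.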
y = self.net_1(x)
y = y.view((int(x.shape[0]/self.num_views),self.num_views,y.shape[-3],y.shape[-2],y.shape[-1]))#(8,12,512,7,7)
return self.net_2(torch.max(y,1)[0].view(y.shape[0],-1))
| 4,572 | 43.833333 | 118 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/MultiView_Classification/models/Model.py | import torch
import torch.nn as nn
import os
import glob
class Model(nn.Module):
def __init__(self, name):
super(Model, self).__init__()
self.name = name
def save(self, path, epoch=0):
complete_path = os.path.join(path, self.name)
if not os.path.exists(complete_path):
os.makedirs(complete_path)
torch.save(self.state_dict(),
os.path.join(complete_path,
"model-{}.pth".format(str(epoch).zfill(5))))
def save_results(self, path, data):
raise NotImplementedError("Model subclass must implement this method.")
def load(self, path, modelfile=None):
complete_path = os.path.join(path, self.name)
if not os.path.exists(complete_path):
raise IOError("{} directory does not exist in {}".format(self.name, path))
if modelfile is None:
model_files = glob.glob(complete_path+"/*")
mf = max(model_files)
else:
mf = os.path.join(complete_path, modelfile)
self.load_state_dict(torch.load(mf))
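# Usage sketch (illustrative path): model.save("./runs", epoch=3) writes
# ./runs/<name>/model-00003.pth; model.load("./runs") restores the
# lexicographically newest checkpoint in that directory.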
| 1,102 | 25.902439 | 86 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/Graph_classification/GCN.py | import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
import torch
class AttentionModule(torch.nn.Module):
"""
Attention module that pools node embeddings into a single graph-level representation.
"""
def __init__(self, dim):
super(AttentionModule, self).__init__()
self.setup_weights(dim)
self.init_parameters()
def setup_weights(self, dim):
"""
Defining weights.
"""
self.weight_matrix = torch.nn.Parameter(torch.Tensor(dim, dim))
def init_parameters(self):
"""
Initializing weights.
"""
torch.nn.init.xavier_uniform_(self.weight_matrix)
def forward(self, embedding):
"""
Making a forward propagation pass to create a graph level representation.
:param embedding: Result of the GCN.
:return representation: A graph level representation vector.
"""
global_context = torch.mean(torch.matmul(embedding, self.weight_matrix), dim=0)
transformed_global = torch.tanh(global_context)
sigmoid_scores = torch.sigmoid(torch.mm(embedding, transformed_global.view(-1, 1)))
representation = torch.mm(torch.t(embedding), sigmoid_scores)
return representation
class GCN(nn.Module):
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 16
self.bottle_neck_neurons = 8
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.attention = AttentionModule(self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
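# Pipeline: three GCNConv layers (64 -> 32 -> 16 channels) with ReLU + dropout,
# attention pooling to a single graph vector, an 8-unit bottleneck, and a
# log-softmax scoring layer.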
def forward(self, adj, features):
features = self.convolution_1(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = self.attention(features)
pooled_features = torch.t(pooled_features)
scores = nn.functional.relu(self.fully_connected_first(pooled_features))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_CN_v2(nn.Module):
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_CN_v2, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 64
self.filters_3 = 32
self.filters_4 = 32
self.bottle_neck_neurons = 16
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.convolution_4 = GCNConv(in_channels=self.filters_3, out_channels=self.filters_4)
self.attention = AttentionModule(self.filters_4)
self.fully_connected_first = nn.Linear(self.filters_4, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features):
features = self.convolution_1(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_4(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = self.attention(features)
pooled_features = torch.t(pooled_features)
scores = nn.functional.relu(self.fully_connected_first(pooled_features))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_CN_v3(nn.Module):
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_CN_v3, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 16
self.bottle_neck_neurons = 16
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.attention = AttentionModule(self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features):
features = self.convolution_1(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = self.attention(features)
pooled_features = torch.t(pooled_features)
scores = nn.functional.relu(self.fully_connected_first(pooled_features))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_CN_v4(nn.Module):
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_CN_v4, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 32
self.bottle_neck_neurons = 32
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.attention = AttentionModule(self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features):
features = self.convolution_1(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = self.attention(features)
pooled_features = torch.t(pooled_features)
scores = nn.functional.relu(self.fully_connected_first(pooled_features))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_CN_v5(nn.Module):
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_CN_v5, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 80
self.filters_2 = 64
self.filters_3 = 64
self.bottle_neck_neurons_1 = 64
self.bottle_neck_neurons_2 = 32
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.attention = AttentionModule(self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons_1)
self.fully_connected_second = nn.Linear(self.bottle_neck_neurons_1, self.bottle_neck_neurons_2)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons_2, self.num_classes)
self.dropout = dropout
def forward(self, adj, features):
features = self.convolution_1(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(x=features, edge_index=adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = self.attention(features)
pooled_features = torch.t(pooled_features)
scores = nn.functional.relu(self.fully_connected_first(pooled_features))
scores = nn.functional.relu(self.fully_connected_second(scores))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_FC_v2(nn.Module):
"""
Variant without the second fully connected layer: the attention output is mapped directly to class scores.
"""
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_FC_v2, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 16
self.bottle_neck_neurons = 8
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.attention = AttentionModule(self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.num_classes)
# self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features):
features = self.convolution_1(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = self.attention(features)
pooled_features = torch.t(pooled_features)
scores = self.fully_connected_first(pooled_features)
# scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_att_v2(nn.Module):
"""
Instead of the attention mechanism, an unweighted mean over node embeddings is performed.
"""
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_att_v2, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 16
self.bottle_neck_neurons = 8
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
# self.unweighted_sum = AttentionModule(self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features):
features = self.convolution_1(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = torch.mean(features, dim=0).unsqueeze(0) # sum(features).unsqueeze(0)
# pooled_features = torch.t(pooled_features)
scores = nn.functional.relu(self.fully_connected_first(pooled_features))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_att_v3(nn.Module):
"""
Instead of the attention mechanism, a degree-weighted mean over node embeddings is performed.
"""
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_att_v3, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 16
self.bottle_neck_neurons = 8
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features, neighbors):
features = self.convolution_1(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
degrees_tmp = np.array([len(l) for l in neighbors])
degrees = torch.from_numpy(degrees_tmp).to(device) # torch.from_numpy(degrees_tmp/sum(degrees_tmp)).to(device)
pooled_features = torch.mean(torch.mul(features,degrees[:,None]), 0)
# pooled_features = np.average(features.detach().cpu(), axis=0, weights=degrees, returned=False)
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# pooled_features = torch.from_numpy(pooled_features).to(device)
# pooled_features.requires_grad = True
pooled_features = pooled_features.unsqueeze(0)
scores = nn.functional.relu(self.fully_connected_first(pooled_features.float()))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_CN_v4_att_v2(nn.Module):
"""
Instead of the attention mechanism, an unweighted average over node embeddings is performed.
"""
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_CN_v4_att_v2, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 32
self.bottle_neck_neurons = 32
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
# self.unweighted_sum = AttentionModule(self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features):
features = self.convolution_1(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
pooled_features = torch.mean(features, dim=0).unsqueeze(0) # sum(features).unsqueeze(0)
# pooled_features = torch.t(pooled_features)
scores = nn.functional.relu(self.fully_connected_first(pooled_features))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
class GCN_CN_v4_att_v3(nn.Module):
"""
Instead of the attention mechanism, a degree-weighted mean over node embeddings is performed.
"""
def __init__(self, feature_dim_size, num_classes, dropout):
super(GCN_CN_v4_att_v3, self).__init__()
self.number_labels = feature_dim_size
self.num_classes = num_classes
self.filters_1 = 64
self.filters_2 = 32
self.filters_3 = 32
self.bottle_neck_neurons = 32
self.convolution_1 = GCNConv(in_channels=self.number_labels, out_channels=self.filters_1)
self.convolution_2 = GCNConv(in_channels=self.filters_1, out_channels=self.filters_2)
self.convolution_3 = GCNConv(in_channels=self.filters_2, out_channels=self.filters_3)
self.fully_connected_first = nn.Linear(self.filters_3, self.bottle_neck_neurons)
self.scoring_layer = nn.Linear(self.bottle_neck_neurons, self.num_classes)
self.dropout = dropout
def forward(self, adj, features, neighbors):
features = self.convolution_1(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_2(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
features = self.convolution_3(features, adj)
features = nn.functional.relu(features)
features = nn.functional.dropout(features,
p=self.dropout,
training=self.training)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
degrees_tmp = np.array([len(l) for l in neighbors])
degrees = torch.from_numpy(degrees_tmp).to(device) # torch.from_numpy(degrees_tmp/sum(degrees_tmp)).to(device)
pooled_features = torch.mean(torch.mul(features,degrees[:,None]), 0)
# pooled_features = np.average(features.detach().cpu(), axis=0, weights=degrees, returned=False)
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# pooled_features = torch.from_numpy(pooled_features).to(device)
# pooled_features.requires_grad = True
pooled_features = pooled_features.unsqueeze(0)
scores = nn.functional.relu(self.fully_connected_first(pooled_features.float()))
scores = self.scoring_layer(scores)
score = F.log_softmax(scores, dim=1)
return score
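# Minimal smoke test of the baseline GCN; the tensors and sizes below are
# illustrative only, and torch_geometric must be installed for GCNConv.
if __name__ == "__main__":
    _x = torch.randn(5, 12)                                   # 5 nodes, 12 input features
    _edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])  # 4 directed edges
    _model = GCN(feature_dim_size=12, num_classes=3, dropout=0.5)
    print(_model(_edge_index, _x).shape)                      # torch.Size([1, 3])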
| 24,497 | 44.450835 | 119 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/Graph_classification/train_GCN.py | #! /usr/bin/env python
from GCN import *
from datetime import datetime
from utils.my_utils import *
from utils.util import *
import time
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
import math
from train_utils import *
torch.manual_seed(124)
np.random.seed(124)
# Parameters
# ==================================================
parser = ArgumentParser("GCN", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--run_folder", default="../", help="")
parser.add_argument("--dataset", default="Traceparts_6/Graphml_Models/", help="Name of the graph (.graphml) dataset.")
parser.add_argument("--learning_rate", default=0.0005, type=float, help="Learning rate")
parser.add_argument("--batch_size", default=1, type=int, help="Batch Size")
parser.add_argument("--num_epochs", default=50, type=int, help="Number of training epochs")
parser.add_argument("--dropout", default=0.5, type=float, help="")
args = parser.parse_args()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print("The calculations will be performed on the device:", device)
# save paths
model_name = args.dataset + "_" + str(datetime.today().strftime('%m-%d'))
out_dir = os.path.abspath(os.path.join(args.run_folder, "./results/runs_GCN", args.dataset))
if not os.path.exists(out_dir + "/Models/"):
os.makedirs(out_dir + "/Models/")
save_path = out_dir + "/Models/" + model_name
print("Results will be saved in:", out_dir)
print(" The model will be saved as:", save_path)
print("Settings:",args)
# Load Graph data
# ==================================================
print("Loading data...")
use_degree_as_tag = False
fold = 0
graphs, num_classes = my_load_data(args.dataset, use_degree_as_tag)
train_graphs, test_graphs = separate_data(graphs, fold)
train_graphs, valid_graphs = split_data(train_graphs, perc=0.9)
print("# training graphs: ", len(train_graphs))
print_data_commposition(train_graphs)
print("# validation graphs: ", len(valid_graphs))
print_data_commposition(valid_graphs)
print("# test graphs: ", len(test_graphs))
print_data_commposition(test_graphs)
feature_dim_size = graphs[0].node_features.shape[1]
print("Loading data... finished!")
# Model
# =============================================================
# Create a GCN model
model = GCN_CN_v4(feature_dim_size=feature_dim_size, num_classes=num_classes, dropout=args.dropout).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
num_batches_per_epoch = int((len(train_graphs) - 1) / args.batch_size) + 1
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=num_batches_per_epoch, gamma=0.1)
# Main process
# =============================================================
print("Writing to {}\n".format(out_dir))
# Checkpoint directory
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
write_acc = open(checkpoint_prefix + '_acc.txt', 'w')
train_losses = []
train_accuracy = []
valid_losses = []
valid_accuracy = []
valid_accuracy_x_class = []
best_loss = math.inf
best_accuracy = 0
# Train loop
for epoch in range(1, args.num_epochs + 1):
epoch_start_time = time.time()
# train model
train(mmodel=model, optimizer=optimizer, train_graphs=train_graphs, batch_size=args.batch_size, num_classes=num_classes, device=device)
# evaluate on train data
train_loss, train_acc, _ = evaluate(mmodel=model, current_graphs=train_graphs, batch_size=args.batch_size, num_classes=num_classes, device=device, out_dir=out_dir)
# evaluate on validation data
valid_loss, valid_acc, valid_acc_x_class = evaluate(mmodel=model, current_graphs=valid_graphs, batch_size=args.batch_size, num_classes=num_classes, device=device, out_dir=out_dir)
print('| epoch {:3d} | time: {:5.2f}s | train loss {:5.2f} | valid loss {:5.2f} | valid acc {:5.2f} | '.format(epoch, (time.time() - epoch_start_time), train_loss, valid_loss, valid_acc*100))
train_losses.append(train_loss)
train_accuracy.append(train_acc)
valid_losses.append(valid_loss)
valid_accuracy.append(valid_acc)
valid_accuracy_x_class.append(valid_acc_x_class)
# Step the LR scheduler if the current epoch's train loss is worse than the mean of the previous five epochs
if epoch > 5 and train_losses[-1] > np.mean(train_losses[-6:-1]):
scheduler.step()
print("Scheduler step")
# save if best performance ever
if best_accuracy < valid_acc or (best_accuracy == valid_acc and best_loss > valid_loss):
print("Save at epoch: {:3d} at valid loss: {:5.2f} and valid accuracy: {:5.2f}".format(epoch, valid_loss, valid_acc*100))
best_accuracy = valid_acc
best_loss = valid_loss
torch.save(model.state_dict(), save_path)
write_acc.write('epoch ' + str(epoch) + ' fold ' + str(fold) + ' acc ' + str(valid_acc*100) + '%\n')
# Plot results
# =============================================================
valid_accuracy_x_class = np.array(valid_accuracy_x_class).T
# plot training flow
plot_training_flow(ys=[train_losses, valid_losses], names=["train", "validation"], path=out_dir, fig_name="/losses_flow", y_axis="Loss")
plot_training_flow(ys=[np.array(train_accuracy)*100, np.array(valid_accuracy)*100], names=["train","validation"], path=out_dir, fig_name="/accuracy_flow", y_axis="Accuracy")
# Evaluate on test data
model.load_state_dict(torch.load(save_path))
test_loss, test_acc, _ = evaluate(mmodel=model, current_graphs=test_graphs, batch_size=args.batch_size, num_classes=num_classes, device=device, out_dir=out_dir, last_round=True)
print("Evaluate: loss on test: ", test_loss, " and accuracy: ", test_acc * 100)
write_acc.close() | 5,816 | 45.166667 | 195 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/Graph_classification/train_utils.py | from sklearn import metrics
from sklearn.metrics import confusion_matrix
import numpy as np
import torch
import seaborn as sns
import matplotlib.pyplot as plt
def label_smoothing(true_labels: torch.Tensor, classes: int, smoothing=0.1):
"""
if smoothing == 0, this reduces to one-hot encoding;
if 0 < smoothing < 1, the targets are smoothed.
"""
assert 0 <= smoothing < 1
confidence = 1.0 - smoothing
label_shape = torch.Size((true_labels.size(0), classes))
with torch.no_grad():
true_dist = torch.empty(size=label_shape, device=true_labels.device)
true_dist.fill_(smoothing / (classes - 1))
true_labels = true_labels.type(torch.int64)
true_dist.scatter_(1, true_labels.data.unsqueeze(1), confidence)
return true_dist
def get_Adj_matrix(batch_graph):
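# Offset each graph's edge indices by the running node count so the whole batch
# becomes one disjoint (block-diagonal) graph that a single GCN pass can process.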
edge_mat_list = []
start_idx = [0]
for i, graph in enumerate(batch_graph):
start_idx.append(start_idx[i] + len(graph.g))
edge_mat_list.append(graph.edge_mat + start_idx[i])
Adj_block_idx = np.concatenate(edge_mat_list, 1)
# Adj_block_elem = np.ones(Adj_block_idx.shape[1])
Adj_block_idx_row = Adj_block_idx[0,:]
Adj_block_idx_cl = Adj_block_idx[1,:]
return Adj_block_idx_row, Adj_block_idx_cl
def get_graphpool(batch_graph, device):
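# Build a sparse (num_graphs x total_nodes) indicator matrix: entry (i, j) is 1
# when node j belongs to graph i, so multiplying it by node features sums them
# per graph.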
start_idx = [0]
# compute the padded neighbor list
for i, graph in enumerate(batch_graph):
start_idx.append(start_idx[i] + len(graph.g))
idx = []
elem = []
for i, graph in enumerate(batch_graph):
elem.extend([1] * len(graph.g))
idx.extend([[i, j] for j in range(start_idx[i], start_idx[i + 1], 1)])
elem = torch.FloatTensor(elem)
idx = torch.LongTensor(idx).transpose(0, 1)
graph_pool = torch.sparse.FloatTensor(idx, elem, torch.Size([len(batch_graph), start_idx[-1]]))
return graph_pool.to(device)
def get_batch_data(batch_graph, device):
X_concat = np.concatenate([graph.node_features for graph in batch_graph], 0)
X_concat = torch.from_numpy(X_concat).to(device)
# graph-level sum pooling
adjj = np.concatenate([graph.edge_mat for graph in batch_graph], 0)
adjj = torch.from_numpy(adjj).to(device)
graph_labels = np.array([graph.label for graph in batch_graph])
graph_labels = torch.from_numpy(graph_labels).to(device)
return X_concat, graph_labels, adjj.to(torch.int64)
def cross_entropy(pred, soft_targets): # use nn.CrossEntropyLoss if not using soft labels in Line 159
logsoftmax = torch.nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(- soft_targets * logsoftmax(pred), 1))
def train(mmodel, optimizer, train_graphs, batch_size, num_classes, device):
# Turn on the train mode
mmodel.train()
indices = np.arange(0, len(train_graphs))
np.random.shuffle(indices)
for start in range(0, len(train_graphs), batch_size):
end = start + batch_size
selected_idx = indices[start:end]
batch_graph = [train_graphs[idx] for idx in selected_idx]
# load graph batch
X_concat, graph_labels, adjj = get_batch_data(batch_graph, device=device)
graph_labels = label_smoothing(graph_labels, num_classes)
optimizer.zero_grad()
# model probability scores
prediction_scores = mmodel(adjj, X_concat)
loss = cross_entropy(prediction_scores, graph_labels)
# backward pass
loss.backward()
torch.nn.utils.clip_grad_norm_(mmodel.parameters(), 0.5) # prevent the exploding gradient problem
optimizer.step()
def evaluate(mmodel, current_graphs, batch_size, num_classes, device, out_dir, last_round=False):
# Turn on the evaluation mode
mmodel.eval()
total_loss = 0.
with torch.no_grad():
# evaluating
prediction_output = []
idx = np.arange(len(current_graphs))
for i in range(0, len(current_graphs), batch_size):
sampled_idx = idx[i:i + batch_size]
if len(sampled_idx) == 0:
continue
batch_test_graphs = [current_graphs[j] for j in sampled_idx]
# load graph batch
test_X_concat, test_graph_labels, test_adj = get_batch_data(batch_test_graphs, device=device)
# model probability scores
prediction_scores = mmodel(test_adj, test_X_concat)
test_graph_labels = label_smoothing(test_graph_labels, num_classes)
loss = cross_entropy(prediction_scores, test_graph_labels)
total_loss += loss.item()
prediction_output.append(prediction_scores.detach())
# model probabilities output
prediction_output = torch.cat(prediction_output, 0)
# predicted labels
predictions = prediction_output.max(1, keepdim=True)[1]
# real labels
labels = torch.LongTensor([graph.label for graph in current_graphs]).to(device)
# num correct predictions
correct = predictions.eq(labels.view_as(predictions)).sum().cpu().item()
accuracy = correct / float(len(current_graphs))
# confusion matrix and class accuracy
matrix = confusion_matrix(np.array(labels.cpu()), np.array(predictions.cpu()))
matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]
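# Row-normalising the confusion matrix turns its diagonal into per-class recall.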
acc_x_class = matrix.diagonal() * 100
if last_round:
# plot and save statistics
print("Accuracy per class :")
print(acc_x_class)
with open(out_dir + "/test_results.txt", 'w') as f:
f.write("Evaluate: loss on test: "+ str(total_loss/len(current_graphs)) + " and accuracy: " + str(accuracy * 100)+"\n")
f.write("Accuracy per class : "+ str(matrix.diagonal())+"\n")
f.write(metrics.classification_report(np.array(labels.cpu()), np.array(predictions.cpu()), digits=3))
ax = sns.heatmap(matrix, annot=True, cmap='Blues')
ax.set_title('Confusion Matrix')
plt.savefig(out_dir + "/Confusion Matrix")
return total_loss/len(current_graphs), accuracy, acc_x_class | 5,946 | 37.869281 | 131 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/Graph_classification/utils/util.py | import os
import networkx as nx
import numpy as np
import random
import scipy.sparse as sp
from sklearn.model_selection import StratifiedKFold
"""Adapted from https://github.com/weihua916/powerful-gnns/blob/master/util.py"""
class S2VGraph(object):
def __init__(self, g, label, node_tags=None, node_features=None, name_graph=None):
'''
g: a networkx graph
label: an integer graph label
node_tags: a list of integer node tags
node_features: a torch float tensor, one-hot representation of the tag that is used as input to neural nets
edge_mat: a torch long tensor, contain edge list, will be used to create torch sparse tensor
neighbors: list of neighbors (without self-loop)
'''
self.label = label
self.g = g
self.node_tags = node_tags
self.neighbors = []
self.node_features = 0
self.edge_mat = 0
self.max_neighbor = 0
self.name_graph = name_graph
def my_load_data(dataset, degree_as_tag=False):
g_list = []
label_dict = {}
feat_dict = {}
dataset_path = "../Datasets/" + dataset+ "/"
# Load each graph; each node's value is its type
for dir in os.listdir(dataset_path):
if os.path.isdir(dataset_path + dir):
print("Loading class:",dir)
for file in os.listdir(dataset_path + dir + "/"):
if file.endswith(".graphml"):
g = nx.read_graphml(dataset_path + dir + "/" + file)
l = int(dir)
node_tags = []
if not l in label_dict:
mapped = len(label_dict)
label_dict[l] = mapped
for node in g:
node_lab = g.nodes[node]["type"]
if not node_lab in feat_dict:
mapped = len(feat_dict)
feat_dict[node_lab] = mapped
node_tags.append(feat_dict[node_lab])
g_list.append(S2VGraph(g, l, node_tags, name_graph=file))
# add labels and edge_mat
for g in g_list:
# My graphs use string node ids such as "#1", but integer ids are needed here, so convert them.
dict_node_id = {}
for node in g.g:
idx = node
if not idx in dict_node_id:
mapped = len(dict_node_id)
dict_node_id[idx] = mapped
g.neighbors = [[] for i in range(len(g.g))]
for i, j in g.g.edges():
int_i = dict_node_id[i]
int_j = dict_node_id[j]
g.neighbors[int_i].append(int_j)
g.neighbors[int_j].append(int_i)
degree_list = []
for i in range(len(g.g)):
g.neighbors[i] = g.neighbors[i]
degree_list.append(len(g.neighbors[i]))
g.max_neighbor = max(degree_list)
g.label = label_dict[g.label]
edges = []
for pair in g.g.edges():
g1, g2 = pair
edges.append([dict_node_id[g1], dict_node_id[g2]])
edges.extend([[i, j] for j, i in edges])
deg_list = list(dict(g.g.degree(range(len(g.g)))).values())
g.edge_mat = np.transpose(np.array(edges, dtype=np.int32), (1, 0))
if degree_as_tag:
for g in g_list:
g.node_tags = list(dict(g.g.degree).values())
# Extracting unique tag labels
tagset = set([])
for g in g_list:
tagset = tagset.union(set(g.node_tags))
tagset = list(tagset)
tag2index = {tagset[i]: i for i in range(len(tagset))}
for g in g_list:
g.node_features = np.zeros((len(g.node_tags), len(tagset)), dtype=np.float32)
g.node_features[range(len(g.node_tags)), [tag2index[tag] for tag in g.node_tags]] = 1
print('# classes: %d' % len(label_dict))
print('# maximum node tag: %d' % len(tagset))
print("# data: %d" % len(g_list))
return g_list, len(label_dict)
def load_data(dataset, degree_as_tag):
'''
dataset: name of the dataset
degree_as_tag: if True, use the node degree as the node tag
'''
print('loading data')
g_list = []
label_dict = {}
feat_dict = {}
with open('../dataset/%s/%s.txt' % (dataset, dataset), 'r') as f:
n_g = int(f.readline().strip())
for i in range(n_g):
row = f.readline().strip().split()
n, l = [int(w) for w in row]
# n is the number of nodes that follow
if not l in label_dict:
mapped = len(label_dict)
label_dict[l] = mapped
g = nx.Graph()
node_tags = []
node_features = []
n_edges = 0
# for each node j
for j in range(n):
g.add_node(j)
row = f.readline().strip().split()
tmp = int(row[1]) + 2
if tmp == len(row):
# no node attributes
row = [int(w) for w in row]
attr = None
else:
row, attr = [int(w) for w in row[:tmp]], np.array([float(w) for w in row[tmp:]])
if not row[0] in feat_dict:
mapped = len(feat_dict)
feat_dict[row[0]] = mapped
node_tags.append(feat_dict[row[0]])
if tmp > len(row):
node_features.append(attr)
n_edges += row[1]
for k in range(2, len(row)):
g.add_edge(j, row[k])
if node_features != []:
node_features = np.stack(node_features)
node_feature_flag = True
else:
node_features = None
node_feature_flag = False
assert len(g) == n
# g is the graph, l is the graph's class, node_tags is a list with one attribute per node
g_list.append(S2VGraph(g, l, node_tags))
# add labels and edge_mat
for g in g_list:
g.neighbors = [[] for i in range(len(g.g))]
for i, j in g.g.edges():
g.neighbors[i].append(j)
g.neighbors[j].append(i)
degree_list = []
for i in range(len(g.g)):
g.neighbors[i] = g.neighbors[i]
degree_list.append(len(g.neighbors[i]))
g.max_neighbor = max(degree_list)
g.label = label_dict[g.label]
edges = [list(pair) for pair in g.g.edges()]
edges.extend([[i, j] for j, i in edges])
deg_list = list(dict(g.g.degree(range(len(g.g)))).values())
g.edge_mat = np.transpose(np.array(edges, dtype=np.int32), (1,0))
if degree_as_tag:
for g in g_list:
g.node_tags = list(dict(g.g.degree).values())
# Extracting unique tag labels
tagset = set([])
for g in g_list:
tagset = tagset.union(set(g.node_tags))
tagset = list(tagset)
tag2index = {tagset[i]:i for i in range(len(tagset))}
for g in g_list:
g.node_features = np.zeros((len(g.node_tags), len(tagset)), dtype=np.float32)
g.node_features[range(len(g.node_tags)), [tag2index[tag] for tag in g.node_tags]] = 1
print('# classes: %d' % len(label_dict))
print('# maximum node tag: %d' % len(tagset))
print("# data: %d" % len(g_list))
return g_list, len(label_dict)
def separate_data(graph_list, fold_idx, seed=0):
assert 0 <= fold_idx and fold_idx < 10, "fold_idx must be from 0 to 9."
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
labels = [graph.label for graph in graph_list]
idx_list = []
for idx in skf.split(np.zeros(len(labels)), labels):
idx_list.append(idx)
train_idx, test_idx = idx_list[fold_idx]
train_graph_list = [graph_list[i] for i in train_idx]
test_graph_list = [graph_list[i] for i in test_idx]
return train_graph_list, test_graph_list
"""Get indexes of train and test sets"""
def separate_data_idx(graph_list, fold_idx, seed=0):
assert 0 <= fold_idx and fold_idx < 10, "fold_idx must be from 0 to 9."
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
labels = [graph.label for graph in graph_list]
idx_list = []
for idx in skf.split(np.zeros(len(labels)), labels):
idx_list.append(idx)
train_idx, test_idx = idx_list[fold_idx]
return train_idx, test_idx
"""Convert sparse matrix to tuple representation."""
def sparse_to_tuple(sparse_mx):
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
| 8,984 | 32.778195 | 119 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/PointNet_Classifiication/pointnet2_cls_ssg.py | import torch.nn as nn
import torch.nn.functional as F
from pointnet2_utils import PointNetSetAbstraction
class get_model(nn.Module):
def __init__(self,num_class,normal_channel=True):
super(get_model, self).__init__()
in_channel = 6 if normal_channel else 3
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstraction(npoint=512, radius=0.2, nsample=32, in_channel=in_channel, mlp=[64, 64, 128], group_all=False)
self.sa2 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64, in_channel=128 + 3, mlp=[128, 128, 256], group_all=False)
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=256 + 3, mlp=[256, 512, 1024], group_all=True)
self.fc1 = nn.Linear(1024, 512)
self.bn1 = nn.BatchNorm1d(512)
self.drop1 = nn.Dropout(0.4)
self.fc2 = nn.Linear(512, 256)
self.bn2 = nn.BatchNorm1d(256)
self.drop2 = nn.Dropout(0.4)
self.fc3 = nn.Linear(256, num_class)
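# Single-scale grouping (SSG) classifier: three set-abstraction levels
# (512 -> 128 -> all points) followed by a 1024-512-256-num_class MLP head with
# dropout and log-softmax.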
def forward(self, xyz):
B, _, _ = xyz.shape
if self.normal_channel:
norm = xyz[:, 3:, :]
xyz = xyz[:, :3, :]
else:
norm = None
l1_xyz, l1_points = self.sa1(xyz, norm)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
x = l3_points.view(B, 1024)
x = self.drop1(F.relu(self.bn1(self.fc1(x))))
x = self.drop2(F.relu(self.bn2(self.fc2(x))))
x = self.fc3(x)
x = F.log_softmax(x, -1)
return x, l3_points
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
def forward(self, pred, target, trans_feat):
total_loss = F.nll_loss(pred, target)
return total_loss
| 1,814 | 34.588235 | 139 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/PointNet_Classifiication/pointnet2_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from time import time
import numpy as np
def timeit(tag, t):
print("{}: {}s".format(tag, time() - t))
return time()
def pc_normalize(pc):
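# Centre the cloud on its centroid and divide by the largest point norm so it
# fits inside the unit sphere.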
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
def square_distance(src, dst):
"""
Calculate the squared Euclidean distance between each pair of points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = torch.max(distance, -1)[1]
return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):
"""
Input:
npoint:
radius:
nsample:
xyz: input points position data, [B, N, 3]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, npoint, nsample, 3]
new_points: sampled points data, [B, npoint, nsample, 3+D]
"""
B, N, C = xyz.shape
S = npoint
fps_idx = farthest_point_sample(xyz, npoint) # [B, npoint, C]
new_xyz = index_points(xyz, fps_idx)
idx = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C)
if points is not None:
grouped_points = index_points(points, idx)
new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) # [B, npoint, nsample, C+D]
else:
new_points = grouped_xyz_norm
if returnfps:
return new_xyz, new_points, grouped_xyz, fps_idx
else:
return new_xyz, new_points
def sample_and_group_all(xyz, points):
"""
Input:
xyz: input points position data, [B, N, 3]
points: input points data, [B, N, D]
Return:
new_xyz: sampled points position data, [B, 1, 3]
new_points: sampled points data, [B, 1, N, 3+D]
"""
device = xyz.device
B, N, C = xyz.shape
new_xyz = torch.zeros(B, 1, C).to(device)
grouped_xyz = xyz.view(B, 1, N, C)
if points is not None:
new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)
else:
new_points = grouped_xyz
return new_xyz, new_points
class PointNetSetAbstraction(nn.Module):
def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
super(PointNetSetAbstraction, self).__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
self.group_all = group_all
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, C, N]
points: input points data, [B, D, N]
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]
"""
xyz = xyz.permute(0, 2, 1)
if points is not None:
points = points.permute(0, 2, 1)
if self.group_all:
new_xyz, new_points = sample_and_group_all(xyz, points)
else:
new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points)
# new_xyz: sampled points position data, [B, npoint, C]
# new_points: sampled points data, [B, npoint, nsample, C+D]
new_points = new_points.permute(0, 3, 2, 1) # [B, C+D, nsample,npoint]
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
new_points = torch.max(new_points, 2)[0]
new_xyz = new_xyz.permute(0, 2, 1)
return new_xyz, new_points
class PointNetSetAbstractionMsg(nn.Module):
def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):
super(PointNetSetAbstractionMsg, self).__init__()
self.npoint = npoint
self.radius_list = radius_list
self.nsample_list = nsample_list
self.conv_blocks = nn.ModuleList()
self.bn_blocks = nn.ModuleList()
for i in range(len(mlp_list)):
convs = nn.ModuleList()
bns = nn.ModuleList()
last_channel = in_channel + 3
for out_channel in mlp_list[i]:
convs.append(nn.Conv2d(last_channel, out_channel, 1))
bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
self.conv_blocks.append(convs)
self.bn_blocks.append(bns)
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, C, N]
points: input points data, [B, D, N]
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]
"""
xyz = xyz.permute(0, 2, 1)
if points is not None:
points = points.permute(0, 2, 1)
B, N, C = xyz.shape
S = self.npoint
new_xyz = index_points(xyz, farthest_point_sample(xyz, S))
new_points_list = []
for i, radius in enumerate(self.radius_list):
K = self.nsample_list[i]
group_idx = query_ball_point(radius, K, xyz, new_xyz)
grouped_xyz = index_points(xyz, group_idx)
grouped_xyz -= new_xyz.view(B, S, 1, C)
if points is not None:
grouped_points = index_points(points, group_idx)
grouped_points = torch.cat([grouped_points, grouped_xyz], dim=-1)
else:
grouped_points = grouped_xyz
grouped_points = grouped_points.permute(0, 3, 2, 1) # [B, D, K, S]
for j in range(len(self.conv_blocks[i])):
conv = self.conv_blocks[i][j]
bn = self.bn_blocks[i][j]
grouped_points = F.relu(bn(conv(grouped_points)))
new_points = torch.max(grouped_points, 2)[0] # [B, D', S]
new_points_list.append(new_points)
new_xyz = new_xyz.permute(0, 2, 1)
new_points_concat = torch.cat(new_points_list, dim=1)
return new_xyz, new_points_concat
class PointNetFeaturePropagation(nn.Module):
def __init__(self, in_channel, mlp):
super(PointNetFeaturePropagation, self).__init__()
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm1d(out_channel))
last_channel = out_channel
def forward(self, xyz1, xyz2, points1, points2):
"""
Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S]
points1: input points data, [B, D, N]
points2: input points data, [B, D, S]
Return:
new_points: upsampled points data, [B, D', N]
"""
xyz1 = xyz1.permute(0, 2, 1)
xyz2 = xyz2.permute(0, 2, 1)
points2 = points2.permute(0, 2, 1)
B, N, C = xyz1.shape
_, S, _ = xyz2.shape
if S == 1:
interpolated_points = points2.repeat(1, N, 1)
else:
dists = square_distance(xyz1, xyz2)
dists, idx = dists.sort(dim=-1)
dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
dist_recip = 1.0 / (dists + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)
if points1 is not None:
points1 = points1.permute(0, 2, 1)
new_points = torch.cat([points1, interpolated_points], dim=-1)
else:
new_points = interpolated_points
new_points = new_points.permute(0, 2, 1)
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
return new_points
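# Minimal shape check for the geometry helpers; sizes are illustrative only.
if __name__ == "__main__":
    _src = torch.randn(2, 8, 3)                         # B=2, N=8 points, 3 coords
    _dst = torch.randn(2, 5, 3)                         # M=5 points
    print(square_distance(_src, _dst).shape)            # torch.Size([2, 8, 5])
    _idx = torch.zeros(2, 4, dtype=torch.long)          # pick point 0 four times
    print(index_points(_dst, _idx).shape)               # torch.Size([2, 4, 3])
    print(farthest_point_sample(_src, 4).shape)         # torch.Size([2, 4])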
| 11,168 | 34.233438 | 104 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/PointNet_Classifiication/pointnet2_cls_msg.py | import torch.nn as nn
import torch.nn.functional as F
from pointnet2_utils import PointNetSetAbstractionMsg, PointNetSetAbstraction
class get_model(nn.Module):
def __init__(self,num_class,normal_channel=True):
super(get_model, self).__init__()
in_channel = 3 if normal_channel else 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [16, 32, 128], in_channel,[[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.2, 0.4, 0.8], [32, 64, 128], 320,[[64, 64, 128], [128, 128, 256], [128, 128, 256]])
self.sa3 = PointNetSetAbstraction(None, None, None, 640 + 3, [256, 512, 1024], True)
self.fc1 = nn.Linear(1024, 512)
self.bn1 = nn.BatchNorm1d(512)
self.drop1 = nn.Dropout(0.4)
self.fc2 = nn.Linear(512, 256)
self.bn2 = nn.BatchNorm1d(256)
self.drop2 = nn.Dropout(0.5)
self.fc3 = nn.Linear(256, num_class)
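# Multi-scale grouping (MSG) classifier: each abstraction level groups points at
# several radii and concatenates the per-scale features before the next level,
# ending in the same 1024-512-256-num_class MLP head.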
def forward(self, xyz):
B, _, _ = xyz.shape
if self.normal_channel:
norm = xyz[:, 3:, :]
xyz = xyz[:, :3, :]
else:
norm = None
l1_xyz, l1_points = self.sa1(xyz, norm)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
x = l3_points.view(B, 1024)
x = self.drop1(F.relu(self.bn1(self.fc1(x))))
x = self.drop2(F.relu(self.bn2(self.fc2(x))))
x = self.fc3(x)
x = F.log_softmax(x, -1)
return x,l3_points
class get_loss(nn.Module):
def __init__(self):
super(get_loss, self).__init__()
def forward(self, pred, target, trans_feat):
total_loss = F.nll_loss(pred, target)
return total_loss
| 1,797 | 33.576923 | 138 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/PointNet_Classifiication/train_pointNet.py | import os
import sys
import torch
import numpy as np
import datetime
import logging
import provider
import importlib
import shutil
import argparse
from pathlib import Path
from tqdm import tqdm
from ModelNetDataLoader import ModelNetDataLoader
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('training')
parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode')
parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
parser.add_argument('--batch_size', type=int, default=12, help='batch size in training')
parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]')
parser.add_argument('--num_category', default=8, type=int, choices=[6, 10, 40], help='training on ModelNet10/40')
parser.add_argument('--epoch', default=70, type=int, help='number of epoch in training')
parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')
parser.add_argument('--log_dir', type=str, default=None, help='experiment root')
parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate')
parser.add_argument('--use_normals', action='store_true', default=False, help='use normals')
parser.add_argument('--process_data', action='store_true', default=True, help='save data offline')
parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampling')
return parser.parse_args()
def inplace_relu(m):
classname = m.__class__.__name__
if classname.find('ReLU') != -1:
m.inplace=True
def test(model, loader, num_class=40):
mean_correct = []
class_acc = np.zeros((num_class, 3))
classifier = model.eval()
for j, (points, target) in tqdm(enumerate(loader), total=len(loader)):
if not args.use_cpu:
points, target = points.cuda(), target.cuda()
points = points.transpose(2, 1)
pred, _ = classifier(points)
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target.cpu()):
classacc = pred_choice[target == cat].eq(target[target == cat].long().data).cpu().sum()
class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0])
class_acc[cat, 1] += 1
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 1]
class_acc = np.mean(class_acc[:, 2])
instance_acc = np.mean(mean_correct)
return instance_acc, class_acc
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
exp_dir = Path('./log/')
exp_dir.mkdir(exist_ok=True)
exp_dir = exp_dir.joinpath('classification')
exp_dir.mkdir(exist_ok=True)
if args.log_dir is None:
exp_dir = exp_dir.joinpath(timestr)
else:
exp_dir = exp_dir.joinpath(args.log_dir)
exp_dir.mkdir(exist_ok=True)
checkpoints_dir = exp_dir.joinpath('checkpoints/')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = exp_dir.joinpath('logs/')
log_dir.mkdir(exist_ok=True)
'''LOG'''
args = parse_args()
    args.model = "pointnet_cls"  # previously pointnet_cls
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
'''DATA LOADING'''
log_string('Load dataset ...')
data_path = 'data/modelnet40_normal_resampled/'
train_dataset = ModelNetDataLoader(root=data_path, args=args, split='train', process_data=args.process_data)
test_dataset = ModelNetDataLoader(root=data_path, args=args, split='test', process_data=args.process_data)
trainDataLoader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
testDataLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=10)
'''MODEL LOADING'''
num_class = args.num_category
model = importlib.import_module(args.model)
shutil.copy('./models/%s.py' % args.model, str(exp_dir))
shutil.copy('models/pointnet2_utils.py', str(exp_dir))
shutil.copy('./train_classification.py', str(exp_dir))
classifier = model.get_model(num_class, normal_channel=args.use_normals)
criterion = model.get_loss()
classifier.apply(inplace_relu)
if not args.use_cpu:
classifier = classifier.cuda()
criterion = criterion.cuda()
try:
checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
start_epoch = checkpoint['epoch']
classifier.load_state_dict(checkpoint['model_state_dict'])
log_string('Use pretrain model')
except:
log_string('No existing model, starting training from scratch...')
start_epoch = 0
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
global_epoch = 0
global_step = 0
best_instance_acc = 0.0
best_class_acc = 0.0
    '''TRAINING'''
logger.info('Start training...')
for epoch in range(start_epoch, args.epoch):
log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
mean_correct = []
classifier = classifier.train()
scheduler.step()
for batch_id, (points, target) in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
optimizer.zero_grad()
points = points.data.numpy()
points = provider.random_point_dropout(points)
points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
points = torch.Tensor(points)
points = points.transpose(2, 1)
if not args.use_cpu:
points, target = points.cuda(), target.cuda()
pred, trans_feat = classifier(points)
loss = criterion(pred, target.long(), trans_feat)
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
loss.backward()
optimizer.step()
global_step += 1
train_instance_acc = np.mean(mean_correct)
log_string('Train Instance Accuracy: %f' % train_instance_acc)
with torch.no_grad():
instance_acc, class_acc = test(classifier.eval(), testDataLoader, num_class=num_class)
if (instance_acc >= best_instance_acc):
best_instance_acc = instance_acc
best_epoch = epoch + 1
if (class_acc >= best_class_acc):
best_class_acc = class_acc
log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))
log_string('Best Instance Accuracy: %f, Class Accuracy: %f' % (best_instance_acc, best_class_acc))
if (instance_acc >= best_instance_acc):
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best_model.pth'
log_string('Saving at %s' % savepath)
state = {
'epoch': best_epoch,
'instance_acc': instance_acc,
'class_acc': class_acc,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
global_epoch += 1
logger.info('End of training...')
if __name__ == '__main__':
args = parse_args()
main(args)
| 8,887 | 37.812227 | 138 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/PointNet_Classifiication/pointnet_cls.py | import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from pointnet_utils import PointNetEncoder, feature_transform_reguliarzer
class get_model(nn.Module):
def __init__(self, k=40, normal_channel=True):
super(get_model, self).__init__()
if normal_channel:
channel = 6
else:
channel = 3
self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k)
self.dropout = nn.Dropout(p=0.4)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.relu = nn.ReLU()
def forward(self, x):
x, trans, trans_feat = self.feat(x)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.dropout(self.fc2(x))))
x = self.fc3(x)
x = F.log_softmax(x, dim=1)
return x, trans_feat
class get_loss(torch.nn.Module):
def __init__(self, mat_diff_loss_scale=0.001):
super(get_loss, self).__init__()
self.mat_diff_loss_scale = mat_diff_loss_scale
def forward(self, pred, target, trans_feat):
loss = F.nll_loss(pred, target)
mat_diff_loss = feature_transform_reguliarzer(trans_feat)
total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale
return total_loss
| 1,414 | 33.512195 | 94 | py |
3D_STEP_Classification | 3D_STEP_Classification-main/PointNet_Classifiication/ModelNetDataLoader.py | '''
@author: Xu Yan
@file: ModelNet.py
@time: 2021/3/19 15:51
'''
import os
import numpy as np
import warnings
import pickle
from tqdm import tqdm
from torch.utils.data import Dataset
warnings.filterwarnings('ignore')
def pc_normalize(pc):
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
def farthest_point_sample(point, npoint):
"""
Input:
xyz: pointcloud data, [N, D]
npoint: number of samples
Return:
        point: sampled pointcloud, [npoint, D]
"""
N, D = point.shape
xyz = point[:,:3]
centroids = np.zeros((npoint,))
distance = np.ones((N,)) * 1e10
farthest = np.random.randint(0, N)
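    # Greedy farthest point sampling: at each step, record the current farthest point as a
    # centroid, update every point's distance to its nearest chosen centroid, then pick the
    # point whose nearest-centroid distance is largest as the next candidate.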
for i in range(npoint):
centroids[i] = farthest
centroid = xyz[farthest, :]
dist = np.sum((xyz - centroid) ** 2, -1)
mask = dist < distance
distance[mask] = dist[mask]
farthest = np.argmax(distance, -1)
point = point[centroids.astype(np.int32)]
return point
class ModelNetDataLoader(Dataset):
def __init__(self, root, args, split='train', process_data=False):
self.root = root
self.npoints = args.num_point
self.process_data = process_data
self.uniform = args.use_uniform_sample
self.use_normals = args.use_normals
self.num_category = args.num_category
if self.num_category == 10:
self.catfile = os.path.join(self.root, 'modelnet10_shape_names.txt')
else:
self.catfile = os.path.join(self.root, 'modelnet40_shape_names.txt')
self.cat = [line.rstrip() for line in open(self.catfile)]
self.classes = dict(zip(self.cat, range(len(self.cat))))
shape_ids = {}
if self.num_category == 10:
shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_train.txt'))]
shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_test.txt'))]
else:
shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))]
shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))]
assert (split == 'train' or split == 'test')
shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]]
self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i]) + '.txt') for i
in range(len(shape_ids[split]))]
print('The size of %s data is %d' % (split, len(self.datapath)))
if self.uniform:
self.save_path = os.path.join(root, 'modelnet%d_%s_%dpts_fps.dat' % (self.num_category, split, self.npoints))
else:
self.save_path = os.path.join(root, 'modelnet%d_%s_%dpts.dat' % (self.num_category, split, self.npoints))
if self.process_data:
if not os.path.exists(self.save_path):
                print('Processing data %s (only runs the first time)...' % self.save_path)
self.list_of_points = [None] * len(self.datapath)
self.list_of_labels = [None] * len(self.datapath)
for index in tqdm(range(len(self.datapath)), total=len(self.datapath)):
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
cls = np.array([cls]).astype(np.int32)
point_set = np.loadtxt(fn[1], delimiter=',').astype(np.float32)
if self.uniform:
point_set = farthest_point_sample(point_set, self.npoints)
else:
point_set = point_set[0:self.npoints, :]
self.list_of_points[index] = point_set
self.list_of_labels[index] = cls
with open(self.save_path, 'wb') as f:
pickle.dump([self.list_of_points, self.list_of_labels], f)
else:
print('Load processed data from %s...' % self.save_path)
with open(self.save_path, 'rb') as f:
self.list_of_points, self.list_of_labels = pickle.load(f)
def __len__(self):
return len(self.datapath)
def _get_item(self, index):
if self.process_data:
point_set, label = self.list_of_points[index], self.list_of_labels[index]
else:
fn = self.datapath[index]
cls = self.classes[self.datapath[index][0]]
label = np.array([cls]).astype(np.int32)
point_set = np.loadtxt(fn[1], delimiter=',').astype(np.float32)
if self.uniform:
point_set = farthest_point_sample(point_set, self.npoints)
else:
point_set = point_set[0:self.npoints, :]
point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])
if not self.use_normals:
point_set = point_set[:, 0:3]
return point_set, label[0]
def __getitem__(self, index):
return self._get_item(index)
if __name__ == '__main__':
    import torch
    import argparse
    # example arguments (hypothetical values mirroring the training script's defaults)
    args = argparse.Namespace(num_point=1024, use_uniform_sample=False,
                              use_normals=False, num_category=40)
    data = ModelNetDataLoader('/data/modelnet40_normal_resampled/', args, split='train')
DataLoader = torch.utils.data.DataLoader(data, batch_size=12, shuffle=True)
for point, label in DataLoader:
print(point.shape)
print(label.shape)
| 5,470 | 36.217687 | 121 | py |
How-to-0wn-NAS-in-Your-Spare-Time | How-to-0wn-NAS-in-Your-Spare-Time-master/reconstruct_malconv.py | """
Reconstruct the MalConv architecture from the Flush+Reload results
"""
# basic
import os
import json
import argparse
from math import ceil
from copy import deepcopy
from itertools import product
# externals (numpy, networkx, matplotlib)
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
# custom utils
from utils.loaders import load_from_csv
from utils.computations import _COMPUTE_UNARY_PT, _COMPUTE_BINARY_PT, _check_computation
from utils.architectures import _same_inout_dimensions, _sane_narrow_connection
# ------------------------------------------------------------------------------
# Global variables
# ------------------------------------------------------------------------------
_save_dir = os.path.join('results', 'reconstruct', 'malconv')
_datafile = 'dataset'
_zerocase = 1 # the margin that we allow when the processing time is '0'
_lin_case = 0.1 # the margin that we allow (for the linear layers)
_convcase = 0.05 # " (for the conv layers)
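# These margins define the search windows used in load_parameter_database(): a layer observed
# to take time t is matched against profile entries whose recorded time lies in
# [t * (1 - margin), t * (1 + margin)] (scaled by the Flush+Reload resolution for the 'tsc' timer).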
# ------------------------------------------------------------------------------
# Reconstruction code
# ------------------------------------------------------------------------------
def reconstruct_malconv( \
csvfile, indim, outdim, \
timer, dataloc, resolution=2000, verbose=True):
# load the data from the csvfile
csv_data = load_from_csv(csvfile)
assert csv_data, 'Error: cannot read the content of [{}]'.format(csvfile)
csv_data = [ \
(each_data[0], float(each_data[1]), float(each_data[2])) for each_data in csv_data]
print ('[reconstruction] load the total [{}] events'.format(len(csv_data)))
# reconstruction of the computational graphs
# based on the architecture characteristics
computational_graphs = reconstruct_computational_graphs(csv_data, stdout=verbose)
print ('[reconstruction] identified ' + \
'[{}] computational graphs'.format(len(computational_graphs)))
# decide the reconstructed computational graphs
# with the possible attribute parameter combinations
architecture_graphs = \
reconstruct_attribute_parameters( \
computational_graphs, csv_data, timer, dataloc, resolution)
print ('[reconstruction] we decided the ' + \
'candidates into [{}]'.format(len(architecture_graphs)))
# prune with more rules
architecture_graphs = \
prune_reconstructed_architectures(architecture_graphs)
print ('[reconstruction] we pruned the ' + \
'candidates into [{}]'.format(len(architecture_graphs)))
# returns the architectures
return architecture_graphs
def reconstruct_computational_graphs(fcalls, stdout=False):
# output the function calls (input)
if stdout:
print ('----- Func. calls -----')
for ecall in fcalls: print (ecall)
print ('-----------------------')
# recursively create the graph
extract_fnames = [fcall[0] for _, fcall in enumerate(fcalls)]
compute_graphs = _recursive_reconstruction_graphs(extract_fnames)
# check if the store location exists
store_loc = os.path.join(_save_dir, 'computational_graphs')
if not os.path.exists(store_loc): os.makedirs(store_loc)
# store only the graphs
compute_graphs = [each_graph for (_, each_graph) in compute_graphs]
# store the visualizations
options = {
'node_color': 'red',
'node_size': 40,
'width': 1,
'alpha': 0.8,
'arrowstyle': '-|>',
'arrowsize': 8,
'font_size': 10,
}
for cidx, each_graph in enumerate(compute_graphs):
nx.draw_networkx(each_graph, arrows=True, **options)
plt.savefig(os.path.join(store_loc, 'arch_{}.pdf'.format(cidx)))
plt.clf()
return compute_graphs
def _recursive_reconstruction_graphs(efcalls):
# estimation starts from the back...
last_fcall = efcalls.pop()
# ----------------------------------------
# Base case: when there is no more calls
# ----------------------------------------
if not efcalls:
# : create a graph and return with the last element
newG = nx.DiGraph()
newG.add_node(last_fcall)
new_data = (last_fcall, newG)
return [new_data]
# ----------------------------------------
# Recursion: perform DFSes
# ----------------------------------------
else:
if _check_computation(last_fcall, _COMPUTE_UNARY_PT):
# :: create an edge for each graph in the list
prevGs = _recursive_reconstruction_graphs(efcalls.copy())
uretGs = []
for (prev_fcall, prevG) in prevGs:
prevG.add_edge(prev_fcall, last_fcall)
uretGs.append((last_fcall, prevG))
return uretGs
elif _check_computation(last_fcall, _COMPUTE_BINARY_PT):
"""
For each preceding element except the previous one,
we assume a branch started from there and split the
elements in between into two sets.
ex. a -> b -> c -> e -> f
b -> d -> e
we assumed 'b' is the branch start, then:
split the [c, d] into two sets, i.e.,
([], [c,d]), ([c], [d]) -- candidates.
"""
bretGs = []
# :: assume, for each preceding element, a branch exists
for bidx in range(1, len(efcalls)-1):
fcalls_between = efcalls[bidx:len(efcalls)]
fcalls_remains = efcalls[:bidx]
# ::: Operations:
# - split the f-calls in between into two lists
            #  - recursively run the reconstructions for each
for sidx in range(len(fcalls_between)):
# - split
fcalls_branch1 = (fcalls_remains + fcalls_between[:sidx])
fcalls_branch2 = (fcalls_remains + fcalls_between[sidx:])
# - recursive reconstructions
prevGs_branch1 = _recursive_reconstruction_graphs(fcalls_branch1.copy())
prevGs_branch2 = _recursive_reconstruction_graphs(fcalls_branch2.copy())
# - combine them
prevGs = []
for (prev_fcall1, prevG1), (prev_fcall2, prevG2) \
in product(prevGs_branch1, prevGs_branch2):
bretG = nx.compose(prevG1, prevG2)
bretG.add_edge(prev_fcall1, last_fcall)
bretG.add_edge(prev_fcall2, last_fcall)
bretGs.append((last_fcall, bretG))
# end for (prev...
# end for sidx...
# end for bidx...
return bretGs
else:
assert False, ('[_recursive_reconstruction_graphs] ' + \
'Unknown layer - [{}]'.format(last_fcall))
# done (the function will not be reached at this point...)
def reconstruct_attribute_parameters( \
compute_graphs, fcalls, timer, dataloc, resolution):
# choose the parameter candidates
# for the (convolutional and linear) operations
parameter_database = \
load_parameter_database(fcalls, timer, dataloc, resolution)
# data-holders
architecture_graphs = []
compute_start = fcalls[len(fcalls)-1][0] # ex. '[12] Sigmoid', '[0] Embedding'
compute_termi = fcalls[0][0]
start_dimension = (1,)
termi_dimension = (2*1000*1000, 8)
# loop through the candidates
for cidx, each_compute_graph in enumerate(compute_graphs):
"""
Note: this recursion doesn't work for the different structures of
computational graphs: at the initial call, we provide the only
one structure in the list argument, ex. [each_compute_graph].
"""
# : do reconstruction of parameters
cur_arch_graphs = \
_recursive_reconstruction_parameters( \
[each_compute_graph], parameter_database, \
compute_start, start_dimension, \
compute_termi, termi_dimension, \
reverse=True, verbose=False)
# : store when it's not None
if cur_arch_graphs:
# :: reversing the results
cur_arch_graphs = [ \
each_graph.reverse() for each_graph in cur_arch_graphs]
# :: store to the list
architecture_graphs += cur_arch_graphs
# end for cidx...
# check if the store location exists
store_loc = os.path.join(_save_dir, 'architecture_candidates')
if not os.path.exists(store_loc): os.makedirs(store_loc)
# store the visualizations
options = {
'node_color': 'red',
'node_size': 40,
'width': 1,
'alpha': 0.8,
'arrowstyle': '-|>',
'arrowsize': 8,
'font_size': 10,
}
for cidx, each_agraph in enumerate(architecture_graphs):
# : relabel the node names to include the attribute parameters
new_alabels = {}
for node, data in each_agraph.nodes(data=True):
new_attr = '{}'.format(node)
if ('attr_param' in data) \
and data['attr_param']:
new_attr += ' - {}'.format(data['attr_param'])
new_alabels[node] = new_attr
new_agraph = nx.relabel_nodes(each_agraph, new_alabels, copy=True)
# : networkx - draw the graphs
nx.draw_networkx(new_agraph, arrows=True, **options)
plt.savefig(os.path.join(store_loc, 'arch_params_{}.pdf'.format(cidx)))
plt.clf()
return architecture_graphs
def load_parameter_database( \
efcalls, ctimer, dataloc, resolution):
# load the data from the stored locations
profile_datasets = {
'conv': _load_dataset('conv', dataloc, ctimer),
'fc' : _load_dataset('fc', dataloc, ctimer),
}
# data-holders
candidate_params = {}
# loop through the extracted calls
for (cname, cwhen, ctime) in efcalls:
# : estimate the candidate parameters
# based on the linear computation profiles...
if 'FC' in cname:
if not ctime:
# :: set the lower/upper bounds
lower_bound, upper_bound = 0, _zerocase
if ctimer == 'tsc':
lower_bound, upper_bound = \
lower_bound*resolution, upper_bound*resolution
# :: collect the candidates
cur_candidates = []
for each_profile in profile_datasets['fc']:
"""
Profile data: in, out, comp, time
"""
if lower_bound <= each_profile[3] <= upper_bound:
cur_candidates.append(tuple(each_profile))
cur_candidates = list(set(cur_candidates))
# :: add to the data-holder
candidate_params[cname] = cur_candidates
else:
# :: set the lower/upper bounds
lower_bound, upper_bound = \
ctime * (1. - _lin_case), ctime * (1. + _lin_case)
if ctimer == 'tsc':
lower_bound, upper_bound = \
lower_bound*resolution, upper_bound*resolution
# :: collect the candidates
cur_candidates = []
for each_profile in profile_datasets['fc']:
"""
Profile data: in, out, comp, time
"""
if lower_bound <= each_profile[3] <= upper_bound:
cur_candidates.append(tuple(each_profile))
cur_candidates = list(set(cur_candidates))
# :: add to the data-holder
candidate_params[cname] = cur_candidates
# : estimate the candidate parameters
# based on the linear computation profiles...
elif 'Convolution' in cname:
if not ctime:
# :: set the lower/upper bounds
lower_bound, upper_bound = 0, _zerocase
if ctimer == 'tsc':
lower_bound, upper_bound = \
lower_bound*resolution, upper_bound*resolution
# :: collect the candidates
cur_candidates = []
for each_profile in profile_datasets['conv']:
"""
Profile data: data, in, out, kern, stride, comp, time
"""
if lower_bound <= each_profile[6] <= upper_bound:
cur_candidates.append(tuple(each_profile))
cur_candidates = list(set(cur_candidates))
# :: add to the data-holder
candidate_params[cname] = cur_candidates
else:
# :: set the lower/upper bounds
lower_bound, upper_bound = \
ctime * (1. - _convcase), ctime * (1. + _convcase)
if ctimer == 'tsc':
lower_bound, upper_bound = \
lower_bound*resolution, upper_bound*resolution
# :: collect the candidates
cur_candidates = []
for each_profile in profile_datasets['conv']:
"""
Profile data: data, in, out, kern, stride, comp, time
"""
if lower_bound <= each_profile[6] <= upper_bound:
cur_candidates.append(tuple(each_profile))
cur_candidates = list(set(cur_candidates))
# :: add to the data-holder
candidate_params[cname] = cur_candidates
else:
continue
# end for (cname...
# return the candidates
return candidate_params
def _load_dataset(dataset, dataloc, ctimer):
# compose the datafile to use
if ('tsc' == ctimer) or ('schannel' == ctimer):
datafname = os.path.join( \
dataloc, dataset, '{}.{}.npy'.format(_datafile, ctimer))
else:
assert False, ('[_load_dataset] Error - undefined timer - {}, abort'.format(ctimer))
# read the numpy data
profile_dataset = np.load(datafname)
return profile_dataset
def _recursive_reconstruction_parameters( \
compute_graphs, parameter_database, \
compute_curr, curr_dout, compute_term, term_dout, \
reverse=True, verbose=False):
# reverse the connection (only at the first call)
if reverse:
compute_graphs = [ \
compute_graph.reverse() \
for compute_graph in compute_graphs]
# print-out the status
if verbose:
print ('[_recursive_recon_params] ' + \
'\'{} {}\''.format(compute_curr, curr_dout) + \
' to end \'{} {}\''.format(compute_term, term_dout))
# ----------------------------------------
# Ops: Set the output dimension of a node
# ----------------------------------------
for compute_graph in compute_graphs:
# : set the node attribute
nx.set_node_attributes( \
compute_graph, { compute_curr: { 'out_dim': curr_dout } })
# constructed architectures (to return)
candidate_architectures = []
# ----------------------------------------
# Base case: when we reached the terminal
# ----------------------------------------
if compute_curr == compute_term:
# : check if we have the dimension as we expected
if curr_dout != term_dout:
if verbose:
print ('[_recursive_recon_params] base, ' + \
'{} != {}, fail.'.format(curr_dout, term_dout))
# return nothing, empty architectures
return candidate_architectures
else:
# :: print-out the status, base case
if verbose:
print ('[_recursive_recon_params] base, ' + \
'{} == {}, success.'.format(curr_dout, term_dout))
# return the architecture as a list: to compute...
candidate_architectures += compute_graphs
return candidate_architectures
# ----------------------------------------
# Recursion
# ----------------------------------------
else:
"""
Estimate the candidate parameters
1. 'conv, linear': choose based on the profile databases
2. 'same' in/out: use the same dimensions
3. 'transpose':
(1) 1-dim: use the same dimension
(2) 2-dim: swap the two axises
4. 'narrow': list of possible candidates (4 -> 4, 5, 6, 7, 8)
5. TBD...
"""
# : estimate candidate parameters
if ('FC' in compute_curr):
parameter_candidates = \
_search_linear_database(parameter_database, compute_curr, curr_dout)
elif ('Convolution' in compute_curr):
# :: search the candidates....
parameter_candidates = \
_search_conv1d_database(parameter_database, compute_curr, curr_dout)
# : when it have the same in/out dimensions
elif _same_inout_dimensions(compute_curr):
# :: no specific info, by pass the information
parameter_candidates = [(curr_dout, None)]
# : when it is the transpose operation
elif 'transpose' in compute_curr:
# :: based on the output dimensions
# (1D - no op, 2D - swap, 3D - swap any two)
parameter_candidates = []
if len(curr_dout) == 1:
parameter_candidates.append((curr_dout, None))
elif len(curr_dout) == 2:
swap_dout = tuple(reversed(curr_dout))
parameter_candidates.append((swap_dout, None))
else:
assert False, \
('[_recursive_reconstruction_parameters] ' + \
'transpose with {}-dims is undefined'.format(len(curr_dout)))
# : MaxPool1d: list all the factors of the dimensions
elif 'MaxPool1d' in compute_curr:
# :: consider all the factors of a dimension
# (under the assumption of kernel == dimension)
parameter_candidates = []
for each_factor in _compute_factors(2*1000*1000):
# [Note] that max-pool only decreases the dimension-size
if curr_dout[0] < each_factor:
parameter_candidates.append( \
((curr_dout[0], each_factor), None))
# : 'narrow', splits the dimension into two intervals
elif 'narrow' in compute_curr:
# :: data-holders
expand_curr = int(curr_dout[0])
expand_term = int(term_dout[1])
# :: compute the candidates: split a dimension
parameter_candidates = []
for each_dim in range(expand_curr+1, expand_term+1):
parameter_candidates.append(
((each_dim, curr_dout[1]), None))
# : 'view', usually used to linearize a multi-dimensional tensor
# into one dimensional tensor, before the linear operation
elif 'view' in compute_curr:
raise NotImplementedError
# : undefined cases...
else:
assert False, \
('[_recursive_reconstruction_parameters] ' + \
'undefined computation - {}'.format(compute_curr))
"""
Recursive computations:
"""
for each_pcandidate in parameter_candidates:
cur_candidate_indim = each_pcandidate[0]
cur_candidate_pinfo = each_pcandidate[1]
# --------------------------------------------------
# Ops: Set the input dimension and info, at the node
# --------------------------------------------------
for compute_graph in compute_graphs:
# ::: set the node attribute
nx.set_node_attributes( \
compute_graph, { \
compute_curr: {
'in_dim': cur_candidate_indim,
'attr_param': cur_candidate_pinfo,
}})
# :: copy the entire compute graphs
cur_compute_graphs = deepcopy(compute_graphs)
# :: loop over the each computational graph
for each_compute_graph in cur_compute_graphs:
# ::: data containers
# (list of computational graphs from successors)
list_of_compute_graphs_from_successors = []
# ::: loop over the multiple successors
for each_successor in each_compute_graph.successors(compute_curr):
# :::: recursively call for each successors
each_successor_compute_graphs = \
_recursive_reconstruction_parameters( \
[each_compute_graph], parameter_database, \
each_successor, cur_candidate_indim, \
compute_term, term_dout, reverse=False)
# :::: store them
list_of_compute_graphs_from_successors.append( \
each_successor_compute_graphs)
# ::: end for each successor
# ::: error check if the list from any successor is empty, skip
if not all(list_of_compute_graphs_from_successors): continue
"""
Post-process based on the number of successors
- combine the multiple successors from the list
"""
for chosen_compute_graphs \
in product(*list_of_compute_graphs_from_successors):
# :::: combine the multiple graphs into one
merged_compute_graph = None
for chosen_idx, chosen_compute_graph \
in enumerate(chosen_compute_graphs):
# -> initially assign to the data-holder
if not chosen_idx:
merged_compute_graph = chosen_compute_graph
else:
merged_compute_graph = nx.compose( \
merged_compute_graph, chosen_compute_graph)
# end if ...
# end for chosen...
# :::: store to the estimated architectures
if merged_compute_graph:
candidate_architectures.append(merged_compute_graph)
# ::: end for (compute...)
# :: end for each_compute...
# : end for each_p....
# end if compute_curr...
# return the estimated architectures...
return candidate_architectures
def _search_linear_database(database, computation, outdim):
# output channel dimension: ex. from (1)-tuple to 1-int
out_chdim = outdim[0]
# data-holders
param_info = []
# search over the database
for (each_cin, each_cout, each_tot, each_time) in database[computation]:
if each_cout == out_chdim:
            # : convert the attributes into ints
each_cin = int(each_cin)
each_cout = int(each_cout)
# : store
# - channel input dimension: tuple
# - attributes : tuple
cur_chin = (each_cin,)
cur_attr = (each_cin, each_cout)
param_info.append((cur_chin, cur_attr))
# reduce the duplicates,
# and convert into the list of tuples
param_info = list(set(param_info))
# return candidates
return param_info
def _search_conv1d_database(database, computation, outdim):
# 1D, output dimension: ex. (8, 4000) -> 8
out_chdim, out_datdim = outdim
# data-holders
param_info = []
# search over the database
for (each_dat, each_cin, each_cout, \
each_kern, each_str, each_com, each_time) in database[computation]:
# : when the output channel matches, consider the details
if each_cout == out_chdim:
# :: store the dimension,
# when the computed out-dim
# is the same as the current out-dim
compute_outdim = ceil((each_dat - each_kern)/each_str + 1)
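            # a Conv1d over an input of length L with kernel K and stride S yields roughly
            # (L - K)/S + 1 outputs; only profile entries that reproduce the observed
            # output length are kept as candidates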
if compute_outdim == out_datdim:
                # : convert the attributes into ints
each_dat = int(each_dat)
each_cin = int(each_cin)
each_cout = int(each_cout)
each_kern = int(each_kern)
each_str = int(each_str)
# :: store
# - channel input dimension: tuple
# - attributes : tuple
cur_chin = (each_cin, each_dat)
cur_attr = (each_cin, each_cout, each_kern, each_str)
param_info.append((cur_chin, cur_attr))
# reduce the duplicates,
# and convert into the list of tuples
param_info = list(set(param_info))
# return param_info
return param_info
def _compute_factors(number):
factors = []
for each_num in range(1, number+1):
if number % each_num == 0:
factors.append(each_num)
return factors
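# e.g. _compute_factors(12) returns [1, 2, 3, 4, 6, 12]; used above to enumerate candidate
# MaxPool1d kernel sizes that evenly divide the 2,000,000-byte input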
def prune_reconstructed_architectures(archs):
tot_archs = []
# remove the architecture doesn't make any sense
for each_arch in archs:
"""
1: Check if the narrow connection is sane
"""
if not _sane_narrow_connection( \
each_arch, '[0] Embedding', '[12] Sigmoid'): continue
# add the survived ones
tot_archs.append(each_arch)
# end for each...
# check if the store location exists
store_loc = os.path.join(_save_dir, 'architectures')
if not os.path.exists(store_loc): os.makedirs(store_loc)
# store the architecture as a graph and data
options = {
'node_color': 'red',
'node_size': 40,
'width': 1,
'alpha': 0.8,
'arrowstyle': '-|>',
'arrowsize': 8,
'font_size': 10,
}
for aidx, each_arch in enumerate(tot_archs):
        # : write the graph to a YAML file
nx.write_yaml( \
each_arch, \
os.path.join(store_loc, 'architecture_{}.yaml'.format(aidx)))
# : relabel the node names to include the attribute parameters
new_nodes = {}
for each_node, each_data in each_arch.nodes(data=True):
each_attr = '{}'.format(each_node)
if ('attr_param' in each_data) \
and each_data['attr_param']:
each_attr += ' - {}'.format(each_data['attr_param'])
new_nodes[each_node] = each_attr
new_each_arch = nx.relabel_nodes(each_arch, new_nodes, copy=True)
# : networkx - draw the graphs
nx.draw_networkx(new_each_arch, arrows=True, **options)
plt.savefig(os.path.join(store_loc, 'architecture_{}.pdf'.format(aidx)))
plt.clf()
# end for aidx...
return tot_archs
# ------------------------------------------------------------------------------
# Main (for the command line compatibility)
# ------------------------------------------------------------------------------
if __name__ == '__main__':
# command line
parser = argparse.ArgumentParser( \
description='Reconstruct the MalConv architecture from the Flush+Reload trace.')
# load arguments
parser.add_argument('--in-dims', type=int, default=2000000,
help='the input dimension (default: 2000000)')
parser.add_argument('--outdims', type=int, default=1,
help='the output dimension (default: 1)')
# arguments about the profiled data
parser.add_argument('--c-timer', type=str, default='schannel',
                        help='the timer used to measure: tsc or schannel (default: schannel)')
parser.add_argument('--frcycle', type=int, default=2000,
help='Flush+Reload attack resolution (default: 2000 cycles)')
parser.add_argument('--dataloc', type=str, default='datasets/profile/pytorch',
help='the location where the dataset is (default: dataset/profile/pytorch)')
# arguments about the processed traces
parser.add_argument('--tr-file', type=str, default='',
help='output file (csv data) location')
parser.add_argument('--verbose', action='store_true',
help='display debug messages (default: false)')
# load inputs
args = parser.parse_args()
print (json.dumps(vars(args), indent=2))
# do reconstruction
reconstruct_malconv( \
args.tr_file, args.in_dims, args.outdims, \
args.c_timer, args.dataloc, resolution=args.frcycle, \
verbose=args.verbose)
# Fin.
| 29,075 | 38.028188 | 100 | py |
chronological_probing | chronological_probing-main/code/prober.py | import os
from transformers import AutoTokenizer, AutoModel, pipeline
import torch
from tqdm import tqdm
import pickle
import numpy as np
from nltk.tokenize import WordPunctTokenizer
import pandas as pd
import json
import re
class Embeddings(object):
def __init__(self, device, tokenizer_path,
output_path, delay=0):
self.device = device
self.output_path = output_path
# self.checkpoints = self.get_checkpoints(dir_path)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
self.delay = delay
def get_checkpoints(self, dir_path):
checkpoints = [os.path.join(dir_path, file) for file in os.listdir(dir_path) if re.match("checkpoint.*", file)]
return checkpoints
def load_files(self, dataset):
senteval = ['subj_number', 'top_constituents', 'tree_depth']
if dataset in senteval:
data = pd.read_csv(f'{dataset}.txt', sep='\t', header=None)
TRAIN = data[data[0] == 'tr']
TEST = data[data[0] == 'te']
X_train = TRAIN[2]
X_test = TEST[2]
y_train = TRAIN[1].values
y_test = TEST[1].values
elif dataset == 'person':
data = pd.read_csv('person.tsv', sep='\t')
TRAIN = data[data['subset'] == 'tr']
TEST = data[data['subset'] == 'te']
X_train = TRAIN['text']
X_test = TEST['text']
y_train = TRAIN['label'].values
y_test = TEST['label'].values
elif dataset == 'conn':
TRAIN = pd.read_csv('Conn_train.tsv', sep='\t')
TEST = pd.read_csv('Conn_test.tsv', sep='\t')
X_train = TRAIN[['sentence_1', 'sentence_2']].values.tolist()
X_test = TEST[['sentence_1', 'sentence_2']].values.tolist()
y_train = TRAIN['marker'].values
y_test = TEST['marker'].values
elif dataset == 'DC' or dataset == 'SP':
TRAIN = pd.read_csv(f'{dataset}_train.csv')
TEST = pd.read_csv(f'{dataset}_test.csv')
X_train = TRAIN['sentence'].apply(eval)
X_test = TEST['sentence'].apply(eval)
y_train = TRAIN['label'].values
y_test = TEST['label'].values
else:
TRAIN = pd.read_csv('PDTB_train.csv')
TEST = pd.read_csv('PDTB_test.csv')
X_train = TRAIN[['sentence_1', 'sentence_2']].values.tolist()
X_test = TEST[['sentence_1', 'sentence_2']].values.tolist()
y_train = TRAIN['label'].values
y_test = TEST['label'].values
return X_train, y_train, X_test, y_test
def load_model(self, checkpoint_path):
"""
Loads a transformer model
        :return: a model loaded from the given checkpoint
"""
model = AutoModel.from_pretrained(checkpoint_path, output_hidden_states=True)
model = model.to(self.device)
return model
def get_emb(self, sent, model):
"""
Encodes a sentence and returns an embedding
:param sent: a sentence, str
:param model: a transformer model
        :return: a (hidden_size, num_layers + 1) matrix of mean-pooled embeddings, one column per layer
"""
with torch.no_grad():
enc = self.tokenizer(sent, padding=True, truncation=True,
max_length=512, return_tensors='pt')
enc = enc.to(self.device)
output = model(**enc, return_dict=True)
states = output.hidden_states
mean_pool = np.zeros((model.config.hidden_size,
model.config.num_hidden_layers + 1))
for num, emb in enumerate(states):
mean_pool[:, num] = torch.mean(emb, 1).squeeze(0).cpu().numpy()
return mean_pool
def calculate_embeddings(self, model, data):
"""
Calculates embeddings for all sentences
        :param model: a transformer model
:param data: a corpus of texts
:return: a matrix of embeddings
"""
embeddings = np.zeros((len(data),
model.config.hidden_size,
model.config.num_hidden_layers + 1))
for i, sentence in enumerate(data):
embeddings[i] = self.get_emb(sentence, model)
return embeddings
def save_embeddings(self, test_name, embeddings, checkpoint):
"""
Saves embeddings to a pickle file
:param embeddings: a matrix of embeddings
:param checkpoint: a checkpoint
        :return: None (the embeddings are written to a pickle file)
"""
with open(os.path.join(self.output_path, f'BERT_checkpoints_{test_name}_{int(checkpoint)+self.delay}.pickle'), 'wb') as f:
pickle.dump(embeddings, f, protocol=4)
def calculate(self, task, X_train, X_test):
for checkpoint in tqdm(self.checkpoints):
num_checkpoint = checkpoint.split('-')[-1]
model = self.load_model(checkpoint)
embs = self.calculate_embeddings(model, X_train)
self.save_embeddings(f"{task}_TRAIN", embs, num_checkpoint)
embs = self.calculate_embeddings(model, X_test)
self.save_embeddings(f"{task}_TEST", embs, num_checkpoint)
class DiscourseEmbeddings(Embeddings):
def __init__(self, device, tokenizer_path,
output_path, delay=0):
super().__init__(device, tokenizer_path,
output_path, delay)
def encode(self, sentences):
batches = []
for text in sentences:
tokenized_text = self.tokenizer.batch_encode_plus(text,
max_length=512,
add_special_tokens=True,
padding=True,
truncation=True,
return_attention_mask=True)
batches.append(tokenized_text)
return batches
def mean_pooling(self, model_output, attention_mask, emb_number, size, num_layers):
final_tokens = np.zeros((model_output[0].shape[0], size, num_layers))
tokens = model_output[0].cpu().detach().numpy()
input_mask_expanded = attention_mask.unsqueeze(-1).expand(tokens.shape).float().cpu().detach().numpy()
normalized = np.clip(np.sum(input_mask_expanded, axis=1), a_min=1e-9, a_max=None)
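        # masked mean pooling: padded positions are zeroed out via the attention mask, token
        # embeddings are summed over the sequence dimension, and the sum is divided by the
        # number of real tokens (clipped to avoid division by zero)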
for num, i in enumerate(model_output):
tokens = i.cpu().detach().numpy() # batch_size x seq_len x emb_size
final_tokens[:,:,num] = np.sum(tokens * input_mask_expanded, axis=1)/normalized
return final_tokens
def get_discourse_emb(self, batch, model, emb_number):
"""
Encodes a sentence and returns an embedding
:param batch: a batch
:param model: a transformer model
        :return: a concatenated per-layer embedding for the sentence pair
"""
input_ids = torch.tensor(batch['input_ids'], dtype=torch.long).to(self.device)
attention_mask = torch.tensor(batch['attention_mask'], dtype=torch.long).to(self.device)
with torch.no_grad():
output = model(
input_ids=input_ids,
attention_mask=attention_mask,
return_dict=True
)
emb = output.hidden_states
mean_pool = self.mean_pooling(emb, attention_mask,
emb_number, model.config.hidden_size,
model.config.num_hidden_layers + 1)
embedding = np.concatenate(mean_pool, axis=0)
return embedding
def calculate_discourse_embeddings(self, model, data):
"""
Calculates embeddings for all sentences
        :param model: a transformer model
:param data: a corpus of texts
:return: a matrix of embeddings
"""
batches = self.encode(data)
emb_number = len(batches[0]['input_ids'])
embeddings = np.zeros((len(data),
model.config.hidden_size * emb_number,
model.config.num_hidden_layers + 1))
for i, batch in enumerate(batches):
embeddings[i] = self.get_discourse_emb(batch, model, emb_number)
return embeddings
def calculate_discourse(self, task, X_train, X_test):
for checkpoint in tqdm(self.checkpoints):
num_checkpoint = checkpoint.split('-')[-1]
model = self.load_model(checkpoint)
embs = self.calculate_discourse_embeddings(model, X_train)
self.save_embeddings(f"{task}_TRAIN", embs, num_checkpoint)
embs = self.calculate_discourse_embeddings(model, X_test)
self.save_embeddings(f"{task}_TEST", embs, num_checkpoint)
class BLiMPEmbeddings(Embeddings):
def __init__(self, device, tokenizer_path,
output_path, delay=0):
super().__init__(device, tokenizer_path,
output_path, delay)
self.preprocess = WordPunctTokenizer()
def load_blimp_file(self, dataset):
with open(f'{dataset}.jsonl') as file:
tasks = list(file)
text = []
for i in tasks:
string = json.loads(i)
text.append([string['sentence_bad'], string['sentence_good']])
dataframe = pd.DataFrame(text, columns=['sentence_bad', 'sentence_good'])
return dataframe
def mask_sentences(self, data):
masked_sentences = []
sentences = [a for pair in data.values for a in pair]
for sentence in sentences:
masks = []
s = self.preprocess.tokenize(sentence)
for i, word in enumerate(s):
masked = self.preprocess.tokenize(sentence)
masked[i] = '[MASK]'
masks.append([' '.join(masked), word])
masked_sentences.append(masks)
return masked_sentences
def calculate_probs(self, masked_sentences, unmasker):
probs = []
for s in masked_sentences:
prob = 0
for sent, word in s:
for a in unmasker(sent):
if a['token_str'] == word.lower():
prob += a['score']
probs.append(prob / len(sent))
return probs
def calculate_accuracy(self, probs):
accuracy = 0
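        # probs holds (bad, good) scores for each minimal pair in order; the model is counted as
        # correct when the grammatical sentence receives the higher summed masked-token probability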
for i in range(0, len(probs), 2):
if probs[i] < probs[i + 1]:
accuracy += 1
accuracy = accuracy * 2 / len(probs)
return accuracy
def save_probs(self, test_name, probs, checkpoint):
with open(os.path.join(self.output_path, f'probes_{test_name}_{int(checkpoint) + self.delay}.txt'), 'w', encoding='utf-8') as file:
for i in range(0, len(probs), 2):
prob_str = str(probs[i]) + '\t' + str(probs[i + 1])
                file.write(prob_str + '\n')  # one (bad, good) pair per line
def save_metrics(self, metrics, task):
metrics_csv = pd.DataFrame(metrics, columns=["Checkpoint", "Accuracy"])
metrics_csv.to_csv(os.path.join(self.output_path, f"{task}_metrics.csv"))
def get_probabilities(self, task, dataframe):
masked_sentences = self.mask_sentences(dataframe)
metrics = []
for checkpoint in tqdm(self.checkpoints):
num_checkpoint = checkpoint.split('-')[-1]
unmasker = pipeline('fill-mask', tokenizer=self.tokenizer,
model=checkpoint, device=0)
probs = self.calculate_probs(masked_sentences, unmasker)
self.save_probs(task, probs, num_checkpoint)
accuracy = self.calculate_accuracy(probs)
metrics.append([num_checkpoint, accuracy])
self.save_metrics(metrics, task)
return metrics
class Prober(DiscourseEmbeddings, BLiMPEmbeddings):
def __init__(self, dir_path, tokenizer_path, output_path,
delay=0, device="cuda:0"):
self.discourse = ["conn", "PDTB", "DC", "SP", ]
self.morphosyntax = ["subj_number", "top_constituents",
"tree_depth", "person", ]
self.blimp = ["adjunct_island", "principle_A_c_command",
"passive_1", "transitive"]
self.checkpoints = self.get_checkpoints(dir_path)
super().__init__(device=device, delay=delay,
output_path=output_path,
tokenizer_path=tokenizer_path)
def run_probe(self):
for task in self.morphosyntax:
print(f"Calculating {task} task...")
X_train, y_train, X_test, y_test = self.load_files(task)
self.calculate(task, X_train, X_test)
for task in self.discourse:
print(f"Calculating {task} task...")
X_train, y_train, X_test, y_test = self.load_files(task)
self.calculate_discourse(task, X_train, X_test)
for task in self.blimp:
print(f"Calculating {task} task...")
dataframe = self.load_blimp_file(task)
self.get_probabilities(task, dataframe)
| 12,980 | 41.560656 | 139 | py |
secure-muda | secure-muda-master/feature-gen-code/main.py | """
Extract features from pre-trained networks.
The main procedures are finetune and extract features.
Finetune: Given an Imagenet pretrained model (such as ResNet50), finetune it on a dataset (we call it source)
Extractor: After fine-tune, extract features on the target domain using finetuned models on source
This class supports most image models: Alexnet, Resnet(xx), VGG.
Other text or digit models can be easily extended using this code, see models.py for details.
"""
import argparse
import data_load
import models
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import time
import copy
import os
# Command setting
parser = argparse.ArgumentParser(description='Finetune')
parser.add_argument('--model_name', type=str,
help='model name', default='resnet50')
parser.add_argument('--batchsize', type=int, help='batch size', default=64)
parser.add_argument('--gpu', type=int, help='cuda id', default=0)
parser.add_argument('--dataset', type=str, default='office-31')
parser.add_argument('--source', type=str, default='amazon')
# parser.add_argument('--target', type=str, default='webcam')
parser.add_argument('--target', type=str, default='webcam',
help='List of target domains separated by commas')
parser.add_argument('--num_class', type=int, default=12)
parser.add_argument('--dataset_path', type=str,
default='../../data/office-31/')
parser.add_argument('--epoch', type=int, help='Train epochs', default=100)
parser.add_argument('--momentum', type=float, help='Momentum', default=.9)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--finetune', type=int,
help='Needs finetune or not', default=1)
parser.add_argument('--extract', type=int,
help='Needs extract features or not', default=1)
args = parser.parse_args()
# Parameter setting
DEVICE = torch.device('cuda:' + str(args.gpu)
if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = {'src': int(args.batchsize), 'tar': int(args.batchsize)}
def get_optimizer(model):
learning_rate = args.lr
param_group = []
param_group += [{'params': model.base_network.parameters(),
'lr': learning_rate}]
param_group += [{'params': model.classifier_layer.parameters(),
'lr': learning_rate * 10}]
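    # common fine-tuning heuristic: the pretrained backbone keeps the base learning rate while
    # the randomly initialized classifier head is trained with a 10x larger one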
optimizer = optim.SGD(param_group, momentum=args.momentum)
return optimizer
# Schedule the learning rate according to DANN if you want to (though this equation seems weird, so it is not used here)
def lr_schedule(optimizer, epoch):
def lr_decay(LR, n_epoch, e):
return LR / (1 + 10 * e / n_epoch) ** 0.75
for i in range(len(optimizer.param_groups)):
if i < len(optimizer.param_groups) - 1:
            optimizer.param_groups[i]['lr'] = lr_decay(
                args.lr, args.epoch, epoch)
        else:
            optimizer.param_groups[i]['lr'] = lr_decay(
                args.lr, args.epoch, epoch) * 10
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True, reduction=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.reduction = reduction
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (batch_size,)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
if self.use_gpu: targets = targets.to(DEVICE)
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
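        # e.g. with epsilon=0.1 and 10 classes, the true class weight becomes 0.9 + 0.01 = 0.91
        # and every other class gets 0.01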
loss = (- targets * log_probs).sum(dim=1)
if self.reduction:
return loss.mean()
else:
return loss
def finetune(model, dataloaders, optimizer, criterion, best_model_path, use_lr_schedule=False):
N_EPOCH = args.epoch
best_model_wts = copy.deepcopy(model.state_dict())
since = time.time()
best_acc = 0.0
acc_hist = []
for epoch in range(1, N_EPOCH + 1):
if use_lr_schedule:
lr_schedule(optimizer, epoch)
for phase in ['train', 'val']:
if phase == 'train':
model.train()
else:
model.eval()
total_loss, correct = 0, 0
for inputs, labels in dataloaders[phase]:
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
loss = criterion(outputs, labels)
preds = torch.max(outputs, 1)[1]
if phase == 'train':
loss.backward()
optimizer.step()
total_loss += loss.item() * inputs.size(0)
correct += torch.sum(preds == labels.data)
epoch_loss = total_loss / len(dataloaders[phase].dataset)
epoch_acc = correct.double() / len(dataloaders[phase].dataset)
acc_hist.append([epoch_loss, epoch_acc])
print('Epoch: [{:02d}/{:02d}]---{}, loss: {:.6f}, acc: {:.4f}'.format(epoch, N_EPOCH, phase, epoch_loss,
epoch_acc))
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
torch.save(model.state_dict(
), 'save_model_{}/best_{}_{}-{}.pth'.format(args.dataset, args.model_name, args.source, epoch))
time_pass = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_pass // 60, time_pass % 60))
print('------Best acc: {}'.format(best_acc))
model.load_state_dict(best_model_wts)
torch.save(model.state_dict(), best_model_path)
print('Best model saved!')
return model, best_acc, acc_hist
# Extract features for given intermediate layers
# Currently, this only works for ResNet since AlexNet and VGGNET only have features and classifiers modules.
# You will need to manually define a function in the forward function to extract features
# (by letting it return features and labels).
# Please follow digit_deep_network.py for reference.
class FeatureExtractor(nn.Module):
def __init__(self, model, extracted_layers):
super(FeatureExtractor, self).__init__()
self.model = model._modules['module'] if type(
model) == torch.nn.DataParallel else model
self.extracted_layers = extracted_layers
def forward(self, x):
outputs = []
for name, module in self.model._modules.items():
if name == "fc":
x = x.view(x.size(0), -1)
x = module(x)
if name in self.extracted_layers:
outputs.append(x)
return outputs
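# Usage sketch (assuming a plain torchvision ResNet and its standard module names):
#   feats = FeatureExtractor(resnet, ['avgpool', 'fc'])(images)
# returns the activations of the listed modules for a batch of images.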
def extract_feature(model, dataloader, save_path, load_from_disk=True, model_path=''):
if load_from_disk:
model = models.Network(base_net=args.model_name,
n_class=args.num_class)
model.load_state_dict(torch.load(model_path))
model = model.to(DEVICE)
model.eval()
correct = 0
fea_all = torch.zeros(1,1+model.base_network.output_num()).to(DEVICE)
with torch.no_grad():
for inputs, labels in dataloader:
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
feas = model.get_features(inputs)
labels = labels.view(labels.size(0), 1).float()
x = torch.cat((feas, labels), dim=1)
fea_all = torch.cat((fea_all, x), dim=0)
outputs = model(inputs)
preds = torch.max(outputs, 1)[1]
# correct += torch.sum(preds == labels.data.long())
correct += torch.sum(preds == labels.view(labels.size(0)).data.long())
# print(inputs.shape, preds.shape, outputs.shape, labels.shape, torch.sum(preds == labels.view(labels.size(0)).data.long()))
# print(correct, len(dataloader.dataset))
test_acc = correct.double() / len(dataloader.dataset)
fea_numpy = fea_all.cpu().numpy()
np.savetxt(save_path, fea_numpy[1:], fmt='%.6f', delimiter=',')
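    # each saved row is [feature_1, ..., feature_D, label]; classify_1nn below reads this
    # layout back via data[:, :-1] for features and data[:, -1] for labels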
print('Test acc: %f' % test_acc)
# You may want to classify with 1nn after getting features
def classify_1nn(data_train, data_test):
'''
Classification using 1NN
Inputs: data_train, data_test: train and test csv file path
Outputs: yprediction and accuracy
'''
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
data = {'src': np.loadtxt(data_train, delimiter=','),
'tar': np.loadtxt(data_test, delimiter=','),
}
    Xs, Ys = data['src'][:, :-1], data['src'][:, -1]
    Xt, Yt = data['tar'][:, :-1], data['tar'][:, -1]
Xs = StandardScaler(with_mean=0, with_std=1).fit_transform(Xs)
Xt = StandardScaler(with_mean=0, with_std=1).fit_transform(Xt)
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(Xs, Ys)
ypred = clf.predict(Xt)
acc = accuracy_score(y_true=Yt, y_pred=ypred)
print('Acc: {:.4f}'.format(acc))
return ypred, acc
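# e.g. classify_1nn('src_features.csv', 'tar_features.csv'), where each CSV row is
# [features..., label] as produced by extract_feature above (file names are illustrative)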
if __name__ == '__main__':
torch.manual_seed(10)
# Load data
print('Loading data...')
data_folder = args.dataset_path
domain = {'src': str(args.source), 'tar': str(args.target)}
dataloaders = {}
data_train = data_load.load_data(
data_folder, domain['src'], BATCH_SIZE['src'], 'train', train_val_split=True, train_ratio=.8)
dataloaders['train'], dataloaders['val'] = data_train[0], data_train[1]
print('Data loaded: Source: {}, Target: {}'.format(args.source, args.target))
# Finetune
if args.finetune == 1:
print('Begin finetuning...')
net = models.Network(base_net=args.model_name,
n_class=args.num_class).to(DEVICE)
# criterion = nn.CrossEntropyLoss()
criterion = CrossEntropyLabelSmooth(num_classes=args.num_class, epsilon=0.1)
optimizer = get_optimizer(net)
if not os.path.exists('save_model_{}/'.format(args.dataset)):
os.mkdir('save_model_{}/'.format(args.dataset))
save_path = 'save_model_{}/best_{}_{}.pth'.format(
args.dataset, args.model_name, args.source)
model_best, best_acc, acc_hist = finetune(
net, dataloaders, optimizer, criterion, save_path, use_lr_schedule=False)
print('Finetune completed!')
# Extract features from finetuned model
if args.extract == 1:
model_path = 'save_model_{}/best_{}_{}.pth'.format(
args.dataset, args.model_name, args.source)
for target_domain in args.target.split(','):
data_test = data_load.load_data(data_folder, target_domain, BATCH_SIZE['tar'], 'test')
dataloaders['adapt'], dataloaders['test'] = data_train[0], data_train[1]
if 'domain-net' not in data_folder:
feature_save_path = 'save_model_{}/{}_{}_{}.csv'.format(args.dataset, args.source, target_domain, args.model_name)
print(feature_save_path)
extract_feature(None, data_test, feature_save_path, load_from_disk=True, model_path=model_path)
else:
train_feature_save_path = 'save_model_{}/{}_{}_{}_train.csv'.format(args.dataset, args.source, target_domain, args.model_name)
test_feature_save_path = 'save_model_{}/{}_{}_{}_test.csv'.format(args.dataset, args.source, target_domain, args.model_name)
extract_feature(None, data_test[0], train_feature_save_path, load_from_disk=True, model_path=model_path)
extract_feature(None, data_test[1], test_feature_save_path, load_from_disk=True, model_path=model_path)
print('Deep features are extracted and saved!')
| 12,648 | 42.920139 | 143 | py |
secure-muda | secure-muda-master/feature-gen-code/backbone.py | import numpy as np
import torch
import torch.nn as nn
import torchvision
from torchvision import models
# convnet without the last layer
class AlexNetFc(nn.Module):
def __init__(self):
super(AlexNetFc, self).__init__()
model_alexnet = models.alexnet(pretrained=True)
self.features = model_alexnet.features
self.classifier = nn.Sequential()
for i in range(6):
self.classifier.add_module(
"classifier"+str(i), model_alexnet.classifier[i])
self.__in_features = model_alexnet.classifier[6].in_features
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256*6*6)
x = self.classifier(x)
return x
def output_num(self):
return self.__in_features
class ResNet18Fc(nn.Module):
def __init__(self):
super(ResNet18Fc, self).__init__()
model_resnet18 = models.resnet18(pretrained=True)
self.conv1 = model_resnet18.conv1
self.bn1 = model_resnet18.bn1
self.relu = model_resnet18.relu
self.maxpool = model_resnet18.maxpool
self.layer1 = model_resnet18.layer1
self.layer2 = model_resnet18.layer2
self.layer3 = model_resnet18.layer3
self.layer4 = model_resnet18.layer4
self.avgpool = model_resnet18.avgpool
self.__in_features = model_resnet18.fc.in_features
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def output_num(self):
return self.__in_features
class ResNet34Fc(nn.Module):
def __init__(self):
super(ResNet34Fc, self).__init__()
model_resnet34 = models.resnet34(pretrained=True)
self.conv1 = model_resnet34.conv1
self.bn1 = model_resnet34.bn1
self.relu = model_resnet34.relu
self.maxpool = model_resnet34.maxpool
self.layer1 = model_resnet34.layer1
self.layer2 = model_resnet34.layer2
self.layer3 = model_resnet34.layer3
self.layer4 = model_resnet34.layer4
self.avgpool = model_resnet34.avgpool
self.__in_features = model_resnet34.fc.in_features
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def output_num(self):
return self.__in_features
class ResNet50Fc(nn.Module):
def __init__(self):
super(ResNet50Fc, self).__init__()
model_resnet50 = models.resnet50(pretrained=True)
self.conv1 = model_resnet50.conv1
self.bn1 = model_resnet50.bn1
self.relu = model_resnet50.relu
self.maxpool = model_resnet50.maxpool
self.layer1 = model_resnet50.layer1
self.layer2 = model_resnet50.layer2
self.layer3 = model_resnet50.layer3
self.layer4 = model_resnet50.layer4
self.avgpool = model_resnet50.avgpool
self.__in_features = model_resnet50.fc.in_features
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def output_num(self):
return self.__in_features
class ResNet101Fc(nn.Module):
def __init__(self):
super(ResNet101Fc, self).__init__()
model_resnet101 = models.resnet101(pretrained=True)
self.conv1 = model_resnet101.conv1
self.bn1 = model_resnet101.bn1
self.relu = model_resnet101.relu
self.maxpool = model_resnet101.maxpool
self.layer1 = model_resnet101.layer1
self.layer2 = model_resnet101.layer2
self.layer3 = model_resnet101.layer3
self.layer4 = model_resnet101.layer4
self.avgpool = model_resnet101.avgpool
self.__in_features = model_resnet101.fc.in_features
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def output_num(self):
return self.__in_features
class ResNet152Fc(nn.Module):
def __init__(self):
super(ResNet152Fc, self).__init__()
model_resnet152 = models.resnet152(pretrained=True)
self.conv1 = model_resnet152.conv1
self.bn1 = model_resnet152.bn1
self.relu = model_resnet152.relu
self.maxpool = model_resnet152.maxpool
self.layer1 = model_resnet152.layer1
self.layer2 = model_resnet152.layer2
self.layer3 = model_resnet152.layer3
self.layer4 = model_resnet152.layer4
self.avgpool = model_resnet152.avgpool
self.__in_features = model_resnet152.fc.in_features
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def output_num(self):
return self.__in_features
network_dict = {"alexnet": AlexNetFc,
"resnet18": ResNet18Fc,
"resnet34": ResNet34Fc,
"resnet50": ResNet50Fc,
"resnet101": ResNet101Fc,
"resnet152": ResNet152Fc}
| 5,961 | 29.418367 | 68 | py |
secure-muda | secure-muda-master/feature-gen-code/models.py | import torch
import torch.nn as nn
import backbone
class Network(nn.Module):
def __init__(self, base_net='alexnet', n_class=31):
super(Network, self).__init__()
self.n_class = n_class
self.base_network = backbone.network_dict[base_net]()
self.classifier_layer = nn.Linear(
self.base_network.output_num(), n_class)
self.classifier_layer.weight.data.normal_(0, 0.005)
self.classifier_layer.bias.data.fill_(0.1)
def forward(self, x):
features = self.base_network(x)
clf = self.classifier_layer(features)
return clf
def get_features(self, x):
features = self.base_network(x)
return features
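# Illustrative usage sketch (hedged; backbone choice, class count and batch size are
# arbitrary placeholders): build the classifier head on top of a backbone and run a fake
# batch through both forward() and get_features().
def _example_network_forward():
    model = Network(base_net='resnet18', n_class=31)
    model.eval()
    x = torch.randn(4, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)                   # (4, 31) raw class scores
        features = model.get_features(x)    # (4, 512) backbone features
    return logits, features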
| 703 | 28.333333 | 61 | py |
secure-muda | secure-muda-master/feature-gen-code/data_load.py | from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Subset
import torch
from PIL import Image
# This file works for RGB images.
def load_data(data_folder, domain_name, batch_size, phase='train', train_val_split=True, train_ratio=.8):
transform_dict = {
'train': transforms.Compose(
# [transforms.Resize(256),
[transforms.Resize((256,256)),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
'test': transforms.Compose(
# [transforms.Resize(224),
[transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])}
data = datasets.ImageFolder(root='/'.join([data_folder, domain_name]), transform=transform_dict[phase])
if 'domain-net' not in data_folder:
if phase == 'train':
if train_val_split:
train_size = int(train_ratio * len(data))
test_size = len(data) - train_size
data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=True,
num_workers=4)
val_loader = DataLoader(data_val, batch_size=batch_size, shuffle=False, drop_last=False,
num_workers=4)
return [train_loader, val_loader]
else:
train_loader = DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True,
num_workers=4)
return train_loader
else:
test_loader = DataLoader(data, batch_size=batch_size, shuffle=False, drop_last=False,
num_workers=4)
return test_loader
else:
print('Domain-net specific processing')
data_imgs_idx = {k[0]:v for v,k in enumerate(data.imgs)}
train_idx_f = '/'.join([data_folder, domain_name]) + '_train.txt'
test_idx_f = '/'.join([data_folder, domain_name]) + '_test.txt'
with open(train_idx_f) as f:
train_imgs = f.readlines()
train_imgs = ['/'.join([data_folder, x.split(" ")[0]]) for x in train_imgs]
train_indices = [data_imgs_idx[x] for x in train_imgs]
with open(test_idx_f) as f:
test_imgs = f.readlines()
test_imgs = ['/'.join([data_folder, x.split(" ")[0]]) for x in test_imgs]
test_indices = [data_imgs_idx[x] for x in test_imgs]
train_loader = DataLoader(Subset(data, train_indices), batch_size=batch_size, shuffle=True, drop_last=True,
num_workers=4)
test_loader = DataLoader(Subset(data, test_indices), batch_size=batch_size, shuffle=False, drop_last=False,
num_workers=4)
print('Domain {} has {} samples. Train dataloader has size {} and test dataloader has size {}'.format(domain_name, len(data), \
len(train_loader.dataset), \
len(test_loader.dataset)))
return train_loader, test_loader
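# Illustrative usage sketch (the data root and domain names are placeholders): for
# non-domain-net folders, phase='train' with train_val_split=True returns a
# [train_loader, val_loader] pair, while phase='test' returns a single test loader.
def _example_load_data(data_folder='../data/office-31'):
    train_loader, val_loader = load_data(data_folder, 'amazon', batch_size=16, phase='train')
    test_loader = load_data(data_folder, 'webcam', batch_size=16, phase='test')
    return train_loader, val_loader, test_loader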
# ## Below are for ImageCLEF datasets
# class ImageCLEF(torch.utils.data.Dataset):
# def __init__(self, root_dir, domain, transform=None):
# super(ImageCLEF, self).__init__()
# self.transform = transform
# file_name = root_dir + 'list/' + domain + 'List.txt'
# lines = open(file_name, 'r').readlines()
# self.images, self.labels = [], []
# self.domain = domain
# for item in lines:
# line = item.strip().split(' ')
# self.images.append(root_dir + domain + '/' + line[0].split('/')[-1])
# self.labels.append(int(line[1].strip()))
# def __getitem__(self, index):
# image = self.images[index]
# target = self.labels[index]
# img = Image.open(image).convert('RGB')
# if self.transform:
# image = self.transform(img)
# return image, target
# def __len__(self):
# return len(self.images)
# def load_imageclef_train(root_path, domain, batch_size, phase):
# transform_dict = {
# 'src': transforms.Compose(
# [transforms.Resize((256, 256)),
# transforms.RandomCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
# ]),
# 'tar': transforms.Compose(
# [transforms.Resize((224, 224)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
# ])}
# data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
# train_size = int(0.8 * len(data))
# test_size = len(data) - train_size
# data_train, data_val = torch.utils.data.random_split(data, [train_size, test_size])
# train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=True, drop_last=False,
# num_workers=4)
# val_loader = DataLoader(data_val, batch_size=batch_size, shuffle=True, drop_last=False,
# num_workers=4)
# return train_loader, val_loader
# def load_imageclef_test(root_path, domain, batch_size, phase):
# transform_dict = {
# 'src': transforms.Compose(
# [transforms.Resize((256,256)),
# transforms.RandomCrop(224),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
# ]),
# 'tar': transforms.Compose(
# [transforms.Resize((224, 224)),
# transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
# ])}
# data = ImageCLEF(root_dir=root_path, domain=domain, transform=transform_dict[phase])
# data_loader = DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
# return data_loader | 6,811 | 45.027027 | 135 | py |
secure-muda | secure-muda-master/train_code/customlayers.py | import torch
import torch.nn as nn
import torch.nn.utils.weight_norm as weightNorm
from torchvision import models
# single domain classifier layer
class ClassifierLayer(nn.Module):
def __init__(self, input_dim, classes, dropout):
super(ClassifierLayer, self).__init__()
self.classes = classes
self.input_dim = input_dim
self.net = nn.Sequential(
nn.Dropout(dropout),
weightNorm(nn.Linear(self.input_dim, self.classes))
)
def forward(self, x):
return self.net(x)
# forward layer
class ForwardLayer(nn.Module):
def __init__(self, inp_lin1, inp_lin2, f_dims, dropout):
super(ForwardLayer, self).__init__()
self.inp_lin1 = inp_lin1
self.inp_lin2 = inp_lin2
self.f_dims = f_dims
self.net = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(self.inp_lin1,self.inp_lin2),
nn.BatchNorm1d(self.inp_lin2),
nn.ELU(inplace=True),
nn.Dropout(dropout),
nn.Linear(self.inp_lin2,self.inp_lin2),
nn.BatchNorm1d(self.inp_lin2),
nn.ELU(inplace=True),
nn.Dropout(dropout),
nn.Linear(self.inp_lin2,self.f_dims),
nn.BatchNorm1d(self.f_dims),
nn.ELU(inplace=True),
nn.Dropout(dropout),
nn.Linear(self.f_dims, self.f_dims),
nn.BatchNorm1d(self.f_dims),
nn.ELU(inplace=True)
)
def forward(self,x):
return self.net(x)
# backbone layer
class BackBoneLayer(nn.Module):
def __init__(self,pre,out_feats):
super(BackBoneLayer, self).__init__()
if pre == 'resnet101':
temp_resnet = models.resnet101(pretrained=True)
self.features = nn.Sequential(*[x for x in list(temp_resnet.children())[:-1]])
elif pre == 'resnet50':
temp_resnet = models.resnet50(pretrained=True)
self.features = nn.Sequential(*[x for x in list(temp_resnet.children())[:-1]])
self.pre = pre
self.out_feats = out_feats
def forward(self, x):
feats = self.features(x)
return feats.view((x.shape[0], self.out_feats))
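# Illustrative composition sketch (dimensions follow the defaults used in config.py/net.py,
# and the batch below is fake): the pipeline used elsewhere in this repo is
# G (backbone) -> F (bottleneck) -> C (classifier).
def _example_g_f_c_pipeline():
    G = BackBoneLayer('resnet50', 2048)
    F = ForwardLayer(2048, 1024, 256, dropout=0.2)
    C = ClassifierLayer(256, 31, dropout=0.2)
    for m in (G, F, C):
        m.eval()                                   # eval mode so BatchNorm behaves on a tiny batch
    x = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        logits = C(F(G(x)))                        # (2, 31) class scores
    return logits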
| 2,318 | 30.337838 | 90 | py |
secure-muda | secure-muda-master/train_code/dataset.py | import os
import cv2
import torch
import random
import numpy as np
from tqdm import tqdm
from PIL import Image
import config as config
import matplotlib.pyplot as plt
from torchvision import transforms, utils
from torch.utils.data import Dataset, DataLoader
import pandas as pd
class FrozenDataset():
def __init__(self, file_name, train_ratio = 1):
file_name = os.path.join(config.server_root_path, config.settings['dataset_dir'], file_name)
data = pd.read_csv(file_name)
data = np.asarray(data)
x = data[:, :-1]
y = data[:, -1]
perm = np.random.RandomState(42).permutation(x.shape[0])
limit_train = int(x.shape[0] * train_ratio)
self.img = torch.tensor(x[perm[:limit_train]], dtype=torch.float32)
self.label = torch.tensor(y[perm[:limit_train]], dtype=torch.long)
self.val_img = torch.tensor(x[perm[limit_train:]], dtype=torch.float32)
self.val_label = torch.tensor(y[perm[limit_train:]], dtype=torch.long)
if train_ratio == 1:
assert x.shape[0] == self.img.shape[0]
def sample(self, batch_size):
assert self.img.shape[0] >= batch_size
idx = random.sample(range(self.img.shape[0]), batch_size)
return self.img[idx], self.label[idx]
def sample_val(self, batch_size):
assert self.val_img.shape[0] >= batch_size
idx = random.sample(range(self.val_img.shape[0]), batch_size)
return self.val_img[idx], self.val_label[idx]
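# Illustrative usage sketch (the CSV name is a placeholder argument): FrozenDataset expects
# a CSV of pre-extracted features whose last column is the integer class label;
# sample()/sample_val() then draw random mini-batches as torch tensors.
def _example_sample_frozen_features(csv_name='amazon_amazon.csv'):
    ds = FrozenDataset(csv_name, train_ratio=0.8)
    x, y = ds.sample(batch_size=16)            # x: (16, n_features) float32, y: (16,) long
    xv, yv = ds.sample_val(batch_size=16)      # drawn from the held-out 20% split
    return x, y, xv, yv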
# https://medium.com/@shashikachamod4u/excel-csv-to-pytorch-dataset-def496b6bcc1
class FeatureDataset(Dataset):
def __init__(self, file_name):
file_name = os.path.join(config.server_root_path, config.settings['dataset_dir'], file_name)
data = pd.read_csv(file_name)
data = np.asarray(data)
x = data[:, :-1]
y = data[:, -1]
self.img = torch.tensor(x, dtype=torch.float32)
self.label = torch.tensor(y, dtype=torch.long)
def __len__(self):
return self.img.shape[0]
def __getitem__(self, idx):
return idx, self.img[idx], self.label[idx] | 2,123 | 29.342857 | 100 | py |
secure-muda | secure-muda-master/train_code/config.py | import os
import shutil
import torch
import glob
import pdb
import numpy as np
from config_populate import data_settings
# While config is mostly unchanged throughout different runs, to avoid creating multiple config files for
# each problem, we select experiments using this auxiliary file
import exp_select as exp_select
def gen_exp_name():
st ='expt'
st = '_'.join([st,str(settings['bb'])])
st = '_'.join([st,dataset_name])
st = '_'.join([st,data_key])
st = '_'.join([st, str(exp_select.exp_id)])
    if len(settings['optimizer_dict']) > 0:
        active_losses = [loss for loss in settings['optimizer_dict'] if settings['use_loss'][loss]]
if len(settings['id_str'])>0:
st = '_'.join([st,settings['id_str']])
return st
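# Example of a generated name (illustrative; the actual fields come from exp_select):
# with bb='resnet50', dataset_name='office-31', data_key='AD_W' and exp_id=3 this yields
# 'expt_resnet50_office-31_AD_W_3' (plus a trailing '_<id_str>' when id_str is non-empty).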
settings = {}
server_root_path = '../../'
dataset_name = exp_select.dataset_name
data_key = exp_select.data_key
comments = exp_select.comments
settings['dataset_name'] = dataset_name
settings['comments'] = comments
settings['server_root_path'] = server_root_path
settings['dataset_dir'] = os.path.join('data', 'pretrained-features', dataset_name)
settings['verbose'] = False
#dataset settings
settings['C'] = data_settings[dataset_name][data_key]['C']
settings['num_C'] = data_settings[dataset_name][data_key]['num_C']
settings['src_datasets'] = data_settings[dataset_name][data_key]['src_datasets']
settings['trgt_datasets'] = data_settings[dataset_name][data_key]['trgt_datasets']
st0 = np.random.get_state()[1][0]
t0 = torch.initial_seed()
settings['seed_value'] = {'torch':t0,'np':st0}
settings['resolution'] = 224
settings['index_list'] = 'index_list'
settings['balance_dataset'] = False
settings['bb'] = 'resnet50'
settings['bb_output'] = 2048
settings['F_dims'] = 256
settings['pseudo_label_hc_thresh'] = .95
settings['to_train'] = ['Fs', 'C']
settings['softmax_temperature'] = 1
settings['use_loss'] = {
'source':True,
'target':True,
}
settings['losses_after_enough_iters'] = ['target']
#optimizer settings
settings['optimizer_dict'] = {
'source':['Fs', 'C'],
'target':['Fs']
}
settings['lr'] = {
'source':1e-5,
'target':1e-5 if dataset_name == 'image-clef' else \
1e-7 if dataset_name == 'domain-net' else \
3e-6
}
settings['dropout'] = {
'office-31': .2,
'domain-net': 0,
'image-clef': .4,
'office-home': 0,
'office-caltech':.2
}
settings['num_cls_heads'] = 1
settings['gaussian_samples_per_class'] = 2000
settings['num_projections'] = 200
settings['eval_tuning_steps'] = 400 # How much work to do when computing the w_2 for eval purposes
settings['id_str'] = ''
settings['exp_name'] = gen_exp_name()
# conditional entropy regularizer
settings['gamma'] = {
'office-31': .02,
'domain-net': 1,
'image-clef': .02,
'office-home': 1,
'office-caltech':.02
}
settings['train_ratio'] = {
'office-31': .8,
'domain-net': .8,
'image-clef': .8,
'office-home': .8,
'office-caltech':.8
}
settings['mode'] = {'train':0,'val':1}
settings['summaries_path'] = os.path.join(server_root_path, 'summaries')
settings['weights_path'] = os.path.join(server_root_path, 'weights')
settings['gaussians_path'] = os.path.join(server_root_path, 'computed_gaussians')
settings['gpu'] = exp_select.gpu
settings['device'] = 'cuda:' + str(settings['gpu'])
torch.cuda.set_device(settings['gpu'])
settings['tb_port_no'] = 9999 - settings['gpu']
settings['expt_dict'] = {
'office-31':{
'AD_W':{'enough_iter':12000,'max_iter':50000,'val_after':500,'batch_size':16,'adapt_batch_size':465,'val_batch_size_factor':20},
'DW_A':{'enough_iter':12000,'max_iter':50000,'val_after':500,'batch_size':16,'adapt_batch_size':465,'val_batch_size_factor':20},
'AW_D':{'enough_iter':12000,'max_iter':50000,'val_after':500,'batch_size':16,'adapt_batch_size':465,'val_batch_size_factor':20}
},
'domain-net':{
'CIPQR_S':{'enough_iter':80000,'max_iter':240000,'val_after':10000,'batch_size':32,'adapt_batch_size':2415,'val_batch_size_factor':1},
'CIPQS_R':{'enough_iter':80000,'max_iter':240000,'val_after':10000,'batch_size':32,'adapt_batch_size':2415,'val_batch_size_factor':1},
'CIPSR_Q':{'enough_iter':80000,'max_iter':240000,'val_after':10000,'batch_size':32,'adapt_batch_size':2415,'val_batch_size_factor':1},
'CPQRS_I':{'enough_iter':80000,'max_iter':240000,'val_after':10000,'batch_size':32,'adapt_batch_size':2415,'val_batch_size_factor':1},
'CIQRS_P':{'enough_iter':80000,'max_iter':240000,'val_after':10000,'batch_size':32,'adapt_batch_size':2415,'val_batch_size_factor':1},
'IPQRS_C':{'enough_iter':80000,'max_iter':240000,'val_after':10000,'batch_size':32,'adapt_batch_size':2415,'val_batch_size_factor':1}
},
'image-clef':{
'PC_I':{'enough_iter':4000,'max_iter':7000,'val_after':100,'batch_size':16,'adapt_batch_size':300,'val_batch_size_factor':20},
'IC_P':{'enough_iter':4000,'max_iter':7000,'val_after':100,'batch_size':16,'adapt_batch_size':300,'val_batch_size_factor':20},
'IP_C':{'enough_iter':4000,'max_iter':7000,'val_after':100,'batch_size':16,'adapt_batch_size':300,'val_batch_size_factor':20}
},
'office-home':{
'ACP_R':{'enough_iter':15000,'max_iter':25000,'val_after':500,'batch_size':256,'adapt_batch_size':975,'val_batch_size_factor':20},
'ACR_P':{'enough_iter':15000,'max_iter':25000,'val_after':500,'batch_size':256,'adapt_batch_size':975,'val_batch_size_factor':20},
'APR_C':{'enough_iter':15000,'max_iter':25000,'val_after':500,'batch_size':256,'adapt_batch_size':975,'val_batch_size_factor':20},
'CPR_A':{'enough_iter':15000,'max_iter':17000,'val_after':500,'batch_size':256,'adapt_batch_size':975,'val_batch_size_factor':20}
},
'office-caltech':{
'ACD_W':{'enough_iter':4000,'max_iter':10000,'val_after':100,'batch_size':16,'adapt_batch_size':150,'val_batch_size_factor':10},
'ADW_C':{'enough_iter':4000,'max_iter':10000,'val_after':100,'batch_size':16,'adapt_batch_size':150,'val_batch_size_factor':10},
'ACW_D':{'enough_iter':4000,'max_iter':10000,'val_after':100,'batch_size':16,'adapt_batch_size':150,'val_batch_size_factor':10},
'CDW_A':{'enough_iter':4000,'max_iter':10000,'val_after':100,'batch_size':16,'adapt_batch_size':150,'val_batch_size_factor':10}
}
}
settings['log_interval'] = settings['expt_dict'][dataset_name][data_key]['val_after']
settings['start_iter'] = 0
settings['max_iter'] = settings['expt_dict'][dataset_name][data_key]['max_iter']
settings['enough_iter'] = settings['expt_dict'][dataset_name][data_key]['enough_iter']
settings['val_after'] = settings['expt_dict'][dataset_name][data_key]['val_after']
settings['batch_size'] = settings['expt_dict'][dataset_name][data_key]['batch_size']
settings['adapt_batch_size'] = settings['expt_dict'][dataset_name][data_key]['adapt_batch_size']
settings['val_batch_size_factor'] = settings['expt_dict'][dataset_name][data_key]['val_batch_size_factor']
settings['load_model'] = False
settings['load_opt'] = False
settings['continue_training'] = False | 10,847 | 57.322581 | 194 | py |
secure-muda | secure-muda-master/train_code/net.py |
import torch.nn as nn
import torch
from customlayers import ClassifierLayer as C
from customlayers import BackBoneLayer as G
from customlayers import ForwardLayer as F
import config as config
class SingleSourceNet(nn.Module):
def __init__(self, settings):
super(SingleSourceNet, self).__init__()
self.model = {}
to_train = config.settings['to_train']
for module in to_train:
self.model[module]={}
dropout = settings['dropout'][settings['dataset_name']]
if module =='Fs':
self.model[module] = F(config.settings['bb_output'],config.settings['bb_output']//2,config.settings['F_dims'], dropout)
elif module =='C':
self.model[module] = C(config.settings['F_dims'], config.settings['num_C'][config.settings['src_datasets'][0]] * config.settings['num_cls_heads'], dropout)
elif module =='G':
self.model[module] = G(config.settings['bb'],config.settings['bb_output'])
elif module in config.settings['ss_tasks']:
self.model[module] = C(config.settings['F_dims'], 4)
for module,compts in self.model.items():
self.add_module(module,compts)
def forward(self, x ):
raise NotImplementedError('Implemented a custom forward in train loop')
if __name__=='__main__':
raise NotImplementedError('Please check README.md for execution details')
| 1,464 | 34.731707 | 171 | py |
secure-muda | secure-muda-master/train_code/metrics.py | import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
###########################################################------------------_SECTION LOSS--------------------###########################################################################
def Entropy(input_):
bs = input_.size(0)
epsilon = 1e-5
entropy = -input_ * torch.log(input_ + epsilon)
entropy = torch.sum(entropy, dim=1)
return entropy
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True, reduction=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.reduction = reduction
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (batch_size)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
if self.use_gpu: targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).sum(dim=1)
        if self.reduction:
            return loss.mean()
        else:
            return loss
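# Illustrative usage sketch (shapes and values are made up): label smoothing mixes the
# one-hot target with a uniform distribution over the K classes,
# y_smooth = (1 - eps) * one_hot(y) + eps / K, before applying the usual cross entropy.
def _example_label_smoothing_loss():
    criterion = CrossEntropyLabelSmooth(num_classes=5, epsilon=0.1, use_gpu=False)
    logits = torch.randn(8, 5)                 # raw (pre-softmax) scores for 8 samples
    targets = torch.randint(0, 5, (8,))        # integer class labels
    return criterion(logits, targets)          # scalar mean loss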
def loss_CE(M_logits, cls_labels, num_cls_heads, n_classes):
return nn.CrossEntropyLoss(reduction='mean')(M_logits.view(M_logits.shape[0] * num_cls_heads, n_classes), \
cls_labels.repeat_interleave(num_cls_heads))
# return nn.CrossEntropyLoss(reduction='mean')(M_logits,cls_labels)
def l4_mirror_CE(M_logits,cls_labels):
#by default matrix is batch x domain x class
cls_labels = cls_labels.view(-1,1)
n_batch,n_domain,n_class = M_logits.shape
cls_M_logits = M_logits.permute(0,2,1) #batch x class x domain
cls_labels = cls_labels.expand(n_batch,n_domain)
return nn.CrossEntropyLoss(reduction='mean')(cls_M_logits,cls_labels)
# SWD module as implemented in https://github.com/VinAIResearch/DSW
def rand_projections(embedding_dim, num_projections=100):
"""This function generates `num_projections` random samples from the latent space's unit sphere.
Args:
embedding_dim (int): embedding dimensionality
num_projections (int): number of random projection samples
Return:
torch.Tensor: tensor of size (num_projections, embedding_dim)
"""
projections = [w / np.sqrt((w**2).sum()) # L2 normalization
for w in np.random.normal(size=(num_projections, embedding_dim))]
projections = np.asarray(projections)
return torch.from_numpy(projections).type(torch.FloatTensor)
def _sliced_wasserstein_distance(encoded_samples,
distribution_samples,
num_projections=100,
p=2,
device='cpu'):
""" Sliced Wasserstein Distance between encoded samples and drawn distribution samples.
Args:
        encoded_samples (torch.Tensor): tensor of encoded training samples
distribution_samples (torch.Tensor): tensor of drawn distribution training samples
num_projections (int): number of projections to approximate sliced wasserstein distance
p (int): power of distance metric
device (torch.device): torch device (default 'cpu')
Return:
        torch.Tensor: tensor of wasserstein distances of size (num_projections, 1)
"""
# derive latent space dimension size from random samples drawn from latent prior distribution
embedding_dim = distribution_samples.size(1)
# generate random projections in latent space
projections = rand_projections(embedding_dim, num_projections).to(device)
# calculate projections through the encoded samples
encoded_projections = encoded_samples.matmul(projections.transpose(0, 1))
# calculate projections through the prior distribution random samples
distribution_projections = (distribution_samples.matmul(projections.transpose(0, 1)))
# calculate the sliced wasserstein distance by
# sorting the samples per random projection and
# calculating the difference between the
# encoded samples and drawn random samples
# per random projection
wasserstein_distance = (torch.sort(encoded_projections.transpose(0, 1), dim=1)[0] -
torch.sort(distribution_projections.transpose(0, 1), dim=1)[0])
# distance between latent space prior and encoded distributions
# power of 2 by default for Wasserstein-2
wasserstein_distance = torch.pow(wasserstein_distance, p)
# approximate mean wasserstein_distance for each projection
# return torch.sort(wasserstein_distance.mean(dim=-1))[0][:num_projections // 10].mean()
return wasserstein_distance.mean()
def sliced_wasserstein_distance(encoded_samples,
gaussian_samples,
num_projections=50,
p=2,
device='cpu'):
""" Sliced Wasserstein Distance between encoded samples and drawn distribution samples.
Args:
        encoded_samples (torch.Tensor): tensor of encoded training samples
distribution_samples (torch.Tensor): tensor of drawn distribution training samples
num_projections (int): number of projections to approximate sliced wasserstein distance
p (int): power of distance metric
device (torch.device): torch device (default 'cpu')
Return:
        torch.Tensor: tensor of wasserstein distances of size (num_projections, 1)
"""
# approximate mean wasserstein_distance between encoded and prior distributions
# for each random projection
swd = _sliced_wasserstein_distance(encoded_samples, gaussian_samples,
num_projections, p, device)
return swd
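# Illustrative usage sketch (the two point clouds below are random placeholders): SWD
# projects both sets onto random 1-D directions, sorts the projections, and averages the
# squared differences, giving a cheap approximation of the Wasserstein-2 distance.
def _example_swd():
    encoded = torch.randn(128, 256)            # e.g. a batch of bottleneck features
    prior = torch.randn(128, 256)              # e.g. samples drawn from the class Gaussians
    return sliced_wasserstein_distance(encoded, prior, num_projections=50, p=2, device='cpu')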
############################################################------------------_SECTION METRIC--------------------###########################################################################
def acc_metric(preds,labels):
return np.mean(np.asarray(preds) == np.asarray(labels))
def get_metric(key, feats):
if key == 'cls_acc':
cls_preds = feats['cls_preds']
cls_labels = feats['cls_labels']
return acc_metric(cls_preds,cls_labels)
elif key == 'cls_acc_data':
cls_preds = np.array(feats['all_preds'])
cls_labels = np.array(feats['all_labels'])
metric = np.sum((cls_preds == cls_labels).astype(float))
return metric,len(cls_labels)
    raise ValueError('Unknown metric key: {}'.format(key))
############################################################------------------_SECTION LOGITS-------------------###########################################################################
def get_logits(feats, num_cls_heads):
assert feats['C'].shape[1] % num_cls_heads == 0
C = feats['C']
C = C.view(C.shape[0], num_cls_heads, C.shape[1] // num_cls_heads)
C = C.softmax(dim=-1)
cls_C_logits = C.mean(axis=1)
return cls_C_logits,None,None
| 7,769 | 44.174419 | 188 | py |
secure-muda | secure-muda-master/train_code/gaussian_utils.py | import numpy as np
import metrics as metrics
from tqdm import tqdm
import torch
import time
from sklearn.mixture import GaussianMixture
def sample_from_gaussians(means, covs, n_samples, perm_res=True):
# Return samples from the num_classes gaussians trained on the source
N_CLASSES = len(n_samples)
Xs = []
Ys = []
for i in range(N_CLASSES):
if n_samples[i] > 0:
# print(i, means[i], covs[i])
curr_x = np.random.multivariate_normal(means[i], covs[i], n_samples[i])
curr_y = np.repeat(i, n_samples[i])
Xs.append(curr_x)
Ys.append(curr_y)
Xs = np.vstack(Xs)
Ys = np.concatenate(Ys)
if not perm_res:
return Xs, Ys
else:
perm = np.random.permutation(Xs.shape[0])
return Xs[perm,:], Ys[perm]
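# Illustrative usage sketch (the two Gaussians below are made up): draws class-conditional
# samples from per-class Gaussians, e.g. the ones fitted by learn_gaussians below;
# n_samples[i] controls how many samples class i contributes.
def _example_sample_from_gaussians():
    means = np.zeros((2, 3))                   # two classes in a 3-D latent space
    means[1] += 5.0
    covs = np.stack([np.eye(3), np.eye(3)])
    Xs, Ys = sample_from_gaussians(means, covs, n_samples=[4, 6])
    return Xs.shape, Ys.shape                  # (10, 3) and (10,)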
def learn_gaussians(trainer_S, debug=False):
###########################################################################################################################
# Extract source domain latent features from all sources
    ###########################################################################################################################
trainer_S.set_mode(trainer_S.settings['mode']['val'])
with torch.no_grad():
# Gather samples from both source domains
all_labels_src = []
all_preds_src = []
all_confs_src = []
all_F_src = []
all_C_src = []
dom = trainer_S.src_domain
for i in range(trainer_S.source_dataset_train.img.shape[0]):
images = trainer_S.source_dataset_train.img[i*trainer_S.batch_size : (i+1) * trainer_S.batch_size]
label = trainer_S.source_dataset_train.label[i*trainer_S.batch_size : (i+1) * trainer_S.batch_size]
if images.shape[0] == 0:
continue
x = images.to(trainer_S.settings['device']).float()
label = label.to(trainer_S.settings['device']).long()
# G = trainer_S.network.model['G'](x)
F = trainer_S.network.model['Fs'](x)
C = trainer_S.network.model['C'](F)
cls_logits,_,mat = metrics.get_logits(feats={'C':C}, num_cls_heads=trainer_S.settings['num_cls_heads'])
cls_confs,cls_preds = torch.max(cls_logits,dim=-1)
all_labels_src.extend(list(label.cpu().numpy()))
all_preds_src.extend(list(cls_preds.cpu().numpy()))
all_confs_src.extend(list(cls_confs.cpu().numpy()))
all_F_src.append(F)
all_C_src.append(C)
all_labels_src = np.asarray(all_labels_src)
all_preds_src = np.asarray(all_preds_src)
all_confs_src = np.asarray(all_confs_src)
all_F_src = torch.cat(all_F_src,dim=0).cpu().numpy()
all_C_src = torch.cat(all_C_src,dim=0).cpu().numpy()
###########################################################################################################################
# Learn means and covariances
###########################################################################################################################
adapt_lvl = all_F_src
N_CLASSES = trainer_S.settings['num_C'][trainer_S.src_domain]
Z_SIZE = adapt_lvl.shape[-1]
trainer_S.means = np.zeros((N_CLASSES, Z_SIZE))
trainer_S.covs = np.zeros((N_CLASSES, Z_SIZE, Z_SIZE))
for c in range(N_CLASSES):
idx = (all_labels_src == c) & (all_preds_src == c)
if np.sum(idx) == 0:
idx = all_labels_src == c
assert np.sum(idx) > 0
trainer_S.means[c] = np.mean(adapt_lvl[idx], axis=0)
trainer_S.covs[c] = np.dot((adapt_lvl[idx] - trainer_S.means[c]).T, (adapt_lvl[idx] - trainer_S.means[c])) / np.sum(idx)
if debug == True:
# Store intermediate results for debug purposes
trainer_S.adapt_lvl = adapt_lvl
trainer_S.all_labels_src = all_labels_src
| 4,208 | 38.336449 | 128 | py |
secure-muda | secure-muda-master/train_code/evaluate_model.py | import numpy as np
import config as config
import metrics as metrics
import gaussian_utils
from trainer import MultiSourceTrainer
from tqdm import tqdm
import torch
from torch.autograd import Variable
def get_individual_performance():
# Computes the target performance for source-only and post-adaptation models
# First outputs source only performance for each source domain, followed by target performance for each source domain
pre_adapt_accs = []
post_adapt_accs = []
# First, see the initial performance of the models
for src_domain_idx in range(len(config.settings['src_datasets'])):
print("Loading trainer for source domain {}".format(config.settings['src_datasets'][src_domain_idx]))
trainer_S = MultiSourceTrainer(src_domain_idx)
# trainer_S.load_model_weights()
trainer_S.load_model_weights(it_thresh='enough_iter')
pre_adapt_accs.append(trainer_S.val_over_target_set(save_weights=False))
# Next, the performance after adaptation
for src_domain_idx in range(len(config.settings['src_datasets'])):
print("Loading trainer for source domain {}".format(config.settings['src_datasets'][src_domain_idx]))
trainer_S = MultiSourceTrainer(src_domain_idx)
# trainer_S.load_model_weights('model_max_iter' + str(trainer_S.settings['max_iter']) + '.pth')
trainer_S.load_model_weights(it_thresh='max_iter')
post_adapt_accs.append(trainer_S.val_over_target_set(save_weights=False))
return np.asarray(pre_adapt_accs), np.asarray(post_adapt_accs)
def get_target_accuracy(weights, it_thresh='max_iter'):
# Computes the accuracy obtained by combining logits from several models
logit_sum = None
all_logits_dict = {}
all_labels_dict = {}
for src_domain_idx in range(len(config.settings['src_datasets'])):
print("Loading trainer for source domain {}".format(config.settings['src_datasets'][src_domain_idx]))
trainer_S = MultiSourceTrainer(src_domain_idx)
trainer_S.load_model_weights(it_thresh=it_thresh)
trainer_S.set_mode(trainer_S.settings['mode']['val'])
with torch.no_grad():
# Gather samples from both source domains
all_labels_tar = []
all_preds_tar = []
all_F_src = []
all_logits = []
dom = trainer_S.trgt_domain
trainer_S.initialize_target_val_dataloader()
for i in range(trainer_S.val_target_dataset.img.shape[0]):
images = trainer_S.val_target_dataset.img[i*trainer_S.batch_size : (i+1) * trainer_S.batch_size]
label = trainer_S.val_target_dataset.label[i*trainer_S.batch_size : (i+1) * trainer_S.batch_size]
if images.shape[0] == 0:
continue
x = images.to(trainer_S.settings['device']).float()
label = label.to(trainer_S.settings['device']).long()
F = trainer_S.network.model['Fs'](x)
C = trainer_S.network.model['C'](F)
cls_logits,_,mat = metrics.get_logits(feats={'C':C}, num_cls_heads=trainer_S.settings['num_cls_heads'])
all_logits.append(cls_logits)
all_labels_tar.extend(list(label.cpu().numpy()))
all_labels_tar = np.asarray(all_labels_tar)
all_logits = torch.cat(all_logits, dim=0).cpu().numpy()
all_logits_dict[src_domain_idx] = np.copy(all_logits)
all_labels_dict[src_domain_idx] = np.copy(all_labels_tar)
if logit_sum is None:
logit_sum = weights[src_domain_idx] * all_logits
else:
logit_sum += weights[src_domain_idx] * all_logits
labels_hat = np.argmax(logit_sum, axis=-1)
return np.mean(all_labels_tar == labels_hat), labels_hat, all_labels_tar
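# Illustrative usage sketch (the uniform weights are an assumption): combines the
# per-source-model logits with equal weights; learn_w_w2 and the other learn_w_* routines
# below produce data-driven alternatives to this uniform choice.
def _example_uniform_ensemble(it_thresh='max_iter'):
    n_src = len(config.settings['src_datasets'])
    weights = np.ones(n_src) / n_src
    acc, labels_hat, labels = get_target_accuracy(weights, it_thresh=it_thresh)
    print('uniform-weight target accuracy: {:.4f}'.format(acc))
    return acc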
def get_combined_predictions(weights, it_thresh='max_iter'):
# Computes the accuracy obtained by combining logits from several models
logit_sum = None
all_logits_dict = {}
all_labels_dict = {}
for src_domain_idx in range(len(config.settings['src_datasets'])):
print("Loading trainer for source domain {}".format(config.settings['src_datasets'][src_domain_idx]))
trainer_S = MultiSourceTrainer(src_domain_idx)
trainer_S.load_model_weights(it_thresh=it_thresh)
trainer_S.set_mode(trainer_S.settings['mode']['val'])
with torch.no_grad():
# Gather samples from both source domains
all_labels_tar = []
all_preds_tar = []
all_F_src = []
all_logits = []
dom = trainer_S.trgt_domain
trainer_S.initialize_target_val_dataloader()
for i in range(trainer_S.val_target_dataset.img.shape[0]):
images = trainer_S.val_target_dataset.img[i*trainer_S.batch_size : (i+1) * trainer_S.batch_size]
label = trainer_S.val_target_dataset.label[i*trainer_S.batch_size : (i+1) * trainer_S.batch_size]
if images.shape[0] == 0:
continue
x = images.to(trainer_S.settings['device']).float()
label = label.to(trainer_S.settings['device']).long()
F = trainer_S.network.model['Fs'](x)
C = trainer_S.network.model['C'](F)
cls_logits,_,mat = metrics.get_logits(feats={'C':C}, num_cls_heads=trainer_S.settings['num_cls_heads'])
all_logits.append(cls_logits)
all_labels_tar.extend(list(label.cpu().numpy()))
all_labels_tar = np.asarray(all_labels_tar)
all_logits = torch.cat(all_logits, dim=0).cpu().numpy()
all_logits_dict[src_domain_idx] = np.copy(all_logits)
all_labels_dict[src_domain_idx] = np.copy(all_labels_tar)
if logit_sum is None:
logit_sum = weights[src_domain_idx] * all_logits
else:
logit_sum += weights[src_domain_idx] * all_logits
labels_hat = np.argmax(logit_sum, axis=-1)
confidence_counts = {}
print(np.max(logit_sum, axis=1))
for conf in range(100):
c = conf/100
idx = np.max(logit_sum, axis=1) > c
print("for confidence {} there are {} samples and target accuracy is {}".format(c, np.sum(idx), np.mean(labels_hat[idx] == all_labels_tar[idx])))
confidence_counts[c] = np.sum(idx)
return confidence_counts, trainer_S.val_target_dataset.img.shape[0], labels_hat, all_labels_tar
def learn_w_w2(it_thresh='max_iter', num_steps=100):
w2_dist = np.zeros(len(config.settings['src_datasets']))
for src_domain_idx in range(len(config.settings['src_datasets'])):
print("Loading trainer for source domain {}".format(config.settings['src_datasets'][src_domain_idx]))
dom = config.settings['src_datasets'][src_domain_idx]
trainer = MultiSourceTrainer(src_domain_idx)
# learn the gaussians
trainer.load_model_weights(it_thresh='enough_iter')
gaussian_utils.learn_gaussians(trainer)
n_samples = np.ones(trainer.N_CLASSES, dtype=int) * trainer.settings["gaussian_samples_per_class"]
trainer.gaussian_z, trainer.gaussian_y = gaussian_utils.sample_from_gaussians(trainer.means, trainer.covs, n_samples)
# Load source and target dataloaders
trainer.initialize_src_train_dataloader()
trainer.initialize_target_adapt_dataloader()
# Find the batch size to use
batch_max = min(trainer.source_dataset_train.img.shape[0], trainer.adapt_target_dataset_train.img.shape[0])
batch_max = min(batch_max, trainer.adapt_batch_size)
trainer.batch_size = trainer.adapt_batch_size = batch_max
# Load model weights for source-only and post adaptation
src_trainer = MultiSourceTrainer(src_domain_idx)
src_trainer.load_model_weights(it_thresh='enough_iter')
src_trainer.set_mode(src_trainer.settings['mode']['val'])
adapt_trainer = MultiSourceTrainer(src_domain_idx)
adapt_trainer.load_model_weights(it_thresh='max_iter')
adapt_trainer.set_mode(adapt_trainer.settings['mode']['val'])
# Get the mean W2 distance between encodings of the current domain and the target domain
w2_dist[src_domain_idx] = 0
for step in range(num_steps):
# Get source samples
X_src,_ = trainer.source_dataset_train.sample(trainer.batch_size)
X_src = Variable(X_src).to(trainer.settings['device']).float()
X_tar,_ = trainer.adapt_target_dataset_train.sample(trainer.adapt_batch_size)
X_tar = Variable(X_tar).to(trainer.settings['device']).float()
# Compute the number of gaussian samples to be used for the current batch
normalized_dist = np.ones(trainer.N_CLASSES, dtype=int) / trainer.N_CLASSES
num_samples = np.array(normalized_dist * trainer.adapt_batch_size, dtype=int)
while batch_max > np.sum(num_samples):
idx = np.random.choice(range(trainer.N_CLASSES), p = normalized_dist)
num_samples[idx] += 1
# Get gaussian samples for the current batch
gz = []
gy = []
for c in range(trainer.N_CLASSES):
ind = np.where(trainer.gaussian_y == c)[0]
ind = ind[np.random.choice(range(len(ind)), num_samples[c], replace=False)]
gz.append(trainer.gaussian_z[ind])
gy.append(trainer.gaussian_y[ind])
gz = np.vstack(gz)
gy = np.concatenate(gy)
gz = torch.as_tensor(gz).to(trainer.settings['device']).float()
gy = torch.as_tensor(gy).to(trainer.settings['device']).long()
# Compute predictions
with torch.no_grad():
f_src = src_trainer.network.model['Fs'](X_src)
f_tar = adapt_trainer.network.model['Fs'](X_tar)
d1 = metrics.sliced_wasserstein_distance(f_src, gz, trainer.settings['num_projections'], 2, trainer.settings['device']).item()
d2 = metrics.sliced_wasserstein_distance(f_tar, gz, trainer.settings['num_projections'], 2, trainer.settings['device']).item()
w2_dist[src_domain_idx] += d1 + d2
if step % (num_steps // 10) == 0:
print(step, w2_dist / (step + 1), dom)
w2_dist = w2_dist / num_steps
print("Summation w = {}".format(w2_dist))
w = 1 / w2_dist
w = w / np.sum(w)
print("Final w = {}".format(w))
return w
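# Worked example for learn_w_w2 (illustrative numbers): with mean SWD distances
# w2_dist = [0.2, 0.4, 0.8], the inverse-distance weights are
# w = (1/w2_dist) / sum(1/w2_dist) = [0.571, 0.286, 0.143], so the source whose latent
# space stays closest to the target Gaussians gets the largest vote in the ensemble.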
# Learn the weights w minimizing the generalizability objective
# Note: Not source free, as we require simultaneous access to multiple source domains
def learn_w_generalizability(it_thresh='max_iter', num_steps=300):
trainers = {}
for src_domain_idx in range(len(config.settings['src_datasets'])):
print("Loading trainer for source domain {}".format(config.settings['src_datasets'][src_domain_idx]))
dom = config.settings['src_datasets'][src_domain_idx]
trainers[dom] = MultiSourceTrainer(src_domain_idx)
trainers[dom].load_model_weights(it_thresh=it_thresh)
trainers[dom].set_mode(trainers[dom].settings['mode']['val'])
trainers[dom].src_data = {}
trainers[dom].initialize_src_train_dataloader()
w_gen = np.zeros(len(config.settings['src_datasets']))
for d1_idx in range(len(config.settings['src_datasets'])):
for d2_idx in range(len(config.settings['src_datasets'])):
d1 = config.settings['src_datasets'][d1_idx]
d2 = config.settings['src_datasets'][d2_idx]
if d1 == d2:
continue
acc = 0
for i in range(trainers[d2].source_dataset_train.img.shape[0]):
images = trainers[d2].source_dataset_train.img[i*trainers[d2].batch_size : (i+1) * trainers[d2].batch_size]
label = trainers[d2].source_dataset_train.label[i*trainers[d2].batch_size : (i+1) * trainers[d2].batch_size]
if images.shape[0] == 0:
continue
x = images.to(trainers[d2].settings['device']).float()
label = label.to(trainers[d2].settings['device']).long()
with torch.no_grad():
Fs = trainers[d1].network.model['Fs'](x)
C = trainers[d1].network.model['C'](Fs)
cls_logits,_,_ = metrics.get_logits(feats={'C':C}, num_cls_heads=trainers[d1].settings['num_cls_heads'])
cls_confs,cls_preds = torch.max(cls_logits,dim=-1)
acc += torch.sum(cls_preds == label).item()
acc = acc / trainers[d2].source_dataset_train.img.shape[0]
w_gen[d1_idx] += acc
w_gen = w_gen / np.sum(w_gen)
print("Generalizability weights = {}".format(w_gen))
return w_gen
'''
Compare the methods by how many high confidence samples they have
'''
def learn_w_high_confidence(it_thresh='enough_iter', confidence=.5):
w_hc = np.zeros(len(config.settings['src_datasets']))
for src_domain_idx in range(len(config.settings['src_datasets'])):
trainer = MultiSourceTrainer(src_domain_idx)
trainer.load_model_weights(it_thresh=it_thresh)
trainer.set_mode(trainer.settings['mode']['val'])
trainer.initialize_target_val_dataloader()
for i in range(trainer.val_target_dataset.img.shape[0]):
images = trainer.val_target_dataset.img[i*trainer.batch_size : (i+1) * trainer.batch_size]
label = trainer.val_target_dataset.label[i*trainer.batch_size : (i+1) * trainer.batch_size]
if images.shape[0] == 0:
continue
x = images.to(trainer.settings['device']).float()
label = label.to(trainer.settings['device']).long()
with torch.no_grad():
Fs = trainer.network.model['Fs'](x)
C = trainer.network.model['C'](Fs)
cls_logits,_,_ = metrics.get_logits(feats={'C':C}, num_cls_heads=trainer.settings['num_cls_heads'])
cls_confs,cls_preds = torch.max(cls_logits,dim=-1)
w_hc[src_domain_idx] += torch.sum(cls_confs > confidence).item()
print("Number of high confidence samples for each domain = {}".format(w_hc))
w_hc_raw = np.copy(w_hc)
w_hc = w_hc / np.sum(w_hc)
print("High confidence w = {}".format(w_hc))
return w_hc, w_hc_raw, trainer.val_target_dataset.img.shape[0]
#### Each dataset has its own representation of the source domains!!!
def test_predictions(it_thresh='max_iter'):
trainers = {}
for src_domain_idx in range(len(config.settings['src_datasets'])):
trainers[src_domain_idx] = MultiSourceTrainer(src_domain_idx)
trainers[src_domain_idx].load_model_weights(it_thresh=it_thresh)
trainers[src_domain_idx].set_mode(trainers[src_domain_idx].settings['mode']['val'])
trainers[src_domain_idx].initialize_target_val_dataloader()
with torch.no_grad():
all_labels_trgt = []
all_Fs = {}
all_Cs = {}
all_logits = {}
all_confs = {}
all_preds = {}
all_preds_trgt = {}
for src_domain_idx in range(len(config.settings['src_datasets'])):
all_Fs[src_domain_idx] = []
all_Cs[src_domain_idx] = []
all_confs[src_domain_idx] = []
all_preds[src_domain_idx] = []
all_preds_trgt[src_domain_idx] = []
all_logits[src_domain_idx] = []
for src_domain_idx in range(len(config.settings['src_datasets'])):
for i in range(trainers[0].val_target_dataset.img.shape[0]):
images = trainers[src_domain_idx].val_target_dataset.img[i*trainers[0].batch_size : (i+1) * trainers[0].batch_size]
label = trainers[src_domain_idx].val_target_dataset.label[i*trainers[0].batch_size : (i+1) * trainers[0].batch_size]
if images.shape[0] == 0:
continue
x = images.to(trainers[0].settings['device']).float()
label = label.to(trainers[0].settings['device']).long()
Fs = trainers[src_domain_idx].network.model['Fs'](x)
C = trainers[src_domain_idx].network.model['C'](Fs)
all_Fs[src_domain_idx].extend(Fs.clone().detach().cpu().numpy())
all_Cs[src_domain_idx].extend(C.clone().detach().cpu().numpy())
cls_logits,_,_ = metrics.get_logits(feats={'C':C}, num_cls_heads=trainers[src_domain_idx].settings['num_cls_heads'])
cls_confs,cls_preds = torch.max(cls_logits,dim=-1)
all_logits[src_domain_idx].extend(cls_logits.clone().cpu().numpy())
all_confs[src_domain_idx].extend(cls_confs.clone().cpu().numpy())
all_preds[src_domain_idx].extend(cls_preds.clone().cpu().numpy())
all_preds_trgt[src_domain_idx].extend(list(cls_preds.cpu().numpy()))
if src_domain_idx == 0:
all_labels_trgt.extend(list(label.cpu().numpy()))
for src_domain_idx in range(len(config.settings['src_datasets'])):
all_Fs[src_domain_idx] = np.vstack(all_Fs[src_domain_idx])
all_Cs[src_domain_idx] = np.vstack(all_Cs[src_domain_idx])
all_logits[src_domain_idx] = np.vstack(all_logits[src_domain_idx])
all_preds[src_domain_idx] = np.asarray(all_preds[src_domain_idx])
all_labels_trgt = np.asarray(all_labels_trgt)
# Accuracies for each domain
for src_domain_idx in range(len(config.settings['src_datasets'])):
print("{} -> {}, adaptation target acc = {:.4f}".format(trainers[src_domain_idx].src_domain, trainers[src_domain_idx].trgt_domain, np.sum(all_preds[src_domain_idx] == all_labels_trgt) / len(all_labels_trgt))) | 18,668 | 44.645477 | 216 | py |
secure-muda | secure-muda-master/train_code/trainer.py | import numpy as np
import copy
import shutil
import os
import config as config
import metrics as metrics
from net import SingleSourceNet as smodel
from dataset import FeatureDataset, FrozenDataset
import gaussian_utils
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.nn.utils.weight_norm as weightNorm
import matplotlib.pyplot as plt
import time
class MultiSourceTrainer(object):
def __init__(self, src_domain_idx):
self.settings = copy.deepcopy(config.settings)
self.src_domain = self.settings['src_datasets'][src_domain_idx]
self.trgt_domain = self.settings['trgt_datasets'][0]
self.N_CLASSES = self.settings["num_C"][self.src_domain]
self.network = smodel(self.settings).to(self.settings['device'])
self.mixing_weights = None
self.to_train = self.settings['to_train']
self.pseudo_target_dist = None
# Batch size
self.batch_size = self.settings['batch_size']
self.val_batch_size = self.settings['val_batch_size_factor']*self.settings['batch_size']
self.adapt_batch_size = self.settings['adapt_batch_size']
self.current_iteration = self.settings['start_iter']
self.exp_name = self.settings['exp_name']
self.phase = self.settings['mode']['train']
# Datasets
self.source_dataset_train = None
self.target_dataset_val = None
self.val_target_dataset = None
self.adapt_target_dataset_train = None
self.itt_delete = []
self.best_src_val_acc = -1
self.initialize_src_train_dataloader()
# self.initialize_src_train_self_sup_dataloader()
self.init_optimizers()
self.disable_dropout() # dropout initially disabled
all_losses = self.optimizer_dict.keys()
self.active_losses = [current_loss for current_loss in all_losses if self.settings['use_loss'][current_loss]]
self.source_loss_history = []
self.adaptation_loss_history = {}
self.target_acc_history = []
self.it_history = []
assert self.settings['enough_iter'] % self.settings['val_after'] == 0
assert self.settings['max_iter'] % self.settings['val_after'] == 0
def init_folder_paths(self):
for name in ['weights_path', 'summaries_path']:
if not os.path.exists(self.settings[name]):
os.mkdir(self.settings[name])
if not os.path.exists(os.path.join(self.settings[name],self.settings['exp_name'])):
os.mkdir(os.path.join(self.settings[name],self.settings['exp_name']))
if not os.path.exists(os.path.join(self.settings[name],self.settings['exp_name'], self.src_domain)):
os.mkdir(os.path.join(self.settings[name],self.settings['exp_name'], self.src_domain))
else:
shutil.rmtree(os.path.join(self.settings[name],self.settings['exp_name'], self.src_domain))
os.mkdir(os.path.join(self.settings[name],self.settings['exp_name'], self.src_domain))
'''
Function to load model weights
'''
def load_model_weights(self, it_thresh='enough_iter', weights_file=None):
if weights_file == None:
weights_file = 'model_' + it_thresh + str(self.settings[it_thresh]) + '.pth'
load_weights_path = os.path.join(self.settings['weights_path'],self.settings['exp_name'], self.src_domain, weights_file)
print("Loading path = {}".format(load_weights_path))
dict_to_load = torch.load(load_weights_path,map_location=self.settings['device'])
model_state_dict = dict_to_load['model_state_dict']
for module,compts in self.network.model.items():
self.network.model[module].load_state_dict(model_state_dict[module])
'''
Function to load optimizer
'''
def load_optimizers(self, opt_file=None):
if opt_file == None:
opt_file = 'opt_enough_iter' + str(self.settings['enough_iter']) + '.pth'
load_weights_path = os.path.join(self.settings['weights_path'], self.settings['exp_name'], self.src_domain, opt_file)
dict_to_load = torch.load(load_weights_path,map_location=self.settings['device'])
optimizer_state_dict = dict_to_load['optimizer_state_dict']
for name,optimizer in self.optimizer_dict.items():
if self.settings['use_loss'][name]:
optimizer.load_state_dict(optimizer_state_dict[name])
def check_and_save_weights(self):
if self.current_iteration <= self.settings['enough_iter']:
val_acc = self.val_over_source_set()
if self.best_src_val_acc <= val_acc:
self.best_src_val_acc = val_acc
self.save_weights()
print("Saving at iteration {} with src. val accuracy = {}".format(self.current_iteration, self.best_src_val_acc))
else:
if self.current_iteration == self.settings['max_iter']:
self.save_weights()
'''
Function to save model and optimizer state
'''
def save_weights(self):
bkp_iter = self.current_iteration
if self.current_iteration <= self.settings['enough_iter']:
self.current_iteration = self.settings['enough_iter']
weights_path = self.settings['weights_path']
model_state_dict={}
for module,compts in self.network.model.items():
model_state_dict[module]=compts.cpu().state_dict()
optimizer_state_dict ={}
for name,optimizer in self.optimizer_dict.items():
optimizer_state_dict[name]=optimizer.state_dict()
save_dict = {
'model_state_dict':model_state_dict,
}
save_path = os.path.join(self.settings['weights_path'], self.exp_name, self.src_domain)
if not os.path.exists(save_path):
os.mkdir(save_path)
for it_thresh in ['enough_iter', 'max_iter']:
if self.current_iteration == self.settings[it_thresh]:
torch.save(save_dict, os.path.join(save_path, 'model_' + it_thresh + str(self.current_iteration) + '.pth'))
torch.save(save_dict, os.path.join(save_path, 'model_' + str(self.current_iteration) + '.pth'))
self.network.to(self.settings['device'])
save_dict = {
'optimizer_state_dict':optimizer_state_dict,
}
for it_thresh in ['enough_iter', 'max_iter']:
if self.current_iteration == self.settings[it_thresh]:
torch.save(save_dict, os.path.join(save_path, 'opt_' + it_thresh + str(self.current_iteration) + '.pth'))
torch.save(save_dict, os.path.join(save_path, 'opt_' + str(self.current_iteration) + '.pth'))
self.network.to(self.settings['device'])
self.current_iteration = bkp_iter
def save_summaries(self):
save_path = os.path.join(self.settings['summaries_path'], self.exp_name, self.src_domain)
np.savetxt(os.path.join(save_path, "exp_details" ), [self.settings['comments']], fmt="%s")
np.savetxt(os.path.join(save_path, "source_loss"), self.source_loss_history, fmt="%.5f")
for loss_type in self.adaptation_loss_history:
np.savetxt(os.path.join(save_path, "adaptation_loss_{}".format(loss_type)), self.adaptation_loss_history[loss_type], fmt="%.5f")
np.savetxt(os.path.join(save_path, "target_accuracy"), self.target_acc_history, fmt="%.5f")
np.savetxt(os.path.join(save_path, "distribution"), self.pseudo_target_dist, fmt="%.5f")
plt.title('Source log loss for domain {}'.format(self.src_domain))
plt.plot(np.log(self.source_loss_history))
plt.savefig(os.path.join(save_path, "source_loss_plot"))
plt.clf()
for loss_type in self.adaptation_loss_history:
plt.title('Adaptation log {} loss for domain {} -> {}'.format(loss_type, self.src_domain, self.trgt_domain))
plt.plot(np.log(self.adaptation_loss_history[loss_type]))
plt.savefig(os.path.join(save_path, "adaptation_{}_loss_plot".format(loss_type)))
plt.clf()
plt.title('Target accuracy')
plt.plot(self.it_history, self.target_acc_history)
plt.savefig(os.path.join(save_path, "target_accuracy_plot"))
plt.clf()
def load_summaries(self):
save_path = os.path.join(self.settings['summaries_path'], self.exp_name, self.src_domain)
self.source_loss_history = np.loadtxt(os.path.join(save_path, "source_loss"))
self.adaptation_loss_history = np.loadtxt(os.path.join(save_path, "adaptation_loss_total"))
self.target_acc_history = np.loadtxt(os.path.join(save_path, "target_accuracy"))
'''
Utility Functions to initialize source and target dataloaders
'''
def initialize_src_train_dataloader(self):
assert 0 < self.settings['train_ratio'][self.settings['dataset_name']] and self.settings['train_ratio'][self.settings['dataset_name']] <= 1
if self.source_dataset_train == None:
if 'domain-net' not in self.settings['dataset_name']:
self.source_dataset_train = FrozenDataset("{}_{}.csv".format(self.src_domain, self.src_domain), \
self.settings['train_ratio'][self.settings['dataset_name']])
# for x in self.settings['src_datasets']:
# if x != self.src_domain:
# self.source_dataset_other[x] = FrozenDataset("{}_{}.csv".format(self.src_domain, x), self.settings['train_ratio'][self.settings['dataset_name']])
else:
self.source_dataset_train = FrozenDataset("{}_{}_train.csv".format(self.src_domain, self.src_domain), 1.0)
source_val_data = FrozenDataset("{}_{}_test.csv".format(self.src_domain, self.src_domain), 1.0)
self.source_dataset_train.val_img = source_val_data.img
self.source_dataset_train.val_label = source_val_data.label
def initialize_target_val_dataloader(self):
if self.val_target_dataset == None:
if 'domain-net' not in self.settings['dataset_name']:
self.val_target_dataset = FrozenDataset("{}_{}.csv".format(self.src_domain, self.trgt_domain))
else:
self.val_target_dataset = FrozenDataset("{}_{}_test.csv".format(self.src_domain, self.trgt_domain))
def initialize_target_adapt_dataloader(self):
if self.adapt_target_dataset_train == None:
if 'domain-net' not in self.settings['dataset_name']:
self.adapt_target_dataset_train = FrozenDataset("{}_{}.csv".format(self.src_domain, self.trgt_domain))
else:
self.adapt_target_dataset_train = FrozenDataset("{}_{}_train.csv".format(self.src_domain, self.trgt_domain))
'''
Utility function to set the model in eval or train mode
'''
def set_mode(self,mode):
self.phase = mode
if self.phase == self.settings['mode']['train']:
self.network.train()
elif self.phase == self.settings['mode']['val']:
self.network.eval()
'''
Initializing optimizers
'''
def init_optimizers(self):
self.optimizer_dict = {}
# self.scheduler_dict = {}
to_train = self.settings['to_train']
for loss_name,loss_details in self.settings['optimizer_dict'].items():
if self.settings['use_loss'][loss_name]:
opt_param_list = []
for comp in loss_details:
if comp in to_train:
opt_param_list.append({'params':self.network.model[comp].parameters(), 'lr':self.settings['lr'][loss_name], 'weight_decay':5e-4})
self.optimizer_dict[loss_name] = optim.Adam(params = opt_param_list)
'''
Target dataset validation
'''
def val_over_target_set(self, save_weights=True):
self.set_mode(self.settings['mode']['val'])
self.initialize_target_val_dataloader()
with torch.no_grad():
all_labels_trgt = []
all_preds_trgt = []
for i in range(self.val_target_dataset.img.shape[0]):
images = self.val_target_dataset.img[i*self.batch_size : (i+1) * self.batch_size]
label = self.val_target_dataset.label[i*self.batch_size : (i+1) * self.batch_size]
if images.shape[0] == 0:
continue
x = images.to(self.settings['device']).float()
label = label.to(self.settings['device']).long()
F = self.network.model['Fs'](x)
C = self.network.model['C'](F)
cls_logits,_,mat = metrics.get_logits(feats={'C':C}, num_cls_heads=self.settings['num_cls_heads'])
cls_confs,cls_preds = torch.max(cls_logits,dim=-1)
all_labels_trgt.extend(list(label.cpu().numpy()))
all_preds_trgt.extend(list(cls_preds.cpu().numpy()))
if save_weights:
self.target_acc_history.append(metrics.get_metric('cls_acc',feats={'cls_labels':all_labels_trgt,'cls_preds':all_preds_trgt}))
self.it_history.append(self.current_iteration)
else:
print("target accuracy at iteration {} = {}".format(self.current_iteration, metrics.get_metric('cls_acc',feats={'cls_labels':all_labels_trgt,'cls_preds':all_preds_trgt})))
return metrics.get_metric('cls_acc',feats={'cls_labels':all_labels_trgt,'cls_preds':all_preds_trgt})
'''
Source dataset validation
'''
def val_over_source_set(self, save_weights=True):
self.set_mode(self.settings['mode']['val'])
# self.initialize_src_val_dataloader()
self.initialize_src_train_dataloader()
with torch.no_grad():
all_labels_src = []
all_preds_src = []
for i in range(self.source_dataset_train.val_img.shape[0]):
images = self.source_dataset_train.val_img[i*self.batch_size : (i+1) * self.batch_size]
label = self.source_dataset_train.val_label[i*self.batch_size : (i+1) * self.batch_size]
if images.shape[0] == 0:
continue
x = images.to(self.settings['device']).float()
label = label.to(self.settings['device']).long()
F = self.network.model['Fs'](x)
C = self.network.model['C'](F)
cls_logits,_,mat = metrics.get_logits(feats={'C':C}, num_cls_heads=self.settings['num_cls_heads'])
cls_confs,cls_preds = torch.max(cls_logits,dim=-1)
all_labels_src.extend(list(label.cpu().numpy()))
all_preds_src.extend(list(cls_preds.cpu().numpy()))
return metrics.get_metric('cls_acc',feats={'cls_labels':all_labels_src,'cls_preds':all_preds_src})
'''
Function to calculate the loss value
'''
def get_loss(self,which_loss):
assert which_loss in ['source', 'target']
if which_loss == 'source':
src_C = self.src_features['C']
src_labels = self.src_features['labels']
ce_loss = metrics.CrossEntropyLabelSmooth(num_classes=self.N_CLASSES, epsilon=0.1)(src_C, src_labels)
loss = ce_loss
self.source_loss_history.append(loss.item())
elif which_loss == 'target':
trgt_F = self.trgt_features['F']
trgt_C = self.trgt_features['C']
####################################################################################################
# Conditional Entropy loss
####################################################################################################
start_time = time.time()
logits,_,_ = metrics.get_logits(feats={'C':trgt_C}, num_cls_heads=self.settings['num_cls_heads'])
if self.settings['dataset_name'] in ['office-home', 'domain-net']:
softmax_ = nn.Softmax(dim=1)(logits) # slow down convergence
else:
softmax_ = logits
entropy = metrics.Entropy(softmax_)
conditional_entropy_loss = torch.mean(entropy)
conditional_entropy_loss = self.settings['gamma'][self.settings['dataset_name']] * conditional_entropy_loss
if self.current_iteration % self.settings['val_after']== 0 and self.settings['verbose']==True:
print("Computed entropy loss in {:.4f}".format(time.time() - start_time))
if 'conditional_entropy' in self.adaptation_loss_history:
self.adaptation_loss_history['conditional_entropy'].append(conditional_entropy_loss.item())
else:
self.adaptation_loss_history['conditional_entropy'] = [conditional_entropy_loss.item()]
####################################################################################################
# W2 loss
####################################################################################################
start_time = time.time()
# Compute the number of gaussian samples to be used for the current batch
normalized_dist = self.pseudo_target_dist / np.sum(self.pseudo_target_dist)
num_samples = np.array(normalized_dist * self.adapt_batch_size, dtype=int)
while self.adapt_batch_size > np.sum(num_samples):
idx = np.random.choice(range(self.N_CLASSES), p = normalized_dist)
num_samples[idx] += 1
# Get gaussian samples for the current batch
gz = []
gy = []
for c in range(self.N_CLASSES):
ind = torch.where(self.gaussian_y == c)[0]
perm = torch.randperm(ind.shape[0])
ind = ind[perm][:num_samples[c]]
gz.append(self.gaussian_z[ind])
gy.append(self.gaussian_y[ind])
gz = torch.cat(gz)
gy = torch.cat(gy)
if self.current_iteration % self.settings['val_after']== 0 and self.settings['verbose']==True:
print("Sampling from Gaussians took {:.4f}".format(time.time() - start_time))
w2_loss = metrics.sliced_wasserstein_distance(trgt_F, gz, self.settings['num_projections'], 2, self.settings['device'])
if 'w2' in self.adaptation_loss_history:
self.adaptation_loss_history['w2'].append(w2_loss.item())
else:
self.adaptation_loss_history['w2'] = [w2_loss.item()]
if self.current_iteration % self.settings['val_after']== 0 and self.settings['verbose']==True:
print("Computed W2 loss in {:.4f}".format(time.time() - start_time))
loss = w2_loss + conditional_entropy_loss
if 'total' in self.adaptation_loss_history:
self.adaptation_loss_history['total'].append(loss.item())
else:
self.adaptation_loss_history['total'] = [loss.item()]
return loss
'''
Function to select active losses
'''
def loss(self):
optim = self.optimizer_dict[self.active_losses[self.current_loss]]
optim.zero_grad()
loss = self.get_loss(self.active_losses[self.current_loss])
loss.backward()
optim.step()
'''
Function to implement the forward prop for a single source
'''
def forward(self):
self.set_mode(self.settings['mode']['train'])
if self.active_losses[self.current_loss] == 'source':
# Computing the values for the source domain
self.src_features = {}
images,labels = self.src_data['images'],self.src_data['labels']
feats_F = self.network.model['Fs'](images)
feats_C = self.network.model['C'](feats_F)
self.src_features['F'] = feats_F
self.src_features['C'] = feats_C
self.src_features['labels'] = labels
elif self.active_losses[self.current_loss] == 'target':
start_time = time.time()
# Computing target domain info
self.trgt_features = {}
images,labels = self.trgt_data['images'],self.trgt_data['labels']
# During adaptation, keep the feature extractor frozen
feats_F = self.network.model['Fs'](images)
feats_C = self.network.model['C'](feats_F)
self.trgt_features['F'] = feats_F
self.trgt_features['C'] = feats_C
self.trgt_features['labels'] = labels
if self.current_iteration % self.settings['val_after']== 0 and self.settings['verbose']==True:
print("Forward pass of the network in {:.4f}".format(time.time() - start_time))
def disable_dropout(self):
for module in self.network.model['Fs'].net.modules():
if isinstance(module, torch.nn.Dropout):
module.eval()
for module in self.network.model['C'].net.modules():
if isinstance(module, torch.nn.Dropout):
module.eval()
def enable_dropout(self):
for module in self.network.model['Fs'].net.modules():
if isinstance(module, torch.nn.Dropout):
module.train()
for module in self.network.model['C'].net.modules():
if isinstance(module, torch.nn.Dropout):
module.train()
'''
Function for training the the data
This function is called at every iteration
'''
def train(self):
self.src_data = {}
self.trgt_data = {}
self.current_loss = 0
if self.current_iteration > max(self.settings['val_after'],self.settings['enough_iter']):
self.current_loss = 1
cond_1 = self.active_losses[self.current_loss] not in self.settings['losses_after_enough_iters']
cond_2 = self.current_iteration <= max(self.settings['val_after'],self.settings['enough_iter'])
# self.initialize_target_adapt_dataloader()
if (cond_1 and cond_2) or (not cond_2):
if self.current_iteration <= max(self.settings['val_after'],self.settings['enough_iter']):
self.src_data['images'], self.src_data['labels'] = self.source_dataset_train.sample(self.batch_size)
self.src_data['images'] = Variable(self.src_data['images']).to(self.settings['device']).float()
self.src_data['labels'] = Variable(self.src_data['labels']).to(self.settings['device']).long()
else:
# Distribution matching between target domain and gaussians
if self.current_iteration == max(self.settings['val_after'], self.settings['enough_iter']) + 1:
# Set the learning rate appropriately
self.init_optimizers()
self.disable_dropout()
print()
print("STARTING ADAPTION FOR MODEL TRAINED ON {}".format(self.src_domain))
self.pseudo_target_dist = np.ones(self.N_CLASSES, dtype=int)
print("FINALIZED DISTRIBUTION FOR ADAPTATION ON {}:".format(self.trgt_domain))
print(self.pseudo_target_dist)
# Learn Gaussians
gaussian_utils.learn_gaussians(self)
n_samples = np.ones(self.N_CLASSES, dtype=int) * self.settings["gaussian_samples_per_class"]
self.gaussian_z, self.gaussian_y = gaussian_utils.sample_from_gaussians(self.means, self.covs, n_samples)
self.gaussian_z = torch.as_tensor(self.gaussian_z).to(self.settings['device']).float()
self.gaussian_y = torch.as_tensor(self.gaussian_y).to(self.settings['device']).long()
self.initialize_target_adapt_dataloader()
# Keep the classifiers frozen
for param in self.network.model['C'].parameters():
param.requires_grad = False
start_time = time.time()
self.trgt_data['images'],self.trgt_data['labels'] = self.adapt_target_dataset_train.sample(self.adapt_batch_size)
self.trgt_data['images'] = Variable(self.trgt_data['images']).to(self.settings['device']).float()
self.trgt_data['labels'] = Variable(self.trgt_data['labels']).to(self.settings['device']).long()
if self.current_iteration % self.settings['val_after']== 0 and self.settings['verbose']==True:
print("Sampled batch in {:.4f}".format(time.time() - start_time))
# Correctly set networks and optimizers for source training or adaptation
if self.current_iteration > max(self.settings['val_after'],self.settings['enough_iter']):
self.disable_dropout()
else:
self.enable_dropout()
self.forward()
self.loss() | 27,051 | 47.480287 | 187 | py |
sar_transformer | sar_transformer-main/test.py | # Code for testing on real SAR images
# Author: Malsha Perera
import argparse
import torch
import torchvision
from torch import nn
from torchvision.transforms import functional as F
import os
import numpy as np
import torch
from transform_main import TransSAR, TransSARV2, TransSARV3
import cv2
parser = argparse.ArgumentParser(description='TransSAR')
parser.add_argument('--cuda', default="on", type=str,
help='switch on/off cuda option (default: off)')
parser.add_argument('--load', default='default', type=str,
help='turn on img augmentation (default: default)')
parser.add_argument('--save_path', required=True , type=str,
help='turn on img augmentation (default: default)')
parser.add_argument('--model', type=str,
help='model name')
parser.add_argument('--crop', type=int, default=None)
parser.add_argument('--device', default='cuda', type=str)
parser.add_argument('--loadmodel', default='load', type=str)
args = parser.parse_args()
modelname = args.model
loaddirec = args.loadmodel
save_path = args.save_path
device = torch.device("cuda")
model = TransSARV2()
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model,device_ids=[0,1]).cuda()
model.to(device)
model.load_state_dict(torch.load(loaddirec))
model.eval()
if not os.path.isdir(save_path):
os.makedirs(save_path)
im_file = './test_images/test_01.png'
img = cv2.imread(im_file,0)
noisy_im = (np.float32(img)+1.0)/256.0
x = np.float32(noisy_im)
x = F.to_tensor(x)
x = x.unsqueeze(0)
pred_im = model(x)
tmp = pred_im.detach().cpu().numpy()
tmp = tmp.squeeze()
tmp = tmp*256 -1
filename_out = 'test_01_results.png'
filepath_out = save_path + filename_out
cv2.imwrite(filepath_out,tmp)
print('done')
| 1,872 | 18.925532 | 71 | py |
sar_transformer | sar_transformer-main/transform_main.py | import torch
import torch.nn as nn
import torch.nn.functional
import torch.nn.functional as F
from functools import partial
from arch.trans_basenetworks import *
import timm
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import types
import math
from abc import ABCMeta, abstractmethod
from mmcv.cnn import ConvModule
import pdb
class EncoderTransformer(nn.Module):
def __init__(self, img_size=256, patch_size=16, in_chans=1, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[2, 2, 2, 2], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
super().__init__()
self.num_classes = num_classes
self.depths = depths
# patch embedding definitions
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# for Intra-patch transformer blocks
# self.mini_patch_embed1 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
# embed_dim=embed_dims[1])
# self.mini_patch_embed2 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
# embed_dim=embed_dims[2])
# self.mini_patch_embed3 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
# embed_dim=embed_dims[3])
# self.mini_patch_embed4 = OverlapPatchEmbed(img_size=img_size // 32, patch_size=3, stride=2, in_chans=embed_dims[0],
# embed_dim=embed_dims[3])
# main encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[0])
# intra-patch encoder
self.patch_block1 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(1)])
self.pnorm1 = norm_layer(embed_dims[1])
# main encoder
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
self.norm2 = norm_layer(embed_dims[1])
# intra-patch encoder
self.patch_block2 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(1)])
self.pnorm2 = norm_layer(embed_dims[2])
# main encoder
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
self.norm3 = norm_layer(embed_dims[2])
# intra-patch encoder
self.patch_block3 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[1], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(1)])
self.pnorm3 = norm_layer(embed_dims[3])
# main encoder
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm4 = norm_layer(embed_dims[3])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
# def init_weights(self, pretrained=None):
# if isinstance(pretrained, str):
# logger = get_root_logger()
# load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def forward_features(self, x):
B = x.shape[0]
outs = []
embed_dims=[64, 128, 320, 512]
# stage 1
x1, H1, W1 = self.patch_embed1(x)
# x2, H2, W2 = self.mini_patch_embed1(x1.permute(0,2,1).reshape(B,embed_dims[0],H1,W1))
for i, blk in enumerate(self.block1):
x1 = blk(x1, H1, W1)
x1 = self.norm1(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
# for i, blk in enumerate(self.patch_block1):
# x2 = blk(x2, H2, W2)
# x2 = self.pnorm1(x2)
# x2 = x2.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 2
x1, H1, W1 = self.patch_embed2(x1)
x1 = x1.permute(0,2,1).reshape(B,embed_dims[1],H1,W1)
# x2, H2, W2 = self.mini_patch_embed2(x1)
x1 = x1.view(x1.shape[0],x1.shape[1],-1).permute(0,2,1)
for i, blk in enumerate(self.block2):
x1 = blk(x1, H1, W1)
x1 = self.norm2(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# for i, blk in enumerate(self.patch_block2):
# x2 = blk(x2, H2, W2)
# x2 = self.pnorm2(x2)
# x2 = x2.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
# stage 3
x1, H1, W1 = self.patch_embed3(x1)
x1 = x1.permute(0,2,1).reshape(B,embed_dims[2],H1,W1)
# x2, H2, W2 = self.mini_patch_embed3(x1)
x1 = x1.view(x1.shape[0],x1.shape[1],-1).permute(0,2,1)
for i, blk in enumerate(self.block3):
x1 = blk(x1, H1, W1)
x1 = self.norm3(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# for i, blk in enumerate(self.patch_block3):
# x2 = blk(x2, H2, W2)
# x2 = self.pnorm3(x2)
# x2 = x2.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
# stage 4
x1, H1, W1 = self.patch_embed4(x1)
x1 = x1.permute(0,2,1).reshape(B,embed_dims[3],H1,W1)
x1 = x1.view(x1.shape[0],x1.shape[1],-1).permute(0,2,1)
for i, blk in enumerate(self.block4):
x1 = blk(x1, H1, W1)
x1 = self.norm4(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
return outs
def forward(self, x):
x = self.forward_features(x)
return x
class EncoderTransformerV2(nn.Module):
def __init__(self, img_size=256, patch_size=16, in_chans=1, num_classes=1, embed_dims=[32, 64, 128, 320, 512],
num_heads=[1, 1, 2, 4, 8], mlp_ratios=[2, 2, 2, 2, 2], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 3, 4, 6, 3], sr_ratios=[8, 8, 4, 2, 1]):
super().__init__()
self.num_classes = num_classes
self.depths = depths
# patch embedding definitions (COnvolutional layer)
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=2, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 2, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
self.patch_embed5 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[3],
embed_dim=embed_dims[4])
# Stage 1 (h/2 x w/2)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[0])
# Stage 2 (h/4 x w/4)
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
self.norm2 = norm_layer(embed_dims[1])
# Stage 3 (h/8 x w/8)
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
self.norm3 = norm_layer(embed_dims[2])
# Stage 4 (h/16 x w/16)
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm4 = norm_layer(embed_dims[3])
# Stage 5 (h/32 x w/32)
cur += depths[3]
self.block5 = nn.ModuleList([Block(
dim=embed_dims[4], num_heads=num_heads[4], mlp_ratio=mlp_ratios[4], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[4])
for i in range(depths[4])])
self.norm5 = norm_layer(embed_dims[4])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
# def init_weights(self, pretrained=None):
# if isinstance(pretrained, str):
# logger = get_root_logger()
# load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[3]
for i in range(self.depths[4]):
self.block5[i].drop_path.drop_prob = dpr[cur + i]
def forward_features(self, x):
B = x.shape[0]
outs = []
# stage 1
x1, H1, W1 = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x1 = blk(x1, H1, W1)
x1 = self.norm1(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 2
x1, H1, W1 = self.patch_embed2(x1)
for i, blk in enumerate(self.block2):
x1 = blk(x1, H1, W1)
x1 = self.norm2(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 3
x1, H1, W1 = self.patch_embed3(x1)
for i, blk in enumerate(self.block3):
x1 = blk(x1, H1, W1)
x1 = self.norm3(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 4
x1, H1, W1 = self.patch_embed4(x1)
for i, blk in enumerate(self.block4):
x1 = blk(x1, H1, W1)
x1 = self.norm4(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 5
x1, H1, W1 = self.patch_embed5(x1)
for i, blk in enumerate(self.block5):
x1 = blk(x1, H1, W1)
x1 = self.norm5(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
return outs
def forward(self, x):
x = self.forward_features(x)
return x
class EncoderTransformerV3(nn.Module):
def __init__(self, img_size=256, patch_size=16, in_chans=1, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
super().__init__()
self.num_classes = num_classes
self.depths = depths
# patch embedding definitions
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# for Intra-patch transformer blocks
self.mini_patch_embed1 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.mini_patch_embed2 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.mini_patch_embed3 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
self.mini_patch_embed4 = OverlapPatchEmbed(img_size=img_size // 32, patch_size=3, stride=2, in_chans=embed_dims[0],
embed_dim=embed_dims[3])
# main encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[0])
# intra-patch encoder
self.patch_block1 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(1)])
self.pnorm1 = norm_layer(embed_dims[1])
# main encoder
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
self.norm2 = norm_layer(embed_dims[1])
# intra-patch encoder
self.patch_block2 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(1)])
self.pnorm2 = norm_layer(embed_dims[2])
# main encoder
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
self.norm3 = norm_layer(embed_dims[2])
# intra-patch encoder
self.patch_block3 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[1], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(1)])
self.pnorm3 = norm_layer(embed_dims[3])
# main encoder
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm4 = norm_layer(embed_dims[3])
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
# def init_weights(self, pretrained=None):
# if isinstance(pretrained, str):
# logger = get_root_logger()
# load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def forward_features(self, x):
B = x.shape[0]
outs = []
embed_dims=[64, 128, 320, 512]
# stage 1
x1, H1, W1 = self.patch_embed1(x)
x2, H2, W2 = self.mini_patch_embed1(x1.permute(0,2,1).reshape(B,embed_dims[0],H1,W1))
for i, blk in enumerate(self.block1):
x1 = blk(x1, H1, W1)
x1 = self.norm1(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
for i, blk in enumerate(self.patch_block1):
x2 = blk(x2, H2, W2)
x2 = self.pnorm1(x2)
x2 = x2.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
# stage 2
x1, H1, W1 = self.patch_embed2(x1)
x1 = x1.permute(0,2,1).reshape(B,embed_dims[1],H1,W1)+x2
x2, H2, W2 = self.mini_patch_embed2(x1)
x1 = x1.view(x1.shape[0],x1.shape[1],-1).permute(0,2,1)
for i, blk in enumerate(self.block2):
x1 = blk(x1, H1, W1)
x1 = self.norm2(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
for i, blk in enumerate(self.patch_block2):
x2 = blk(x2, H2, W2)
x2 = self.pnorm2(x2)
x2 = x2.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
# stage 3
x1, H1, W1 = self.patch_embed3(x1)
x1 = x1.permute(0,2,1).reshape(B,embed_dims[2],H1,W1)+x2
x2, H2, W2 = self.mini_patch_embed3(x1)
x1 = x1.view(x1.shape[0],x1.shape[1],-1).permute(0,2,1)
for i, blk in enumerate(self.block3):
x1 = blk(x1, H1, W1)
x1 = self.norm3(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
for i, blk in enumerate(self.patch_block3):
x2 = blk(x2, H2, W2)
x2 = self.pnorm3(x2)
x2 = x2.reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
# stage 4
x1, H1, W1 = self.patch_embed4(x1)
x1 = x1.permute(0,2,1).reshape(B,embed_dims[3],H1,W1)+x2
x1 = x1.view(x1.shape[0],x1.shape[1],-1).permute(0,2,1)
for i, blk in enumerate(self.block4):
x1 = blk(x1, H1, W1)
x1 = self.norm4(x1)
x1 = x1.reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x1)
return outs
def forward(self, x):
x = self.forward_features(x)
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
# pdb.set_trace()
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h, input_w = tuple(int(x) for x in input.shape[2:])
output_h, output_w = tuple(int(x) for x in size)
if output_h > input_h or output_w > output_h:
if ((output_h > 1 and output_w > 1 and input_h > 1
and input_w > 1) and (output_h - 1) % (input_h - 1)
and (output_w - 1) % (input_w - 1)):
warnings.warn(
f'When align_corners={align_corners}, '
'the output would more aligned if '
f'input size {(input_h, input_w)} is `x+1` and '
f'out size {(output_h, output_w)} is `nx+1`')
return F.interpolate(input, size, scale_factor, mode, align_corners)
############################################################
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = self.fc1(x)
x = self.dwconv(x, H, W)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Attention_dec(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.task_query = nn.Parameter(torch.randn(1,48,dim))
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
B, N, C = x.shape
task_q = self.task_query
# This is because we fix the task parameters to be of a certain dimension, so with varying batch size, we just stack up the same queries to operate on the entire batch
if B>1:
task_q = task_q.unsqueeze(0).repeat(B,1,1,1)
task_q = task_q.squeeze(1)
q = self.q(task_q).reshape(B, task_q.shape[1], self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
q = torch.nn.functional.interpolate(q,size= (v.shape[2],v.shape[3]))
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block_dec(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention_dec(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class DWConv(nn.Module):
def __init__(self, dim=768):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, x, H, W):
B, N, C = x.shape
x = x.transpose(1, 2).view(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2).transpose(1, 2)
return x
class DecoderTransformer(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
super().__init__()
self.num_classes = num_classes
self.depths = depths
# patch_embed
self.patch_embed1 = OverlapPatchEmbed(img_size=img_size//16, patch_size=3, stride=2, in_chans=embed_dims[3],
embed_dim=embed_dims[3])
# transformer decoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block_dec(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[0])])
self.norm1 = norm_layer(embed_dims[3])
cur += depths[0]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
# def init_weights(self, pretrained=None):
# if isinstance(pretrained, str):
# logger = get_root_logger()
# load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def forward_features(self, x):
x=x[3]
B = x.shape[0]
outs = []
# stage 1
x, H, W = self.patch_embed1(x)
for i, blk in enumerate(self.block1):
x = blk(x, H, W)
x = self.norm1(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
return outs
def forward(self, x):
x = self.forward_features(x)
# x = self.head(x)
return x
class Tenc(EncoderTransformer):
def __init__(self, **kwargs):
super(Tenc, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 4, 4], mlp_ratios=[2, 2, 2, 2],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[4, 2, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class TencV3(EncoderTransformerV3):
def __init__(self, **kwargs):
super(TencV3, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 4, 4], mlp_ratios=[2, 2, 2, 2],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[4, 2, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class Tdec(DecoderTransformer):
def __init__(self, **kwargs):
super(Tdec, self).__init__(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
drop_rate=0.0, drop_path_rate=0.1)
class convprojection(nn.Module):
def __init__(self, path=None, **kwargs):
super(convprojection,self).__init__()
self.convd32x = UpsampleConvLayer(512, 512, kernel_size=4, stride=2)
self.convd16x = UpsampleConvLayer(512, 320, kernel_size=4, stride=2)
self.dense_4 = nn.Sequential(ResidualBlock(320))
self.convd8x = UpsampleConvLayer(320, 128, kernel_size=4, stride=2)
self.dense_3 = nn.Sequential(ResidualBlock(128))
self.convd4x = UpsampleConvLayer(128, 64, kernel_size=4, stride=2)
self.dense_2 = nn.Sequential(ResidualBlock(64))
self.convd2x = UpsampleConvLayer(64, 16, kernel_size=4, stride=2)
self.dense_1 = nn.Sequential( ResidualBlock(16))
self.convd1x = UpsampleConvLayer(16, 8, kernel_size=4, stride=2)
self.conv_output = ConvLayer(8, 1, kernel_size=3, stride=1, padding=1)
self.active = nn.Tanh()
def forward(self,x1,x2):
res32x = self.convd32x(x2[0])
if x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
p2d = (0,-1,0,-1)
res32x = F.pad(res32x,p2d,"constant",0)
elif x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] == res32x.shape[2]:
p2d = (0,-1,0,0)
res32x = F.pad(res32x,p2d,"constant",0)
elif x1[3].shape[3] == res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
p2d = (0,0,0,-1)
res32x = F.pad(res32x,p2d,"constant",0)
res16x = res32x + x1[3]
res16x = self.convd16x(res16x)
if x1[2].shape[3] != res16x.shape[3] and x1[2].shape[2] != res16x.shape[2]:
p2d = (0,-1,0,-1)
res16x = F.pad(res16x,p2d,"constant",0)
elif x1[2].shape[3] != res16x.shape[3] and x1[2].shape[2] == res16x.shape[2]:
p2d = (0,-1,0,0)
res16x = F.pad(res16x,p2d,"constant",0)
elif x1[2].shape[3] == res16x.shape[3] and x1[2].shape[2] != res16x.shape[2]:
p2d = (0,0,0,-1)
res16x = F.pad(res16x,p2d,"constant",0)
res8x = self.dense_4(res16x) + x1[2]
res8x = self.convd8x(res8x)
res4x = self.dense_3(res8x) + x1[1]
res4x = self.convd4x(res4x)
res2x = self.dense_2(res4x) + x1[0]
res2x = self.convd2x(res2x)
x = res2x
x = self.dense_1(x)
x = self.convd1x(x)
return x
class convprojection_base(nn.Module):
def __init__(self, path=None, **kwargs):
super(convprojection_base,self).__init__()
# self.convd32x = UpsampleConvLayer(512, 512, kernel_size=4, stride=2)
self.convd16x = UpsampleConvLayer(512, 320, kernel_size=4, stride=2)
self.dense_4 = nn.Sequential(ResidualBlock(320))
self.convd8x = UpsampleConvLayer(320, 128, kernel_size=4, stride=2)
self.dense_3 = nn.Sequential(ResidualBlock(128))
self.convd4x = UpsampleConvLayer(128, 64, kernel_size=4, stride=2)
self.dense_2 = nn.Sequential(ResidualBlock(64))
self.convd2x = UpsampleConvLayer(64, 16, kernel_size=4, stride=2)
self.dense_1 = nn.Sequential( ResidualBlock(16))
self.convd1x = UpsampleConvLayer(16, 8, kernel_size=4, stride=2)
self.conv_output = ConvLayer(8, 1, kernel_size=3, stride=1, padding=1)
self.active = nn.Tanh()
def forward(self,x1):
# if x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
# p2d = (0,-1,0,-1)
# res32x = F.pad(res32x,p2d,"constant",0)
# elif x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] == res32x.shape[2]:
# p2d = (0,-1,0,0)
# res32x = F.pad(res32x,p2d,"constant",0)
# elif x1[3].shape[3] == res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
# p2d = (0,0,0,-1)
# res32x = F.pad(res32x,p2d,"constant",0)
# res16x = res32x + x1[3]
res16x = self.convd16x(x1[3])
if x1[2].shape[3] != res16x.shape[3] and x1[2].shape[2] != res16x.shape[2]:
p2d = (0,-1,0,-1)
res16x = F.pad(res16x,p2d,"constant",0)
elif x1[2].shape[3] != res16x.shape[3] and x1[2].shape[2] == res16x.shape[2]:
p2d = (0,-1,0,0)
res16x = F.pad(res16x,p2d,"constant",0)
elif x1[2].shape[3] == res16x.shape[3] and x1[2].shape[2] != res16x.shape[2]:
p2d = (0,0,0,-1)
res16x = F.pad(res16x,p2d,"constant",0)
res8x = self.dense_4(res16x) + x1[2]
res8x = self.convd8x(res8x)
res4x = self.dense_3(res8x) + x1[1]
res4x = self.convd4x(res4x)
res2x = self.dense_2(res4x) + x1[0]
res2x = self.convd2x(res2x)
x = res2x
x = self.dense_1(x)
x = self.convd1x(x)
return x
class convprojection_baseV2(nn.Module):
def __init__(self, path=None, **kwargs):
super(convprojection_baseV2,self).__init__()
self.convd32x = UpsampleConvLayer(512, 320, kernel_size=4, stride=2)
self.dense_5 = nn.Sequential(ResidualBlock(320))
self.convd16x = UpsampleConvLayer(320, 128, kernel_size=4, stride=2)
self.dense_4 = nn.Sequential(ResidualBlock(128))
self.convd8x = UpsampleConvLayer(128, 64, kernel_size=4, stride=2)
self.dense_3 = nn.Sequential(ResidualBlock(64))
self.convd4x = UpsampleConvLayer(64, 32, kernel_size=4, stride=2)
self.dense_2 = nn.Sequential(ResidualBlock(32))
self.convd2x = UpsampleConvLayer(32, 16, kernel_size=4, stride=2)
self.dense_1 = nn.Sequential( ResidualBlock(16))
self.convd1x = UpsampleConvLayer(16, 8, kernel_size=3, stride=1)
def forward(self,x1):
# if x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
# p2d = (0,-1,0,-1)
# res32x = F.pad(res32x,p2d,"constant",0)
# elif x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] == res32x.shape[2]:
# p2d = (0,-1,0,0)
# res32x = F.pad(res32x,p2d,"constant",0)
# elif x1[3].shape[3] == res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
# p2d = (0,0,0,-1)
# res32x = F.pad(res32x,p2d,"constant",0)
# res16x = res32x + x1[3]
res16x = self.convd32x(x1[4])
res16x = self.dense_5(res16x) + x1[3]
res8x = self.convd16x(res16x)
res8x = self.dense_4(res8x) + x1[2]
res4x = self.convd8x(res8x)
res4x = self.dense_3(res4x) + x1[1]
res2x = self.convd4x(res4x)
res2x = self.dense_2(res2x) + x1[0]
res1x = self.convd2x(res2x)
x = self.dense_1(res1x)
x = self.convd1x(x)
return x
class convprojection_baseV3(nn.Module):
def __init__(self, path=None, **kwargs):
super(convprojection_baseV3,self).__init__()
# self.convd32x = UpsampleConvLayer(512, 512, kernel_size=4, stride=2)
self.convd16x = UpsampleConvLayer(512, 320, kernel_size=4, stride=2)
self.dense_4 = nn.Sequential(ResidualBlock(320))
self.convd8x = UpsampleConvLayer(320, 128, kernel_size=4, stride=2)
self.dense_3 = nn.Sequential(ResidualBlock(128))
self.convd4x = UpsampleConvLayer(128, 64, kernel_size=4, stride=2)
self.dense_2 = nn.Sequential(ResidualBlock(64))
self.convd2x = UpsampleConvLayer(64, 16, kernel_size=4, stride=2)
self.dense_1 = nn.Sequential( ResidualBlock(16))
self.convd1x = UpsampleConvLayer(16, 8, kernel_size=4, stride=2)
self.conv_output = ConvLayer(8, 3, kernel_size=3, stride=1, padding=1)
self.active = nn.Tanh()
def forward(self,x1):
# if x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
# p2d = (0,-1,0,-1)
# res32x = F.pad(res32x,p2d,"constant",0)
# elif x1[3].shape[3] != res32x.shape[3] and x1[3].shape[2] == res32x.shape[2]:
# p2d = (0,-1,0,0)
# res32x = F.pad(res32x,p2d,"constant",0)
# elif x1[3].shape[3] == res32x.shape[3] and x1[3].shape[2] != res32x.shape[2]:
# p2d = (0,0,0,-1)
# res32x = F.pad(res32x,p2d,"constant",0)
# res16x = res32x + x1[3]
res16x = self.convd16x(x1[3])
if x1[2].shape[3] != res16x.shape[3] and x1[2].shape[2] != res16x.shape[2]:
p2d = (0,-1,0,-1)
res16x = F.pad(res16x,p2d,"constant",0)
elif x1[2].shape[3] != res16x.shape[3] and x1[2].shape[2] == res16x.shape[2]:
p2d = (0,-1,0,0)
res16x = F.pad(res16x,p2d,"constant",0)
elif x1[2].shape[3] == res16x.shape[3] and x1[2].shape[2] != res16x.shape[2]:
p2d = (0,0,0,-1)
res16x = F.pad(res16x,p2d,"constant",0)
res8x = self.dense_4(res16x) + x1[2]
res8x = self.convd8x(res8x)
res4x = self.dense_3(res8x) + x1[1]
res4x = self.convd4x(res4x)
res2x = self.dense_2(res4x) + x1[0]
res2x = self.convd2x(res2x)
x = res2x
x = self.dense_1(x)
x = self.convd1x(x)
return x
## The following is the network which can be fine-tuned for specific datasets
class TransSAR(nn.Module):
def __init__(self, path=None, **kwargs):
super(TransSAR, self).__init__()
self.Tenc = Tenc()
self.convproj = convprojection_base()
self.clean = ConvLayer(8, 1, kernel_size=3, stride=1, padding=1)
self.active = nn.Tanh()
if path is not None:
self.load(path)
def forward(self, x):
x1 = self.Tenc(x)
x = self.convproj(x1)
clean = self.active(self.clean(x))
return clean
class TransSARV2(nn.Module):
def __init__(self, path=None, **kwargs):
super(TransSARV2, self).__init__()
self.Tenc = EncoderTransformerV2(img_size=256, patch_size=16, in_chans=1, num_classes=1, embed_dims=[32, 64, 128, 320, 512],
num_heads=[1, 1, 2, 4, 8], mlp_ratios=[2, 2, 2, 2, 2], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 3, 4, 6, 3], sr_ratios=[8, 8, 4, 2, 1])
self.convproj = convprojection_baseV2()
self.clean = ConvLayer(8, 1, kernel_size=3, stride=1, padding=1)
self.active = nn.Tanh()
def forward(self, x):
x1 = self.Tenc(x)
x = self.convproj(x1)
clean = self.active(self.clean(x))
return clean
class TransSARV3(nn.Module):
def __init__(self, path=None, **kwargs):
super(TransSARV3, self).__init__()
self.Tenc = TencV3()
self.convproj = convprojection_baseV3()
self.clean = ConvLayer(8, 3, kernel_size=3, stride=1, padding=1)
self.active = nn.Tanh()
if path is not None:
self.load(path)
def forward(self, x):
x1 = self.Tenc(x)
x = self.convproj(x1)
clean = self.active(self.clean(x))
return clean
## The following is original network found in paper which solves all-weather removal problems
## using a single model
# class Transweather(nn.Module):
# def __init__(self, path=None, **kwargs):
# super(Transweather, self).__init__()
# self.Tenc = Tenc()
# self.Tdec = Tdec()
# self.convtail = convprojection()
# self.clean = ConvLayer(8, 3, kernel_size=3, stride=1, padding=1)
# self.active = nn.Tanh()
# if path is not None:
# self.load(path)
# def forward(self, x):
# x1 = self.Tenc(x)
# x2 = self.Tdec(x1)
# x = self.convtail(x1,x2)
# clean = self.active(self.clean(x))
# return clean
# def load(self, path):
# """
# Load checkpoint.
# """
# checkpoint = torch.load(path, map_location=lambda storage, loc: storage)
# model_state_dict_keys = self.state_dict().keys()
# checkpoint_state_dict_noprefix = strip_prefix_if_present(checkpoint['state_dict'], "module.")
# self.load_state_dict(checkpoint_state_dict_noprefix, strict=False)
# del checkpoint
# torch.cuda.empty_cache() | 55,366 | 39.50256 | 175 | py |
sar_transformer | sar_transformer-main/utils.py | import os
import numpy as np
import torch
from skimage import io,color
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms as T
from torchvision.transforms import functional as F
from typing import Callable
import os
import cv2
import pandas as pd
from numbers import Number
from typing import Container
from collections import defaultdict
from scipy.io import loadmat
class BSD_SAR(Dataset):
"""
Reads the synthetic images (created useing create_synthetic_data.py) saved as .mat files .
"""
def __init__(self, dataset_path, crop_size, training_set=True) -> None:
self.dataset_path = dataset_path
# self.input_path = os.path.join(dataset_path, 'noisy')
# self.output_path = os.path.join(dataset_path, 'clean')
self.images_list = os.listdir(self.dataset_path)
self.training_set = training_set
self.crop = crop_size
def __len__(self):
return len(os.listdir(self.dataset_path))
def __getitem__(self, idx):
image_filename = self.images_list[idx]
# read .dat file
data_SAR = loadmat(os.path.join(self.dataset_path, image_filename))
# get noisy image and numpy to tensor
image = data_SAR['noisy']
image = np.sqrt(image + 1e-10)
image = F.to_pil_image(image)
# get clean image and numpy to tensor
mask = data_SAR['clean']
mask = np.sqrt(mask + 1e-10)
mask = F.to_pil_image(mask)
# print(image.shape)
# print(mask.shape)
# # read noisy image
# image = cv2.imread(os.path.join(self.input_path, image_filename),0)
# # read clean image
# mask = cv2.imread(os.path.join(self.output_path, image_filename),0)
# # transforming to PIL image
# image, mask = F.to_pil_image(image), F.to_pil_image(mask)
if self.training_set:
# # random resized crop
# i, j, h, w = T.RandomResizedCrop.get_params(image,scale= (0.12, 1.0), ratio=(1, 1))
# image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)
# random crop
i, j, h, w = T.RandomCrop.get_params(image, output_size= self.crop)
image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)
# # resize
# image, mask = F.resize(image,self.crop), F.resize(mask,self.crop)
# rotation
a = T.RandomRotation.get_params((-90, 90))
image, mask = F.rotate(image, a), F.rotate(mask, a)
# random horizontal flipping
if np.random.rand() < 0.5:
image, mask = F.hflip(image), F.hflip(mask)
# random affine transform
if np.random.rand() < 0.5:
affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), self.crop)
image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params)
else:
# random crop
i, j, h, w = T.RandomCrop.get_params(image, output_size= self.crop)
image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)
# transforming to tensor
image = F.to_tensor(image)
mask = F.to_tensor(mask)
return image, mask, image_filename
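# [Editor's note] Usage sketch; the directory path, batch size and single-channel .mat data are
# assumptions, not taken from the original repository:
#   from torch.utils.data import DataLoader
#   train_set = BSD_SAR("path/to/train_mat", crop_size=(256, 256), training_set=True)
#   loader = DataLoader(train_set, batch_size=8, shuffle=True)
#   noisy, clean, names = next(iter(loader))   # noisy/clean: (8, 1, 256, 256) float tensors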
| 3,403 | 28.6 | 111 | py |
sar_transformer | sar_transformer-main/train.py | # Code for training TransSAR on synthetic images
# Author: Malsha Perera
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import argparse
import torchvision
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.nn.functional as F
from torchvision import transforms as T
import os
import matplotlib.pyplot as plt
import numpy as np
import torch.nn.init as init
from utils import BSD_SAR
from transform_main import TransSAR, TransSARV2, TransSARV3
parser = argparse.ArgumentParser(description='TransSAR')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                    help='number of data loading workers (default: 16)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run (default: 100)')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch_size', default=1, type=int,
                    metavar='N', help='batch size (default: 1)')
parser.add_argument('--learning_rate', default=1e-3, type=float,
                    metavar='LR', help='initial learning rate (default: 1e-3)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-5, type=float,
                    metavar='W', help='weight decay (default: 1e-5)')
parser.add_argument('--lfw_path', default='../lfw', type=str, metavar='PATH',
help='path to root path of lfw dataset (default: ../lfw)')
parser.add_argument('--train_dataset', required=True, type=str)
parser.add_argument('--val_dataset', required=True, type=str)
parser.add_argument('--modelname', default='off', type=str,
                    help='name of the model (default: off)')
parser.add_argument('--cuda', default="on", type=str,
help='switch on/off cuda option (default: off)')
parser.add_argument('--aug', default='off', type=str,
help='turn on img augmentation (default: False)')
parser.add_argument('--load', default='default', type=str,
                    help='checkpoint to load (default: default)')
parser.add_argument('--save', default='default', type=str,
                    help='name to use when saving checkpoints (default: default)')
parser.add_argument('--model', default='TransSARV2', type=str,
help='model name')
parser.add_argument('--direc', required=True , type=str,
help='directory to save')
parser.add_argument('--crop', type=int ,default=256)
parser.add_argument('--device', default='cuda', type=str)
parser.add_argument('--lambda_loss', default=0.04, type=float)
args = parser.parse_args()
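# [Editor's note] Example invocation (all paths are placeholders):
#   python train.py --train_dataset ./data/train_mat --val_dataset ./data/val_mat \
#       --direc ./results --batch_size 8 --epochs 100 --crop 256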
aug = args.aug
direc = args.direc
num_epochs = args.epochs
modelname = args.modelname
crop_size = (args.crop, args.crop)
lambda_loss = args.lambda_loss
def total_variation(image_in):
tv_h = torch.sum(torch.abs(image_in[ :, :-1] - image_in[ :, 1:]))
tv_w = torch.sum(torch.abs(image_in[ :-1, :] - image_in[ 1:, :]))
tv_loss = tv_h + tv_w
return tv_loss
def TV_loss(im_batch, weight):
TV_L = 0.0
for tv_idx in range(len(im_batch)):
TV_L = TV_L + total_variation(im_batch[tv_idx,0,:,:])
TV_L = TV_L/len(im_batch)
return weight*TV_L
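# [Editor's note] total_variation() above is the unnormalized anisotropic TV of a 2-D map,
#   TV(x) = sum_ij |x[i, j+1] - x[i, j]| + sum_ij |x[i+1, j] - x[i, j]|,
# e.g. x = [[0, 1], [1, 1]] gives 1 (horizontal) + 1 (vertical) = 2. TV_loss() averages this
# over the batch and multiplies by `weight`, encouraging piecewise-smooth outputs.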
def weight_init(m):
'''
Usage:
model = Model()
model.apply(weight_init)
'''
if isinstance(m, nn.Conv1d):
init.normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.Conv3d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.ConvTranspose1d):
init.normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.ConvTranspose2d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
elif isinstance(m, nn.ConvTranspose3d):
init.xavier_normal_(m.weight.data)
if m.bias is not None:
init.normal_(m.bias.data)
train_dataset = BSD_SAR(args.train_dataset, crop_size, training_set=True)
val_dataset = BSD_SAR(args.val_dataset, crop_size, training_set=False)
dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
valloader = DataLoader(val_dataset, 1, shuffle=True)
device = torch.device("cuda")
model = TransSARV2()
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model,device_ids=[0,1]).cuda()
model.to(device)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(list(model.parameters()), lr=args.learning_rate,
weight_decay=1e-5)
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Total_params: {}".format(pytorch_total_params))
def train_model(model, criterion, optimizer, dataloader, valloader, direc, num_epochs=400):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = 1000
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
running_loss = 0.0
running_loss_tv = 0.0
for batch_idx, (X_batch, y_batch, *rest) in enumerate(dataloader):
X_batch = Variable(X_batch.to(device ='cuda'))
y_batch = Variable(y_batch.to(device='cuda'))
output = model(X_batch)
# print(output.size())
loss = criterion(output, y_batch)
loss = loss + TV_loss(output,0.0000005)
# ===================backward====================
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
epoch_loss = running_loss / (batch_idx+1)
print('{} Loss: {:.4f}'.format(phase, epoch_loss))
                fulldir = os.path.join(direc, "all", str(epoch))
                if not os.path.isdir(fulldir):
                    os.makedirs(fulldir)
                torch.save(model.state_dict(), os.path.join(fulldir, args.model + ".pth"))
else:
model.eval() # Set model to evaluate model
running_loss = 0.0
for batch_idx, (X_batch, y_batch, *rest) in enumerate(valloader):
X_batch = Variable(X_batch.to(device='cuda'))
y_batch = Variable(y_batch.to(device='cuda'))
output = model(X_batch)
loss = criterion(output, y_batch)
optimizer.zero_grad()
running_loss += loss.item()
epoch_loss = running_loss / (batch_idx+1)
print('{} Loss (MSE): {:.4f}'.format(phase, epoch_loss))
if epoch_loss < best_loss:
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
                    torch.save(model.state_dict(), os.path.join(direc, "model.pth"))
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val loss: {:4f}'.format(best_loss))
model.load_state_dict(best_model_wts)
return model
model_ft = train_model(model, criterion, optimizer, dataloader, valloader, direc, num_epochs)
| 8,498 | 32.199219 | 93 | py |
sar_transformer | sar_transformer-main/arch/trans_basenetworks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
from torch.autograd import Function
from math import sqrt
import random
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
super(ConvBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm =='batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.conv(x))
else:
out = self.conv(x)
if self.activation != 'no':
return self.act(out)
else:
return out
class DeconvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
super(DeconvBlock, self).__init__()
self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.deconv(x))
else:
out = self.deconv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(ConvLayer, self).__init__()
# reflection_padding = kernel_size // 2
# self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
def forward(self, x):
# out = self.reflection_pad(x)
out = self.conv2d(x)
return out
class UpsampleConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(UpsampleConvLayer, self).__init__()
self.conv2d = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=1)
def forward(self, x):
out = self.conv2d(x)
return out
class ResidualBlock(torch.nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.conv1(x))
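        # editor's note: the 0.1 factor below is residual scaling (as in EDSR-style blocks),
        # keeping the residual branch small relative to the identity path for stable training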
out = self.conv2(out) * 0.1
out = torch.add(out, residual)
return out
def init_linear(linear):
    init.xavier_normal_(linear.weight)
linear.bias.data.zero_()
def init_conv(conv, glu=True):
    init.kaiming_normal_(conv.weight)
if conv.bias is not None:
conv.bias.data.zero_()
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
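# [Editor's note] equal_lr() below applies the "equalized learning rate" trick: the raw weight is
# stored as '<name>_orig' and rescaled by sqrt(2 / fan_in) on every forward pass via a
# forward-pre-hook. Usage sketch:
#   conv = equal_lr(nn.Conv2d(64, 64, 3, padding=1))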
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module | 5,081 | 29.614458 | 126 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/config.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : config.py
# @Time : Created at 2019-03-18
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import os
import re
import time
import torch
from torch.nn import functional as F
from time import strftime, localtime
# ===Program===
if_test = False
CUDA = True
if_save = True
data_shuffle = False # False
oracle_pretrain = True # True
gen_pretrain = True
dis_pretrain = False
clas_pretrain = False
run_model = 'relgan' # seqgan, leakgan, maligan, jsdgan, relgan, sentigan
k_label = 2 # num of labels, >=2
gen_init = 'truncated_normal' # normal, uniform, truncated_normal
dis_init = 'uniform' # normal, uniform, truncated_normal
# ===Oracle or Real, type===
if_real_data = True # if use real data
dataset = 'emnlp_news' # oracle, image_coco, emnlp_news, amazon_app_book, mr15
model_type = 'RMC' # vanilla, RMC (custom)
loss_type = 'WGAN' # standard, JS, KL, hinge, tv, LS, rsgan (for RelGAN)
# loss_type = 'rsgan' # standard, JS, KL, hinge, tv, LS, rsgan (for RelGAN)
# print(loss_type[-4:])
GP = (loss_type[-4:] == "WGAN")  # Gradient Penalty
LAMBDA = 5 #
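# [Editor's note] When GP is enabled, the usual WGAN-GP term is
#   LAMBDA * E[(||grad_{x_hat} D(x_hat)||_2 - 1)^2]
# added to the discriminator loss; the actual penalty computation lives in the training code,
# not in this config file.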
vocab_size = 5255 # oracle: 5000, coco: 6613, emnlp: 5255, amazon_app_book: 6418, mr15: 6289
max_seq_len = 51 # oracle: 20, coco: 37, emnlp: 51, amazon_app_book: 40
ADV_train_epoch = 3000 # SeqGAN, LeakGAN-200, RelGAN-3000
extend_vocab_size = 0 # plus test data, only used for Classifier
temp_adpt = 'exp' # no, lin, exp, log, sigmoid, quad, sqrt
# temperature = 1
temp_scale = 0.2
temperature = 5
# temp_scale = 1
# ===Basic Train===
samples_num = 10000 # 10000, mr15: 2000,
MLE_train_epoch = 150 # SeqGAN-80, LeakGAN-8, RelGAN-150
PRE_clas_epoch = 10
inter_epoch = 15 # LeakGAN-10
batch_size = 64 # 64
start_letter = 1
padding_idx = 0
start_token = 'BOS'
padding_token = 'EOS'
gen_lr = 0.01 # 0.01
gen_adv_lr = 1e-4 # RelGAN-1e-4
dis_lr = 1e-4 # SeqGAN,LeakGAN-1e-2, RelGAN-1e-4
dis_D_lr = 1e-4 # SeqGAN,LeakGAN-1e-2, RelGAN-1e-4
clas_lr = 1e-3
clip_norm = 5.0
clip_param = 0.4
clip = True
pre_log_step = 10
adv_log_step = 20
train_data = 'dataset/' + dataset + '.txt'
test_data = 'dataset/testdata/' + dataset + '_test.txt'
cat_train_data = 'dataset/' + dataset + '_cat{}.txt'
cat_test_data = 'dataset/testdata/' + dataset + '_cat{}_test.txt'
# ===Metrics===
use_nll_oracle = True
use_nll_gen = True
use_nll_div = True
use_bleu = True
use_self_bleu = True
use_clas_acc = True
use_ppl = False
# ===Generator===
ADV_g_step = 1 # 1
rollout_num = 16 # 4
gen_embed_dim = 32 # 32
gen_hidden_dim = 32 # 32
goal_size = 16 # LeakGAN-16
step_size = 4 # LeakGAN-4
mem_slots = 1 # RelGAN-1
num_heads = 2 # RelGAN-2
head_size = 256 # RelGAN-256
# ===Discriminator===
d_step = 5 # SeqGAN-50, LeakGAN-5
d_epoch = 3 # SeqGAN,LeakGAN-3
ADV_d_step = 5 # SeqGAN,LeakGAN,RelGAN-5
ADV_d_epoch = 3 # SeqGAN,LeakGAN-3
dis_embed_dim = 64
dis_hidden_dim = 64
num_rep = 64 # RelGAN
# ===log===
log_time_str = strftime("%m%d_%H%M_%S", localtime())
log_filename = strftime("log/log_%s" % log_time_str)
if os.path.exists(log_filename + '.txt'):
i = 2
while True:
if not os.path.exists(log_filename + '_%d' % i + '.txt'):
log_filename = log_filename + '_%d' % i
break
i += 1
log_filename = log_filename + '.txt'
# Automatically choose GPU or CPU
# if torch.cuda.is_available() and torch.cuda.device_count() > 0:
# os.system('nvidia-smi -q -d Utilization > gpu')
# with open('gpu', 'r') as _tmpfile:
# util_gpu = list(map(int, re.findall(r'Gpu\s+:\s*(\d+)\s*%', _tmpfile.read())))
# os.remove('gpu')
# if len(util_gpu):
# device = util_gpu.index(min(util_gpu))
# else:
# device = 0
# else:
device = -1  # negative index: torch.cuda.set_device() below is a no-op; the real device is set in init_param()
# device=1
# print('device: ', device)
torch.cuda.set_device(device)
# ===Save Model and samples===
save_root = 'save/{}/{}/{}_{}_lt-{}_sl{}_temp{}_scale{}_T{}/'.format(time.strftime("%Y%m%d"),
dataset, run_model, model_type,
loss_type, max_seq_len,
temperature,temp_scale,
log_time_str)
save_samples_root = save_root + 'samples/'
save_model_root = save_root + 'models/'
oracle_state_dict_path = 'pretrain/oracle_data/oracle_lstm.pt'
oracle_samples_path = 'pretrain/oracle_data/oracle_lstm_samples_{}.pt'
multi_oracle_state_dict_path = 'pretrain/oracle_data/oracle{}_lstm.pt'
multi_oracle_samples_path = 'pretrain/oracle_data/oracle{}_lstm_samples_{}.pt'
pretrain_root = 'pretrain/{}/'.format(dataset if if_real_data else 'oracle_data')
pretrained_gen_path = pretrain_root + 'gen_MLE_pretrain_{}_{}_sl{}_sn{}.pt'.format(run_model, model_type, max_seq_len,
samples_num)
pretrained_dis_path = pretrain_root + 'dis_pretrain_{}_{}_sl{}_sn{}.pt'.format(run_model, model_type, max_seq_len,
samples_num)
pretrained_clas_path = pretrain_root + 'clas_pretrain_{}_{}_sl{}_sn{}.pt'.format(run_model, model_type, max_seq_len,
samples_num)
signal_file = 'run_signal.txt'
tips = ''
# Init settings according to parser
def init_param(opt):
global run_model, model_type, loss_type, GP, LAMBDA, CUDA, device, data_shuffle, samples_num, vocab_size, \
MLE_train_epoch, ADV_train_epoch, inter_epoch, batch_size, max_seq_len, start_letter, padding_idx, \
gen_lr, gen_adv_lr, dis_lr, dis_D_lr, clip_norm, clip, clip_param, pre_log_step, adv_log_step, train_data, test_data, temp_adpt, \
temperature, temp_scale, oracle_pretrain, gen_pretrain, dis_pretrain, ADV_g_step, rollout_num, gen_embed_dim, \
gen_hidden_dim, goal_size, step_size, mem_slots, num_heads, head_size, d_step, d_epoch, \
ADV_d_step, ADV_d_epoch, dis_embed_dim, dis_hidden_dim, num_rep, log_filename, save_root, \
signal_file, tips, save_samples_root, save_model_root, if_real_data, pretrained_gen_path, \
pretrained_dis_path, pretrain_root, if_test, dataset, PRE_clas_epoch, oracle_samples_path, \
pretrained_clas_path, gen_init, dis_init, multi_oracle_samples_path, k_label, cat_train_data, cat_test_data, \
use_nll_oracle, use_nll_gen, use_nll_div, use_bleu, use_self_bleu, use_clas_acc, use_ppl
if_test = True if opt.if_test == 1 else False
run_model = opt.run_model
k_label = opt.k_label
dataset = opt.dataset
model_type = opt.model_type
loss_type = opt.loss_type
GP = opt.GP
LAMBDA = opt.LAMBDA
if_real_data = True if opt.if_real_data == 1 else False
CUDA = True if opt.cuda == 1 else False
device = opt.device
data_shuffle = opt.shuffle
gen_init = opt.gen_init
dis_init = opt.dis_init
samples_num = opt.samples_num
vocab_size = opt.vocab_size
MLE_train_epoch = opt.mle_epoch
PRE_clas_epoch = opt.clas_pre_epoch
ADV_train_epoch = opt.adv_epoch
inter_epoch = opt.inter_epoch
batch_size = opt.batch_size
max_seq_len = opt.max_seq_len
start_letter = opt.start_letter
padding_idx = opt.padding_idx
gen_lr = opt.gen_lr
gen_adv_lr = opt.gen_adv_lr
dis_lr = opt.dis_lr
dis_D_lr = opt.dis_D_lr
clip_norm = opt.clip_norm
clip = opt.clip
clip_param = opt.clip_param
pre_log_step = opt.pre_log_step
adv_log_step = opt.adv_log_step
temp_adpt = opt.temp_adpt
temperature = opt.temperature
temp_scale = opt.temp_scale
oracle_pretrain = True if opt.ora_pretrain == 1 else False
gen_pretrain = True if opt.gen_pretrain == 1 else False
dis_pretrain = True if opt.dis_pretrain == 1 else False
ADV_g_step = opt.adv_g_step
rollout_num = opt.rollout_num
gen_embed_dim = opt.gen_embed_dim
gen_hidden_dim = opt.gen_hidden_dim
goal_size = opt.goal_size
step_size = opt.step_size
mem_slots = opt.mem_slots
num_heads = opt.num_heads
head_size = opt.head_size
d_step = opt.d_step
d_epoch = opt.d_epoch
ADV_d_step = opt.adv_d_step
ADV_d_epoch = opt.adv_d_epoch
dis_embed_dim = opt.dis_embed_dim
dis_hidden_dim = opt.dis_hidden_dim
num_rep = opt.num_rep
use_nll_oracle = True if opt.use_nll_oracle == 1 else False
use_nll_gen = True if opt.use_nll_gen == 1 else False
use_nll_div = True if opt.use_nll_div == 1 else False
use_bleu = True if opt.use_bleu == 1 else False
use_self_bleu = True if opt.use_self_bleu == 1 else False
use_clas_acc = True if opt.use_clas_acc == 1 else False
use_ppl = True if opt.use_ppl == 1 else False
log_filename = opt.log_file
signal_file = opt.signal_file
tips = opt.tips
# CUDA device
torch.cuda.set_device(device)
# Save path
save_root = 'save/{}/{}/{}_{}_lt-{}_sl{}_temp{}_scale{}_T{}/'.format(time.strftime("%Y%m%d"),
dataset, run_model, model_type,
loss_type, max_seq_len,
temperature,temp_scale,
log_time_str)
save_samples_root = save_root + 'samples/'
save_model_root = save_root + 'models/'
train_data = 'dataset/' + dataset + '.txt'
test_data = 'dataset/testdata/' + dataset + '_test.txt'
cat_train_data = 'dataset/' + dataset + '_cat{}.txt'
cat_test_data = 'dataset/testdata/' + dataset + '_cat{}_test.txt'
if max_seq_len == 40:
oracle_samples_path = 'pretrain/oracle_data/oracle_lstm_samples_{}_sl40.pt'
multi_oracle_samples_path = 'pretrain/oracle_data/oracle{}_lstm_samples_{}_sl40.pt'
pretrain_root = 'pretrain/{}/'.format(dataset if if_real_data else 'oracle_data')
pretrained_gen_path = pretrain_root + 'gen_MLE_pretrain_{}_{}_sl{}_sn{}.pt'.format(run_model, model_type,
max_seq_len, samples_num)
pretrained_dis_path = pretrain_root + 'dis_pretrain_{}_{}_sl{}_sn{}.pt'.format(run_model, model_type, max_seq_len,
samples_num)
pretrained_clas_path = pretrain_root + 'clas_pretrain_{}_{}_sl{}_sn{}.pt'.format(run_model, model_type, max_seq_len,
samples_num)
# Assertion
assert k_label >= 2, 'Error: k_label = {}, which should be >=2!'.format(k_label)
# Create Directory
dir_list = ['save', 'savefig', 'log', 'pretrain', 'dataset',
'pretrain/{}'.format(dataset if if_real_data else 'oracle_data')]
if not if_test:
dir_list.extend([save_root, save_samples_root, save_model_root])
for d in dir_list:
if not os.path.exists(d):
os.makedirs(d)
| 11,232 | 38.139373 | 138 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/SentiGAN_D.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : SentiGAN_D.py
# @Time : Created at 2019-07-26
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch.nn as nn
from models.discriminator import CNNDiscriminator, CNNClassifier
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [200, 200, 200, 200]
clas_filter_sizes = [2, 3, 4, 5]
clas_num_filters = [200]
class SentiGAN_D(CNNDiscriminator):
def __init__(self, k_label, embed_dim, vocab_size, padding_idx, gpu=False, dropout=0.2):
super(SentiGAN_D, self).__init__(embed_dim, vocab_size, dis_filter_sizes, dis_num_filters, padding_idx, gpu,
dropout)
self.feature2out = nn.Linear(self.feature_dim, k_label + 1)
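        # editor's note: the output layer above has k_label "real" classes plus one extra class
        # for generated (fake) text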
self.init_params()
# Classifier
class SentiGAN_C(CNNClassifier):
def __init__(self, k_label, embed_dim, max_seq_len, num_rep, vocab_size, padding_idx, gpu=False, dropout=0.25):
super(SentiGAN_C, self).__init__(k_label, embed_dim, max_seq_len, num_rep, vocab_size, clas_filter_sizes,
clas_num_filters, padding_idx, gpu, dropout)
# Use Glove
# self.embeddings.from_pretrained(build_embedding_matrix(cfg.dataset))
| 1,335 | 33.25641 | 116 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/JSDGAN_G.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : JSDGAN_G.py
# @Time : Created at 2019/11/17
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn.functional as F
from models.generator import LSTMGenerator
class JSDGAN_G(LSTMGenerator):
def __init__(self, mem_slots, num_heads, head_size, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx,
gpu=False):
super(JSDGAN_G, self).__init__(embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu)
self.name = 'jsdgan'
# RMC
# self.hidden_dim = mem_slots * num_heads * head_size
# self.lstm = RelationalMemory(mem_slots=mem_slots, head_size=head_size, input_size=embedding_dim,
# num_heads=num_heads, return_all_outputs=True)
# self.lstm2out = nn.Linear(self.hidden_dim, vocab_size)
#
# def init_hidden(self, batch_size=cfg.batch_size):
# """init RMC memory"""
# memory = self.lstm.initial_state(batch_size)
# memory = self.lstm.repackage_hidden(memory) # detch memory at first
# return memory.cuda() if self.gpu else memory
def JSD_loss(self, inp, target):
"""
Returns a JSDGAN loss
:param inp: batch_size x seq_len, inp should be target with <s> (start letter) prepended
:param target: batch_size x seq_len
:return loss: loss to optimize
"""
batch_size, seq_len = inp.size()
hidden = self.init_hidden(batch_size)
pred = self.forward(inp, hidden).view(batch_size, self.max_seq_len, self.vocab_size)
target_onehot = F.one_hot(target, self.vocab_size).float() # batch_size * seq_len * vocab_size
pred = torch.sum(pred * target_onehot, dim=-1) # batch_size * seq_len
# calculate probabilities of sentences
prob_gen = torch.exp(torch.sum(pred, dim=-1).double()) # sum of log prob
prob_gen = self.min_max_normal(prob_gen).clamp(min=1e-10)
prob_data = torch.DoubleTensor([1 / batch_size] * prob_gen.size(0))
if self.gpu:
prob_data = prob_data.cuda()
# calculate the reward
reward = torch.log(1. - torch.div(prob_data, prob_data + prob_gen)) # batch_size
# check if nan
if torch.isnan(reward).sum() > 0:
print('Reward is nan!!!')
exit(1)
loss = torch.sum((prob_gen * reward).detach() * torch.sum(pred.double(), dim=-1))
return loss
def min_max_normal(self, prob):
return torch.div(prob - torch.min(prob), torch.clamp(torch.max(prob) - torch.min(prob), min=1e-78))
def sigmoid_normal(self, prob):
"""push prob either close to 0 or 1"""
return torch.sigmoid((prob - 0.5) * 20)
| 2,889 | 37.533333 | 120 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/SentiGAN_G.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : SentiGAN_G.py
# @Time : Created at 2019-07-26
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn.functional as F
from models.generator import LSTMGenerator
class SentiGAN_G(LSTMGenerator):
def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu=False):
super(SentiGAN_G, self).__init__(embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu)
self.name = 'sentigan'
def forward(self, inp, hidden, need_hidden=False, use_log=True):
"""
Embeds input and applies LSTM
:param inp: batch_size * seq_len
:param hidden: (h, c)
:param need_hidden: if return hidden, use for sampling
"""
emb = self.embeddings(inp) # batch_size * len * embedding_dim
if len(inp.size()) == 1:
emb = emb.unsqueeze(1) # batch_size * 1 * embedding_dim
out, hidden = self.lstm(emb, hidden) # out: batch_size * seq_len * hidden_dim
out = out.contiguous().view(-1, self.hidden_dim) # out: (batch_size * len) * hidden_dim
out = self.lstm2out(out) # batch_size * seq_len * vocab_size
# out = self.temperature * out # temperature
if use_log:
pred = F.log_softmax(out, dim=-1)
else:
pred = F.softmax(out, dim=-1)
if need_hidden:
return pred, hidden
else:
return pred
def batchPGLoss(self, inp, target, reward):
"""
Returns a policy gradient loss
:param inp: batch_size x seq_len, inp should be target with <s> (start letter) prepended
:param target: batch_size x seq_len
:param reward: batch_size (discriminator reward for each sentence, applied to each token of the corresponding sentence)
:return loss: policy loss
"""
batch_size, seq_len = inp.size()
hidden = self.init_hidden(batch_size)
out = self.forward(inp, hidden, use_log=False).view(batch_size, self.max_seq_len, self.vocab_size)
target_onehot = F.one_hot(target, self.vocab_size).float() # batch_size * seq_len * vocab_size
pred = torch.sum(out * target_onehot, dim=-1) # batch_size * seq_len
loss = -torch.sum(pred * (1 - reward))
return loss
| 2,451 | 36.151515 | 127 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/discriminator.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : config.py
# @Time : Created at 2019-03-18
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import math
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import config as cfg
from utils.helpers import truncated_normal_
class CNNDiscriminator(nn.Module):
def __init__(self, embed_dim, vocab_size, filter_sizes, num_filters, padding_idx, gpu=False,
dropout=0.2):
super(CNNDiscriminator, self).__init__()
self.embedding_dim = embed_dim
self.vocab_size = vocab_size
self.padding_idx = padding_idx
self.feature_dim = sum(num_filters)
self.gpu = gpu
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
self.convs = nn.ModuleList([
nn.Conv2d(1, n, (f, embed_dim)) for (n, f) in zip(num_filters, filter_sizes)
])
self.highway = nn.Linear(self.feature_dim, self.feature_dim)
self.feature2out = nn.Linear(self.feature_dim, 2)
self.dropout = nn.Dropout(dropout)
self.init_params()
def forward(self, inp):
"""
Get final predictions of discriminator
:param inp: batch_size * seq_len
:return: pred: batch_size * 2
"""
feature = self.get_feature(inp)
pred = self.feature2out(self.dropout(feature))
return pred
def get_feature(self, inp):
"""
Get feature vector of given sentences
:param inp: batch_size * max_seq_len
:return: batch_size * feature_dim
"""
emb = self.embeddings(inp).unsqueeze(1) # batch_size * 1 * max_seq_len * embed_dim
convs = [F.relu(conv(emb)).squeeze(3) for conv in self.convs] # [batch_size * num_filter * length]
pools = [F.max_pool1d(conv, conv.size(2)).squeeze(2) for conv in convs] # [batch_size * num_filter]
pred = torch.cat(pools, 1) # tensor: batch_size * feature_dim
highway = self.highway(pred)
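        # editor's note: highway gating follows, i.e. g = sigmoid(highway) and
        # out = g * relu(highway) + (1 - g) * pred (transform path vs. carry path)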
pred = torch.sigmoid(highway) * F.relu(highway) + (1. - torch.sigmoid(highway)) * pred # highway
return pred
def init_params(self):
for param in self.parameters():
if param.requires_grad and len(param.shape) > 0:
stddev = 1 / math.sqrt(param.shape[0])
if cfg.dis_init == 'uniform':
torch.nn.init.uniform_(param, a=-0.05, b=0.05)
elif cfg.dis_init == 'normal':
torch.nn.init.normal_(param, std=stddev)
elif cfg.dis_init == 'truncated_normal':
truncated_normal_(param, std=stddev)
class GRUDiscriminator(nn.Module):
def __init__(self, embedding_dim, vocab_size, hidden_dim, feature_dim, max_seq_len, padding_idx,
gpu=False, dropout=0.2):
super(GRUDiscriminator, self).__init__()
self.hidden_dim = hidden_dim
self.embedding_dim = embedding_dim
self.max_seq_len = max_seq_len
self.padding_idx = padding_idx
self.gpu = gpu
self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
self.gru = nn.GRU(embedding_dim, hidden_dim, num_layers=2, bidirectional=True, dropout=dropout)
self.gru2hidden = nn.Linear(2 * 2 * hidden_dim, feature_dim)
self.feature2out = nn.Linear(feature_dim, 2)
self.dropout = nn.Dropout(dropout)
self.init_params()
def init_hidden(self, batch_size):
h = autograd.Variable(torch.zeros(2 * 2 * 1, batch_size, self.hidden_dim))
if self.gpu:
return h.cuda()
else:
return h
def forward(self, inp):
"""
        Get final predictions of discriminator
:param inp: batch_size * seq_len
:return pred: batch_size * 2
"""
feature = self.get_feature(inp)
pred = self.feature2out(self.dropout(feature))
return pred
def get_feature(self, inp):
"""
Get feature vector of given sentences
:param inp: batch_size * max_seq_len
:return: batch_size * feature_dim
"""
hidden = self.init_hidden(inp.size(0))
        emb = self.embeddings(inp)  # batch_size * seq_len * embedding_dim
emb = emb.permute(1, 0, 2) # seq_len * batch_size * embedding_dim
_, hidden = self.gru(emb, hidden) # 4 * batch_size * hidden_dim
hidden = hidden.permute(1, 0, 2).contiguous() # batch_size * 4 * hidden_dim
out = self.gru2hidden(hidden.view(-1, 4 * self.hidden_dim)) # batch_size * 4 * hidden_dim
feature = torch.tanh(out) # batch_size * feature_dim
return feature
def init_params(self):
for param in self.parameters():
if param.requires_grad and len(param.shape) > 0:
stddev = 1 / math.sqrt(param.shape[0])
if cfg.dis_init == 'uniform':
torch.nn.init.uniform_(param, a=-0.05, b=0.05)
elif cfg.dis_init == 'normal':
torch.nn.init.normal_(param, std=stddev)
elif cfg.dis_init == 'truncated_normal':
truncated_normal_(param, std=stddev)
# Classifier
class CNNClassifier(CNNDiscriminator):
def __init__(self, k_label, embed_dim, max_seq_len, num_rep, vocab_size, filter_sizes, num_filters, padding_idx,
gpu=False, dropout=0.25):
super(CNNClassifier, self).__init__(embed_dim, vocab_size, filter_sizes, num_filters, padding_idx,
gpu, dropout)
self.k_label = k_label
self.embed_dim = embed_dim
self.max_seq_len = max_seq_len
self.feature_dim = sum(num_filters)
self.emb_dim_single = int(embed_dim / num_rep)
self.embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
self.convs = nn.ModuleList([
nn.Conv2d(1, n, (f, embed_dim)) for (n, f) in zip(num_filters, filter_sizes)
]) # vanilla
# self.convs = nn.ModuleList([
# nn.Conv2d(1, n, (f, self.emb_dim_single), stride=(1, self.emb_dim_single)) for (n, f) in
# zip(num_filters, filter_sizes)
# ]) # RelGAN
self.highway = nn.Linear(self.feature_dim, self.feature_dim)
self.feature2out = nn.Linear(self.feature_dim, 100)
self.out2logits = nn.Linear(100, k_label) # vanilla
# self.out2logits = nn.Linear(num_rep * 100, k_label) # RelGAN
self.dropout = nn.Dropout(dropout)
self.init_params()
def forward(self, inp):
"""
        Get logits of the classifier
        :param inp: batch_size * seq_len
        :return logits: batch_size * k_label (vanilla head; the RelGAN-style head is commented out below)
"""
emb = self.embeddings(inp).unsqueeze(1) # batch_size * 1 * max_seq_len * embed_dim
# vanilla
convs = [F.relu(conv(emb)).squeeze(3) for conv in self.convs] # [batch_size * num_filter * length]
pools = [F.max_pool1d(conv, conv.size(2)).squeeze(2) for conv in convs] # [batch_size * num_filter]
# RelGAN
# cons = [F.relu(conv(emb)) for conv in self.convs] # [batch_size * num_filter * (seq_len-k_h+1) * num_rep]
# pools = [F.max_pool2d(con, (con.size(2), 1)).squeeze(2) for con in cons] # [batch_size * num_filter * num_rep]
pred = torch.cat(pools, 1) # batch_size * feature_dim
# pred = pred.permute(0, 2, 1).contiguous().view(-1, self.feature_dim) # RelGAN
highway = self.highway(pred)
pred = torch.sigmoid(highway) * F.relu(highway) + (1. - torch.sigmoid(highway)) * pred # highway, same dim
pred = self.feature2out(self.dropout(pred))
logits = self.out2logits(self.dropout(pred)).squeeze(1) # vanilla, batch_size * k_label
# logits = self.out2logits(self.dropout(pred.view(inp.size(0), -1))).squeeze(1) # RelGAN, batch_size * k_label
return logits
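# [Editor's note] Usage sketch for the vanilla classifier path (all sizes below are placeholders):
#   clf = CNNClassifier(k_label=2, embed_dim=64, max_seq_len=20, num_rep=64, vocab_size=5000,
#                       filter_sizes=[2, 3, 4, 5], num_filters=[200, 200, 200, 200], padding_idx=0)
#   logits = clf(torch.randint(0, 5000, (8, 20)))   # -> (8, 2) class logits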
| 8,100 | 39.303483 | 121 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/relational_rnn_general.py | import torch
import torch.nn.functional as F
from torch import nn
# this class largely follows the official sonnet implementation
# https://github.com/deepmind/sonnet/blob/master/sonnet/python/modules/relational_memory.py
class RelationalMemory(nn.Module):
"""
Constructs a `RelationalMemory` object.
This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.
Args:
mem_slots: The total number of memory slots to use.
head_size: The size of an attention head.
input_size: The size of input per step. i.e. the dimension of each input vector
num_heads: The number of attention heads to use. Defaults to 1.
num_blocks: Number of times to compute attention per time step. Defaults
to 1.
forget_bias: Bias to use for the forget gate, assuming we are using
some form of gating. Defaults to 1.
input_bias: Bias to use for the input gate, assuming we are using
some form of gating. Defaults to 0.
gate_style: Whether to use per-element gating ('unit'),
per-memory slot gating ('memory'), or no gating at all (None).
Defaults to `unit`.
attention_mlp_layers: Number of layers to use in the post-attention
MLP. Defaults to 2.
key_size: Size of vector to use for key & query vectors in the attention
computation. Defaults to None, in which case we use `head_size`.
# NEW flag for this class
return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.
Raises:
ValueError: gate_style not one of [None, 'memory', 'unit'].
ValueError: num_blocks is < 1.
ValueError: attention_mlp_layers is < 1.
"""
def __init__(self, mem_slots, head_size, input_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,
gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False):
super(RelationalMemory, self).__init__()
########## generic parameters for RMC ##########
self.mem_slots = mem_slots
self.head_size = head_size
self.num_heads = num_heads
self.mem_size = self.head_size * self.num_heads
# a new fixed params needed for pytorch port of RMC
# +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input
# so if the mem_slots = 1, this value is 2
self.mem_slots_plus_input = self.mem_slots + 1
if num_blocks < 1:
raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))
self.num_blocks = num_blocks
if gate_style not in ['unit', 'memory', None]:
raise ValueError(
'gate_style must be one of [\'unit\', \'memory\', None]. got: '
'{}.'.format(gate_style))
self.gate_style = gate_style
if attention_mlp_layers < 1:
raise ValueError('attention_mlp_layers must be >= 1. Got: {}.'.format(
attention_mlp_layers))
self.attention_mlp_layers = attention_mlp_layers
self.key_size = key_size if key_size else self.head_size
########## parameters for multihead attention ##########
# value_size is same as head_size
self.value_size = self.head_size
# total size for query-key-value
self.qkv_size = 2 * self.key_size + self.value_size
self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F
# each head has qkv_sized linear projector
# just using one big param is more efficient, rather than this line
# self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]
self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)
self.qkv_layernorm = nn.LayerNorm([self.mem_slots_plus_input, self.total_qkv_size])
        # used for attend_over_memory function; a list comprehension is used so that each MLP
        # layer has its own parameters (multiplying a Python list would alias a single Linear)
        self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)
                                            for _ in range(self.attention_mlp_layers)])
self.attended_memory_layernorm = nn.LayerNorm([self.mem_slots_plus_input, self.mem_size])
self.attended_memory_layernorm2 = nn.LayerNorm([self.mem_slots_plus_input, self.mem_size])
########## parameters for initial embedded input projection ##########
self.input_size = input_size
self.input_projector = nn.Linear(self.input_size, self.mem_size)
########## parameters for gating ##########
self.num_gates = 2 * self.calculate_gate_size()
self.input_gate_projector = nn.Linear(self.mem_size, self.num_gates)
self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)
# trainable scalar gate bias tensors
self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))
self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))
########## number of outputs returned #####
self.return_all_outputs = return_all_outputs
def repackage_hidden(self, h):
"""Wraps hidden states in new Tensors, to detach them from their history."""
# needed for truncated BPTT, called at every batch forward pass
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(self.repackage_hidden(v) for v in h)
def initial_state(self, batch_size, trainable=False):
"""
Creates the initial memory.
We should ensure each row of the memory is initialized to be unique,
so initialize the matrix to be the identity. We then pad or truncate
as necessary so that init_state is of size
(batch_size, self.mem_slots, self.mem_size).
Args:
batch_size: The size of the batch.
trainable: Whether the initial state is trainable. This is always True.
Returns:
init_state: A truncated or padded matrix of size
(batch_size, self.mem_slots, self.mem_size).
"""
init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])
# pad the matrix with zeros
if self.mem_size > self.mem_slots:
difference = self.mem_size - self.mem_slots
pad = torch.zeros((batch_size, self.mem_slots, difference))
init_state = torch.cat([init_state, pad], -1)
# truncation. take the first 'self.mem_size' components
elif self.mem_size < self.mem_slots:
init_state = init_state[:, :, :self.mem_size]
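        # editor's note: e.g. mem_slots=1, mem_size=512 -> each sample's initial memory is the
        # single row [1, 0, 0, ..., 0] (identity column padded with zeros)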
return init_state
def multihead_attention(self, memory):
"""
Perform multi-head attention from 'Attention is All You Need'.
Implementation of the attention mechanism from
https://arxiv.org/abs/1706.03762.
Args:
memory: Memory tensor to perform attention on.
Returns:
new_memory: New memory tensor.
"""
        # First, a simple linear projection is used to construct the queries, keys and values
qkv = self.qkv_projector(memory)
# apply layernorm for every dim except the batch dim
qkv = self.qkv_layernorm(qkv)
# mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs
# example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass
# this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style
mem_slots = memory.shape[1] # denoted as N
# split the qkv to multiple heads H
# [B, N, F] => [B, N, H, F/H]
qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)
# [B, N, H, F/H] => [B, H, N, F/H]
qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)
# [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]
q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)
# scale q with d_k, the dimensionality of the key vectors
q *= (self.key_size ** -0.5)
# make it [B, H, N, N]
dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))
weights = F.softmax(dot_product, dim=-1)
# output is [B, H, N, V]
output = torch.matmul(weights, v)
# [B, H, N, V] => [B, N, H, V] => [B, N, H*V]
output_transpose = output.permute(0, 2, 1, 3).contiguous()
new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))
return new_memory
@property
def state_size(self):
return [self.mem_slots, self.mem_size]
@property
def output_size(self):
return self.mem_slots * self.mem_size
def calculate_gate_size(self):
"""
Calculate the gate size from the gate_style.
Returns:
The per sample, per head parameter size of each gate.
"""
if self.gate_style == 'unit':
return self.mem_size
elif self.gate_style == 'memory':
return 1
else: # self.gate_style == None
return 0
def create_gates(self, inputs, memory):
"""
Create input and forget gates for this step using `inputs` and `memory`.
Args:
inputs: Tensor input.
memory: The current state of memory.
Returns:
input_gate: A LSTM-like insert gate.
forget_gate: A LSTM-like forget gate.
"""
# We'll create the input and forget gates at once. Hence, calculate double
# the gate size.
# equation 8: since there is no output gate, h is just a tanh'ed m
memory = torch.tanh(memory)
# sonnet uses this, but i think it assumes time step of 1 for all cases
# if inputs is (B, T, features) where T > 1, this gets incorrect
# inputs = inputs.view(inputs.shape[0], -1)
# fixed implementation
if len(inputs.shape) == 3:
if inputs.shape[1] > 1:
raise ValueError(
"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1")
inputs = inputs.view(inputs.shape[0], -1)
# matmul for equation 4 and 5
# there is no output gate, so equation 6 is not implemented
gate_inputs = self.input_gate_projector(inputs)
gate_inputs = gate_inputs.unsqueeze(dim=1)
gate_memory = self.memory_gate_projector(memory)
else:
raise ValueError("input shape of create_gate function is 2, expects 3")
# this completes the equation 4 and 5
gates = gate_memory + gate_inputs
gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)
input_gate, forget_gate = gates
assert input_gate.shape[2] == forget_gate.shape[2]
# to be used for equation 7
input_gate = torch.sigmoid(input_gate + self.input_bias)
forget_gate = torch.sigmoid(forget_gate + self.forget_bias)
return input_gate, forget_gate
def attend_over_memory(self, memory):
"""
Perform multiheaded attention over `memory`.
Args:
memory: Current relational memory.
Returns:
The attended-over memory.
"""
for _ in range(self.num_blocks):
attended_memory = self.multihead_attention(memory)
# Add a skip connection to the multiheaded attention's input.
memory = self.attended_memory_layernorm(memory + attended_memory)
# add a skip connection to the attention_mlp's input.
attention_mlp = memory
for i, l in enumerate(self.attention_mlp):
attention_mlp = self.attention_mlp[i](attention_mlp)
attention_mlp = F.relu(attention_mlp)
memory = self.attended_memory_layernorm2(memory + attention_mlp)
return memory
def forward_step(self, inputs, memory, treat_input_as_matrix=False):
"""
Forward step of the relational memory core.
Args:
inputs: Tensor input.
memory: Memory output from the previous time step.
treat_input_as_matrix: Optional, whether to treat `input` as a sequence
of matrices. Default to False, in which case the input is flattened
into a vector.
Returns:
output: This time step's output.
next_memory: The next version of memory to use.
"""
if treat_input_as_matrix:
# keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2
inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)
# apply linear layer for dim 2
inputs_reshape = self.input_projector(inputs)
else:
# keep (Batch, ...) dim (0), flatten starting from dim 1
inputs = inputs.view(inputs.shape[0], -1)
# apply linear layer for dim 1
inputs = self.input_projector(inputs)
# unsqueeze the time step to dim 1
inputs_reshape = inputs.unsqueeze(dim=1)
memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)
next_memory = self.attend_over_memory(memory_plus_input)
# cut out the concatenated input vectors from the original memory slots
n = inputs_reshape.shape[1]
next_memory = next_memory[:, :-n, :]
if self.gate_style == 'unit' or self.gate_style == 'memory':
# these gates are sigmoid-applied ones for equation 7
input_gate, forget_gate = self.create_gates(inputs_reshape, memory)
# equation 7 calculation
next_memory = input_gate * torch.tanh(next_memory)
next_memory += forget_gate * memory
output = next_memory.view(next_memory.shape[0], -1)
return output, next_memory
def forward(self, inputs, memory, treat_input_as_matrix=False):
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
# for loop implementation of (entire) recurrent forward pass of the model
# inputs is batch first [batch, seq], and output logit per step is [batch, vocab]
# so the concatenated logits are [seq * batch, vocab]
# targets are flattened [seq, batch] => [seq * batch], so the dimension is correct
# memory = self.repackage_hidden(memory)
logit = 0
logits = []
# shape[1] is seq_lenth T
for idx_step in range(inputs.shape[1]):
logit, memory = self.forward_step(inputs[:, idx_step], memory)
logits.append(logit.unsqueeze(1))
logits = torch.cat(logits, dim=1)
if self.return_all_outputs:
return logits, memory
else:
return logit.unsqueeze(1), memory
# ########## DEBUG: unit test code ##########
# input_size = 32
# seq_length = 20
# batch_size = 32
# num_tokens = 5000
# model = RelationalMemory(mem_slots=1, head_size=512, input_size=input_size, num_heads=2)
# model_memory = model.initial_state(batch_size=batch_size)
#
# # random input
# random_input = torch.randn((32, seq_length, input_size))
# # random targets
# random_targets = torch.randn((32, seq_length, input_size))
#
# # take a one step forward
# logit, next_memory = model(random_input, model_memory)
# print(next_memory.shape)
# print(logit.shape)
| 15,604 | 41.753425 | 142 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/LeakGAN_G.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : LeakGAN_G.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import math
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import config as cfg
from utils.helpers import truncated_normal_
dis_num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
goal_out_size = sum(dis_num_filters)
class LeakGAN_G(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, goal_size,
step_size, gpu=False):
super(LeakGAN_G, self).__init__()
self.name = 'leakgan'
self.hidden_dim = hidden_dim
self.embedding_dim = embedding_dim
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.padding_idx = padding_idx
self.goal_size = goal_size
self.goal_out_size = goal_out_size # equals to total_num_filters
self.step_size = step_size
self.gpu = gpu
self.temperature = 1.5
self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
self.worker = nn.LSTM(embedding_dim, hidden_dim)
self.manager = nn.LSTM(goal_out_size, hidden_dim)
self.work2goal = nn.Linear(hidden_dim, vocab_size * goal_size)
self.mana2goal = nn.Linear(hidden_dim, goal_out_size)
self.goal2goal = nn.Linear(goal_out_size, goal_size, bias=False)
self.goal_init = nn.Parameter(torch.rand((cfg.batch_size, goal_out_size)))
self.init_params()
def forward(self, idx, inp, work_hidden, mana_hidden, feature, real_goal, no_log=False, train=False):
"""
Embeds input and sample on token at a time (seq_len = 1)
:param idx: index of current token in sentence
:param inp: [batch_size]
:param work_hidden: 1 * batch_size * hidden_dim
:param mana_hidden: 1 * batch_size * hidden_dim
:param feature: 1 * batch_size * total_num_filters, feature of current sentence
:param real_goal: batch_size * goal_out_size, real_goal in LeakGAN source code
:param no_log: no log operation
:param train: if train
:return: out, cur_goal, work_hidden, mana_hidden
- out: batch_size * vocab_size
- cur_goal: batch_size * 1 * goal_out_size
"""
emb = self.embeddings(inp).unsqueeze(0) # 1 * batch_size * embed_dim
# Manager
mana_out, mana_hidden = self.manager(feature, mana_hidden) # mana_out: 1 * batch_size * hidden_dim
mana_out = self.mana2goal(mana_out.permute([1, 0, 2])) # batch_size * 1 * goal_out_size
cur_goal = F.normalize(mana_out, dim=-1)
_real_goal = self.goal2goal(real_goal) # batch_size * goal_size
_real_goal = F.normalize(_real_goal, p=2, dim=-1).unsqueeze(-1) # batch_size * goal_size * 1
# Worker
work_out, work_hidden = self.worker(emb, work_hidden) # work_out: 1 * batch_size * hidden_dim
work_out = self.work2goal(work_out).view(-1, self.vocab_size,
self.goal_size) # batch_size * vocab_size * goal_size
# Sample token
out = torch.matmul(work_out, _real_goal).squeeze(-1) # batch_size * vocab_size
# Temperature control
if idx > 1:
if train:
temperature = 1.0
else:
temperature = self.temperature
else:
temperature = self.temperature
out = temperature * out
if no_log:
out = F.softmax(out, dim=-1)
else:
out = F.log_softmax(out, dim=-1)
return out, cur_goal, work_hidden, mana_hidden
def sample(self, num_samples, batch_size, dis, start_letter=cfg.start_letter, train=False):
"""
Samples the network and returns num_samples samples of length max_seq_len.
:return: samples: batch_size * max_seq_len
"""
num_batch = num_samples // batch_size + 1 if num_samples != batch_size else 1
samples = torch.zeros(num_batch * batch_size, self.max_seq_len).long() # larger than num_samples
fake_sentences = torch.zeros((batch_size, self.max_seq_len))
for b in range(num_batch):
            leak_sample, _, _, _ = self.forward_leakgan(fake_sentences, dis, if_sample=True, no_log=False,
                                                         start_letter=start_letter, train=False)
assert leak_sample.shape == (batch_size, self.max_seq_len)
samples[b * batch_size:(b + 1) * batch_size, :] = leak_sample
samples = samples[:num_samples, :]
return samples # cut to num_samples
def pretrain_loss(self, target, dis, start_letter=cfg.start_letter):
"""
Returns the pretrain_generator Loss for predicting target sequence.
Inputs: target, dis, start_letter
- target: batch_size * seq_len
"""
batch_size, seq_len = target.size()
_, feature_array, goal_array, leak_out_array = self.forward_leakgan(target, dis, if_sample=False, no_log=False,
start_letter=start_letter)
# Manager loss
mana_cos_loss = self.manager_cos_loss(batch_size, feature_array,
goal_array) # batch_size * (seq_len / step_size)
manager_loss = -torch.sum(mana_cos_loss) / (batch_size * (seq_len // self.step_size))
# Worker loss
work_nll_loss = self.worker_nll_loss(target, leak_out_array) # batch_size * seq_len
work_loss = torch.sum(work_nll_loss) / (batch_size * seq_len)
return manager_loss, work_loss
def adversarial_loss(self, target, rewards, dis, start_letter=cfg.start_letter):
"""
Returns a pseudo-loss that gives corresponding policy gradients (on calling .backward()).
Inspired by the example in http://karpathy.github.io/2016/05/31/rl/
Inputs: target, rewards, dis, start_letter
- target: batch_size * seq_len
- rewards: batch_size * seq_len (discriminator rewards for each token)
"""
batch_size, seq_len = target.size()
_, feature_array, goal_array, leak_out_array = self.forward_leakgan(target, dis, if_sample=False, no_log=False,
start_letter=start_letter, train=True)
# Manager Loss
t0 = time.time()
mana_cos_loss = self.manager_cos_loss(batch_size, feature_array,
goal_array) # batch_size * (seq_len / step_size)
mana_loss = -torch.sum(rewards * mana_cos_loss) / (batch_size * (seq_len // self.step_size))
# Worker Loss
work_nll_loss = self.worker_nll_loss(target, leak_out_array) # batch_size * seq_len
work_cos_reward = self.worker_cos_reward(feature_array, goal_array) # batch_size * seq_len
work_loss = -torch.sum(work_nll_loss * work_cos_reward) / (batch_size * seq_len)
return mana_loss, work_loss
def manager_cos_loss(self, batch_size, feature_array, goal_array):
"""
Get manager cosine distance loss
:return cos_loss: batch_size * (seq_len / step_size)
"""
        # ===My implementation===
        # offset_feature = feature_array[:, 4:, :]
        # # do not track the change of the last four features
# all_feature = feature_array[:, :-4, :]
# all_goal = goal_array[:, :-4, :]
# sub_feature = offset_feature - all_feature
#
# # L2 normalization
# sub_feature = F.normalize(sub_feature, p=2, dim=-1)
# all_goal = F.normalize(all_goal, p=2, dim=-1)
#
# cos_loss = F.cosine_similarity(sub_feature, all_goal, dim=-1) # batch_size * (seq_len - 4)
#
# return cos_loss
# ===LeakGAN origin===
# get sub_feature and real_goal
# batch_size, seq_len = sentences.size()
sub_feature = torch.zeros(batch_size, self.max_seq_len // self.step_size, self.goal_out_size)
real_goal = torch.zeros(batch_size, self.max_seq_len // self.step_size, self.goal_out_size)
for i in range(self.max_seq_len // self.step_size):
idx = i * self.step_size
sub_feature[:, i, :] = feature_array[:, idx + self.step_size, :] - feature_array[:, idx, :]
if i == 0:
real_goal[:, i, :] = self.goal_init[:batch_size, :]
else:
idx = (i - 1) * self.step_size + 1
real_goal[:, i, :] = torch.sum(goal_array[:, idx:idx + 4, :], dim=1)
        # L2 normalization
sub_feature = F.normalize(sub_feature, p=2, dim=-1)
real_goal = F.normalize(real_goal, p=2, dim=-1)
cos_loss = F.cosine_similarity(sub_feature, real_goal, dim=-1)
return cos_loss
def worker_nll_loss(self, target, leak_out_array):
"""
Get NLL loss for worker
:return loss: batch_size * seq_len
"""
loss_fn = nn.NLLLoss(reduction='none')
loss = loss_fn(leak_out_array.permute([0, 2, 1]), target)
return loss
def worker_cos_reward(self, feature_array, goal_array):
"""
Get reward for worker (cosine distance)
:return: cos_loss: batch_size * seq_len
"""
for i in range(int(self.max_seq_len / self.step_size)):
real_feature = feature_array[:, i * self.step_size, :].unsqueeze(1).expand((-1, self.step_size, -1))
feature_array[:, i * self.step_size:(i + 1) * self.step_size, :] = real_feature
if i > 0:
sum_goal = torch.sum(goal_array[:, (i - 1) * self.step_size:i * self.step_size, :], dim=1, keepdim=True)
else:
sum_goal = goal_array[:, 0, :].unsqueeze(1)
goal_array[:, i * self.step_size:(i + 1) * self.step_size, :] = sum_goal.expand((-1, self.step_size, -1))
offset_feature = feature_array[:, 1:, :] # f_{t+1}, batch_size * seq_len * goal_out_size
goal_array = goal_array[:, :self.max_seq_len, :] # batch_size * seq_len * goal_out_size
sub_feature = offset_feature - goal_array
# L2 normalization
sub_feature = F.normalize(sub_feature, p=2, dim=-1)
all_goal = F.normalize(goal_array, p=2, dim=-1)
cos_loss = F.cosine_similarity(sub_feature, all_goal, dim=-1) # batch_size * seq_len
return cos_loss
def forward_leakgan(self, sentences, dis, if_sample, no_log=False, start_letter=cfg.start_letter, train=False):
"""
Get all feature and goals according to given sentences
:param sentences: batch_size * max_seq_len, not include start token
:param dis: discriminator model
:param if_sample: if use to sample token
:param no_log: if use log operation
:param start_letter:
:param train: if use temperature parameter
:return samples, feature_array, goal_array, leak_out_array:
- samples: batch_size * max_seq_len
- feature_array: batch_size * (max_seq_len + 1) * total_num_filter
- goal_array: batch_size * (max_seq_len + 1) * goal_out_size
- leak_out_array: batch_size * max_seq_len * vocab_size
"""
batch_size, seq_len = sentences.size()
feature_array = torch.zeros((batch_size, seq_len + 1, self.goal_out_size))
goal_array = torch.zeros((batch_size, seq_len + 1, self.goal_out_size))
leak_out_array = torch.zeros((batch_size, seq_len + 1, self.vocab_size))
samples = torch.zeros(batch_size, seq_len + 1).long()
work_hidden = self.init_hidden(batch_size)
mana_hidden = self.init_hidden(batch_size)
leak_inp = torch.LongTensor([start_letter] * batch_size)
# dis_inp = torch.LongTensor([start_letter] * batch_size)
real_goal = self.goal_init[:batch_size, :]
if self.gpu:
feature_array = feature_array.cuda()
goal_array = goal_array.cuda()
leak_out_array = leak_out_array.cuda()
goal_array[:, 0, :] = real_goal # g0 = goal_init
for i in range(seq_len + 1):
# Get feature
if if_sample:
dis_inp = samples[:, :seq_len]
else: # to get feature and goal
dis_inp = torch.zeros(batch_size, seq_len).long()
if i > 0:
dis_inp[:, :i] = sentences[:, :i] # cut sentences
leak_inp = sentences[:, i - 1]
if self.gpu:
dis_inp = dis_inp.cuda()
leak_inp = leak_inp.cuda()
feature = dis.get_feature(dis_inp).unsqueeze(0) # !!!note: 1 * batch_size * total_num_filters
feature_array[:, i, :] = feature.squeeze(0)
# Get output of one token
# cur_goal: batch_size * 1 * goal_out_size
out, cur_goal, work_hidden, mana_hidden = self.forward(i, leak_inp, work_hidden, mana_hidden, feature,
real_goal, no_log=no_log, train=train)
leak_out_array[:, i, :] = out
# ===My implement according to paper===
# Update real_goal and save goal
# if 0 < i < 4: # not update when i=0
# real_goal = torch.sum(goal_array, dim=1) # num_samples * goal_out_size
# elif i >= 4:
# real_goal = torch.sum(goal_array[:, i - 4:i, :], dim=1)
# if i > 0:
# goal_array[:, i, :] = cur_goal.squeeze(1) # !!!note: save goal after update last_goal
# ===LeakGAN origin===
# Save goal and update real_goal
goal_array[:, i, :] = cur_goal.squeeze(1)
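            # every step_size tokens, refresh real_goal with the sum of the last step_size
            # goals (the fixed i-3:i+1 window below also assumes step_size == 4)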
if i > 0 and i % self.step_size == 0:
real_goal = torch.sum(goal_array[:, i - 3:i + 1, :], dim=1)
if i / self.step_size == 1:
real_goal += self.goal_init[:batch_size, :]
# Sample one token
if not no_log:
out = torch.exp(out)
out = torch.multinomial(out, 1).view(-1) # [batch_size] (sampling from each row)
samples[:, i] = out.data
leak_inp = out
# cut to seq_len
samples = samples[:, :seq_len]
leak_out_array = leak_out_array[:, :seq_len, :]
return samples, feature_array, goal_array, leak_out_array
def batchNLLLoss(self, target, dis, start_letter=cfg.start_letter):
# loss_fn = nn.NLLLoss()
# batch_size, seq_len = target.size()
_, _, _, leak_out_array = self.forward_leakgan(target, dis, if_sample=False, no_log=False,
start_letter=start_letter)
nll_loss = torch.mean(self.worker_nll_loss(target, leak_out_array))
return nll_loss
def init_hidden(self, batch_size=1):
h = torch.zeros(1, batch_size, self.hidden_dim)
c = torch.zeros(1, batch_size, self.hidden_dim)
if self.gpu:
return h.cuda(), c.cuda()
else:
return h, c
def init_goal(self, batch_size):
goal = torch.rand((batch_size, self.goal_out_size)).normal_(std=0.1)
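        # note: normal_ fills the tensor in place, so the values drawn by torch.rand are discarded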
goal = nn.Parameter(goal)
if self.gpu:
return goal.cuda()
else:
return goal
def split_params(self):
mana_params = list()
work_params = list()
mana_params += list(self.manager.parameters())
mana_params += list(self.mana2goal.parameters())
mana_params.append(self.goal_init)
work_params += list(self.embeddings.parameters())
work_params += list(self.worker.parameters())
work_params += list(self.work2goal.parameters())
work_params += list(self.goal2goal.parameters())
return mana_params, work_params
def init_params(self):
for param in self.parameters():
if param.requires_grad and len(param.shape) > 0:
stddev = 1 / math.sqrt(param.shape[0])
if cfg.gen_init == 'uniform':
torch.nn.init.uniform_(param, a=-0.05, b=0.05)
elif cfg.gen_init == 'normal':
torch.nn.init.normal_(param, std=stddev)
elif cfg.gen_init == 'truncated_normal':
truncated_normal_(param, std=stddev)
| 16,609 | 41.372449 | 120 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/RelGAN_G.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : RelGAN_G.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import config as cfg
from models.generator import LSTMGenerator
from models.relational_rnn_general import RelationalMemory
class RelGAN_G(LSTMGenerator):
def __init__(self, mem_slots, num_heads, head_size, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx,
gpu=False):
super(RelGAN_G, self).__init__(embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu)
self.name = 'relgan'
self.temperature = 1.0 # init value is 1.0
self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
if cfg.model_type == 'LSTM':
# LSTM
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(embedding_dim, self.hidden_dim, batch_first=True)
self.lstm2out = nn.Linear(self.hidden_dim, vocab_size)
else:
# RMC
self.hidden_dim = mem_slots * num_heads * head_size
self.lstm = RelationalMemory(mem_slots=mem_slots, head_size=head_size, input_size=embedding_dim,
num_heads=num_heads, return_all_outputs=True)
self.lstm2out = nn.Linear(self.hidden_dim, vocab_size)
self.init_params()
pass
def init_hidden(self, batch_size=cfg.batch_size):
if cfg.model_type == 'LSTM':
h = torch.zeros(1, batch_size, self.hidden_dim)
c = torch.zeros(1, batch_size, self.hidden_dim)
if self.gpu:
return h.cuda(), c.cuda()
else:
return h, c
else:
"""init RMC memory"""
memory = self.lstm.initial_state(batch_size)
            memory = self.lstm.repackage_hidden(memory)  # detach memory at first
return memory.cuda() if self.gpu else memory
def step(self, inp, hidden):
"""
RelGAN step forward
:param inp: [batch_size]
:param hidden: memory size
:return: pred, hidden, next_token, next_token_onehot, next_o
- pred: batch_size * vocab_size, use for adversarial training backward
- hidden: next hidden
- next_token: [batch_size], next sentence token
- next_token_onehot: batch_size * vocab_size, not used yet
- next_o: batch_size * vocab_size, not used yet
"""
emb = self.embeddings(inp).unsqueeze(1)
out, hidden = self.lstm(emb, hidden)
gumbel_t = self.add_gumbel(self.lstm2out(out.squeeze(1)))
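        # Gumbel-Max trick: adding Gumbel(0, 1) noise to the logits makes the argmax below an
        # exact sample from the categorical distribution; the tempered softmax that follows is
        # its differentiable relaxation used for adversarial training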
next_token = torch.argmax(gumbel_t, dim=1).detach()
# next_token_onehot = F.one_hot(next_token, cfg.vocab_size).float() # not used yet
next_token_onehot = None
pred = F.softmax(gumbel_t * self.temperature, dim=-1) # batch_size * vocab_size
# next_o = torch.sum(next_token_onehot * pred, dim=1) # not used yet
next_o = None
return (pred), hidden, next_token, next_token_onehot, next_o
def sample(self, num_samples, batch_size, one_hot=False, start_letter=cfg.start_letter):
"""
Sample from RelGAN Generator
        - one_hot: whether to return the pred of RelGAN, used for adversarial training
        :return:
            - all_preds: batch_size * seq_len * vocab_size, only used for a single batch
- samples: all samples
"""
global all_preds
num_batch = num_samples // batch_size + 1 if num_samples != batch_size else 1
samples = torch.zeros(num_batch * batch_size, self.max_seq_len).long()
if one_hot:
all_preds = torch.zeros(batch_size, self.max_seq_len, self.vocab_size)
if self.gpu:
all_preds = all_preds.cuda()
for b in range(num_batch):
hidden = self.init_hidden(batch_size)
inp = torch.LongTensor([start_letter] * batch_size)
if self.gpu:
inp = inp.cuda()
for i in range(self.max_seq_len):
pred, hidden, next_token, _, _ = self.step(inp, hidden)
samples[b * batch_size:(b + 1) * batch_size, i] = next_token
if one_hot:
all_preds[:, i] = pred
inp = next_token
samples = samples[:num_samples] # num_samples * seq_len
if one_hot:
return all_preds # batch_size * seq_len * vocab_size
return samples
@staticmethod
def add_gumbel(o_t, eps=1e-10, gpu=cfg.CUDA):
"""Add o_t by a vector sampled from Gumbel(0,1)"""
u = torch.zeros(o_t.size())
if gpu:
u = u.cuda()
u.uniform_(0, 1)
g_t = -torch.log(-torch.log(u + eps) + eps)
gumbel_t = o_t + g_t
return gumbel_t
@staticmethod
def hard(y):
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1]).cuda()
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
y_hard = (y_hard - y).detach() + y
return y_hard
| 5,316 | 37.528986 | 120 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/MaliGAN_G.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : MaliGAN_G.py
# @Time : Created at 2019/10/17
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn.functional as F
from models.generator import LSTMGenerator
class MaliGAN_G(LSTMGenerator):
def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu=False):
super(MaliGAN_G, self).__init__(embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu)
self.name = 'maligan'
def adv_loss(self, inp, target, reward):
"""
Returns a MaliGAN loss
:param inp: batch_size x seq_len, inp should be target with <s> (start letter) prepended
:param target: batch_size x seq_len
:param reward: batch_size (discriminator reward for each sentence, applied to each token of the corresponding sentence)
:return loss: policy loss
"""
batch_size, seq_len = inp.size()
hidden = self.init_hidden(batch_size)
out = self.forward(inp, hidden).view(batch_size, self.max_seq_len, self.vocab_size)
target_onehot = F.one_hot(target, self.vocab_size).float() # batch_size * seq_len * vocab_size
pred = torch.sum(out * target_onehot, dim=-1) # batch_size * seq_len
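        # pred holds the log-probability of each target token (forward applies LogSoftmax),
        # so weighting by the MaliGAN reward gives a reward-weighted MLE objective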
loss = -torch.sum(pred * reward)
return loss
| 1,450 | 34.390244 | 127 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/generator.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : generator.py
# @Time : Created at 2019-03-18
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import math
import torch
import torch.nn as nn
import config as cfg
from utils.helpers import truncated_normal_
class LSTMGenerator(nn.Module):
def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu=False):
super(LSTMGenerator, self).__init__()
self.name = 'vanilla'
self.hidden_dim = hidden_dim
self.embedding_dim = embedding_dim
self.max_seq_len = max_seq_len
self.vocab_size = vocab_size
self.padding_idx = padding_idx
self.gpu = gpu
self.temperature = 1.0
self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=padding_idx)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
self.lstm2out = nn.Linear(hidden_dim, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
self.init_params()
def forward(self, inp, hidden, need_hidden=False):
"""
Embeds input and applies LSTM
:param inp: batch_size * seq_len
:param hidden: (h, c)
        :param need_hidden: whether to return the hidden state (used for sampling)
"""
emb = self.embeddings(inp) # batch_size * len * embedding_dim
if len(inp.size()) == 1:
emb = emb.unsqueeze(1) # batch_size * 1 * embedding_dim
out, hidden = self.lstm(emb, hidden) # out: batch_size * seq_len * hidden_dim
out = out.contiguous().view(-1, self.hidden_dim) # out: (batch_size * len) * hidden_dim
out = self.lstm2out(out) # (batch_size * seq_len) * vocab_size
# out = self.temperature * out # temperature
pred = self.softmax(out)
if need_hidden:
return pred, hidden
else:
return pred
def sample(self, num_samples, batch_size, start_letter=cfg.start_letter):
"""
Samples the network and returns num_samples samples of length max_seq_len.
:return samples: num_samples * max_seq_length (a sampled sequence in each row)
"""
num_batch = num_samples // batch_size + 1 if num_samples != batch_size else 1
samples = torch.zeros(num_batch * batch_size, self.max_seq_len).long()
# Generate sentences with multinomial sampling strategy
for b in range(num_batch):
hidden = self.init_hidden(batch_size)
inp = torch.LongTensor([start_letter] * batch_size)
if self.gpu:
inp = inp.cuda()
for i in range(self.max_seq_len):
out, hidden = self.forward(inp, hidden, need_hidden=True) # out: batch_size * vocab_size
next_token = torch.multinomial(torch.exp(out), 1) # batch_size * 1 (sampling from each row)
samples[b * batch_size:(b + 1) * batch_size, i] = next_token.view(-1)
inp = next_token.view(-1)
samples = samples[:num_samples]
return samples
def init_params(self):
for param in self.parameters():
if param.requires_grad and len(param.shape) > 0:
stddev = 1 / math.sqrt(param.shape[0])
if cfg.gen_init == 'uniform':
torch.nn.init.uniform_(param, a=-0.05, b=0.05)
elif cfg.gen_init == 'normal':
torch.nn.init.normal_(param, std=stddev)
elif cfg.gen_init == 'truncated_normal':
truncated_normal_(param, std=stddev)
def init_oracle(self):
for param in self.parameters():
if param.requires_grad:
torch.nn.init.normal_(param, mean=0, std=1)
def init_hidden(self, batch_size=cfg.batch_size):
h = torch.zeros(1, batch_size, self.hidden_dim)
c = torch.zeros(1, batch_size, self.hidden_dim)
if self.gpu:
return h.cuda(), c.cuda()
else:
return h, c
| 4,098 | 36.605505 | 108 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/RelGAN_D.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : RelGAN_D.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.discriminator import CNNDiscriminator
dis_filter_sizes = [2, 3, 4, 5]
dis_num_filters = [300, 300, 300, 300]
class RelGAN_D(CNNDiscriminator):
def __init__(self, embed_dim, max_seq_len, num_rep, vocab_size, padding_idx, gpu=False, dropout=0.25):
super(RelGAN_D, self).__init__(embed_dim, vocab_size, dis_filter_sizes, dis_num_filters, padding_idx,
gpu, dropout)
self.embed_dim = embed_dim
self.max_seq_len = max_seq_len
self.feature_dim = sum(dis_num_filters)
self.emb_dim_single = int(embed_dim / num_rep)
self.embeddings = nn.Linear(vocab_size, embed_dim, bias=False)
self.convs = nn.ModuleList([
nn.Conv2d(1, n, (f, self.emb_dim_single), stride=(1, self.emb_dim_single)) for (n, f) in
zip(dis_num_filters, dis_filter_sizes)
])
self.highway = nn.Linear(self.feature_dim, self.feature_dim)
self.feature2out = nn.Linear(self.feature_dim, 100)
self.out2logits = nn.Linear(100, 1)
self.dropout = nn.Dropout(dropout)
self.init_params()
def forward(self, inp):
"""
Get logits of discriminator
:param inp: batch_size * seq_len * vocab_size
:return logits: [batch_size * num_rep] (1-D tensor)
"""
emb = self.embeddings(inp).unsqueeze(1) # batch_size * 1 * max_seq_len * embed_dim
cons = [F.relu(conv(emb)) for conv in self.convs] # [batch_size * num_filter * (seq_len-k_h+1) * num_rep]
pools = [F.max_pool2d(con, (con.size(2), 1)).squeeze(2) for con in cons] # [batch_size * num_filter * num_rep]
pred = torch.cat(pools, 1)
pred = pred.permute(0, 2, 1).contiguous().view(-1, self.feature_dim) # (batch_size * num_rep) * feature_dim
highway = self.highway(pred)
pred = torch.sigmoid(highway) * F.relu(highway) + (1. - torch.sigmoid(highway)) * pred # highway
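        # single highway layer: a sigmoid gate mixes the transformed features (relu branch)
        # with the untransformed pooled features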
pred = self.feature2out(self.dropout(pred))
logits = self.out2logits(pred).squeeze(1) # [batch_size * num_rep]
return logits
| 2,425 | 37.507937 | 119 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/models/SeqGAN_G.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : SeqGAN_G.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn.functional as F
from models.generator import LSTMGenerator
class SeqGAN_G(LSTMGenerator):
def __init__(self, embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu=False):
super(SeqGAN_G, self).__init__(embedding_dim, hidden_dim, vocab_size, max_seq_len, padding_idx, gpu)
self.name = 'seqgan'
def batchPGLoss(self, inp, target, reward):
"""
Returns a policy gradient loss
:param inp: batch_size x seq_len, inp should be target with <s> (start letter) prepended
:param target: batch_size x seq_len
:param reward: batch_size (discriminator reward for each sentence, applied to each token of the corresponding sentence)
:return loss: policy loss
"""
batch_size, seq_len = inp.size()
hidden = self.init_hidden(batch_size)
out = self.forward(inp, hidden).view(batch_size, self.max_seq_len, self.vocab_size)
target_onehot = F.one_hot(target, self.vocab_size).float() # batch_size * seq_len * vocab_size
pred = torch.sum(out * target_onehot, dim=-1) # batch_size * seq_len
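        # REINFORCE-style policy gradient: minimising -sum(log pi(y_t) * R) maximises the
        # expected discriminator reward of the sampled sequences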
loss = -torch.sum(pred * reward)
return loss
| 1,456 | 35.425 | 127 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/metrics/clas_acc.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : clas_acc.py
# @Time : Created at 2019/12/4
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
from metrics.basic import Metrics
class ACC(Metrics):
def __init__(self, if_use=True, gpu=True):
super(ACC, self).__init__('clas_acc')
self.if_use = if_use
self.model = None
self.data_loader = None
self.gpu = gpu
def get_score(self):
if not self.if_use:
return 0
assert self.model and self.data_loader, 'Need to reset() before get_score()!'
return self.cal_acc(self.model, self.data_loader)
def reset(self, model=None, data_loader=None):
self.model = model
self.data_loader = data_loader
def cal_acc(self, model, data_loader):
total_acc = 0
total_num = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if self.gpu:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
return round(total_acc / total_num, 4)
| 1,394 | 28.0625 | 85 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/metrics/nll.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : nll.py
# @Time : Created at 2019-05-31
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import config as cfg
from metrics.basic import Metrics
class NLL(Metrics):
def __init__(self, name, if_use=False, gpu=False):
super(NLL, self).__init__(name)
self.if_use = if_use
self.model = None
self.data_loader = None
self.label_i = None
self.leak_dis = None
self.gpu = gpu
self.criterion = nn.NLLLoss()
def get_score(self):
"""note that NLL score need the updated model and data loader each time, use reset() before get_score()"""
if not self.if_use:
return 0
assert self.model and self.data_loader, 'Need to reset() before get_score()!'
if self.leak_dis is not None: # For LeakGAN
return self.cal_nll_with_leak_dis(self.model, self.data_loader, self.leak_dis, self.gpu)
elif self.label_i is not None: # For category text generation
return self.cal_nll_with_label(self.model, self.data_loader, self.label_i,
self.criterion, self.gpu)
else:
return self.cal_nll(self.model, self.data_loader, self.criterion, self.gpu)
def reset(self, model=None, data_loader=None, label_i=None, leak_dis=None):
self.model = model
self.data_loader = data_loader
self.label_i = label_i
self.leak_dis = leak_dis
@staticmethod
def cal_nll(model, data_loader, criterion, gpu=cfg.CUDA):
"""NLL score for general text generation model."""
total_loss = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if gpu:
inp, target = inp.cuda(), target.cuda()
hidden = model.init_hidden(data_loader.batch_size)
pred = model.forward(inp, hidden)
loss = criterion(pred, target.view(-1))
total_loss += loss.item()
return round(total_loss / len(data_loader), 4)
@staticmethod
def cal_nll_with_label(model, data_loader, label_i, criterion, gpu=cfg.CUDA):
"""NLL score for category text generation model."""
assert type(label_i) == int, 'missing label'
total_loss = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
label = torch.LongTensor([label_i] * data_loader.batch_size)
if gpu:
inp, target, label = inp.cuda(), target.cuda(), label.cuda()
hidden = model.init_hidden(data_loader.batch_size)
if model.name == 'oracle':
pred = model.forward(inp, hidden)
else:
pred = model.forward(inp, hidden, label)
loss = criterion(pred, target.view(-1))
total_loss += loss.item()
return round(total_loss / len(data_loader), 4)
@staticmethod
def cal_nll_with_leak_dis(model, data_loader, leak_dis, gpu=cfg.CUDA):
"""NLL score for LeakGAN."""
total_loss = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if gpu:
inp, target = inp.cuda(), target.cuda()
loss = model.batchNLLLoss(target, leak_dis)
total_loss += loss.item()
return round(total_loss / len(data_loader), 4)
| 3,792 | 37.313131 | 114 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/leakgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : leakgan_instructor.py
# @Time : Created at 2019-06-05
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.LeakGAN_D import LeakGAN_D
from models.LeakGAN_G import LeakGAN_G
from utils import rollout
from utils.data_loader import GenDataIter, DisDataIter
from utils.text_process import tensor_to_tokens, write_tokens
class LeakGANInstructor(BasicInstructor):
def __init__(self, opt):
super(LeakGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = LeakGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, cfg.goal_size, cfg.step_size, cfg.CUDA)
self.dis = LeakGAN_D(cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# optimizer
mana_params, work_params = self.gen.split_params()
mana_opt = optim.Adam(mana_params, lr=cfg.gen_lr)
work_opt = optim.Adam(work_params, lr=cfg.gen_lr)
self.gen_opt = [mana_opt, work_opt]
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
for inter_num in range(cfg.inter_epoch):
self.log.info('>>> Interleaved Round %d...' % inter_num)
self.sig.update() # update signal
if self.sig.pre_sig:
# ===DISCRIMINATOR PRE-TRAINING===
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===GENERATOR MLE TRAINING===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
else:
self.log.info('>>> Stop by pre_signal! Skip to adversarial training...')
break
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s' % (str(self.cal_metrics(fmt_str=True))))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pretraining for the gen
- gen_opt: [mana_opt, work_opt]
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_mana_loss = 0
pre_work_loss = 0
# ===Train===
for i, data in enumerate(self.train_data.loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
mana_loss, work_loss = self.gen.pretrain_loss(target, self.dis)
self.optimize_multi(self.gen_opt, [mana_loss, work_loss])
pre_mana_loss += mana_loss.data.item()
pre_work_loss += work_loss.data.item()
pre_mana_loss = pre_mana_loss / len(self.train_data.loader)
pre_work_loss = pre_work_loss / len(self.train_data.loader)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info('[MLE-GEN] epoch %d : pre_mana_loss = %.4f, pre_work_loss = %.4f, %s' % (
epoch, pre_mana_loss, pre_work_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step, current_k=0):
"""
The gen is trained using policy gradients, using the reward from the discriminator.
        Training is done for g_step batches.
"""
rollout_func = rollout.ROLLOUT(self.gen, cfg.CUDA)
adv_mana_loss = 0
adv_work_loss = 0
for step in range(g_step):
with torch.no_grad():
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, self.dis,
train=True) # !!! train=True, the only place
inp, target = GenDataIter.prepare(gen_samples, gpu=cfg.CUDA)
# ===Train===
rewards = rollout_func.get_reward_leakgan(target, cfg.rollout_num, self.dis,
current_k).cpu() # reward with MC search
mana_loss, work_loss = self.gen.adversarial_loss(target, rewards, self.dis)
# update parameters
self.optimize_multi(self.gen_opt, [mana_loss, work_loss])
adv_mana_loss += mana_loss.data.item()
adv_work_loss += work_loss.data.item()
# ===Test===
self.log.info('[ADV-GEN] adv_mana_loss = %.4f, adv_work_loss = %.4f, %s' % (
adv_mana_loss / g_step, adv_work_loss / g_step, self.cal_metrics(fmt_str=True)))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs.
"""
d_loss, train_acc = 0, 0
for step in range(d_step):
# prepare loader for training
pos_samples = self.train_data.target
neg_samples = self.gen.sample(cfg.samples_num, cfg.batch_size, self.dis)
dis_data = DisDataIter(pos_samples, neg_samples)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f,' % (
phase, step, d_loss, train_acc))
def cal_metrics(self, fmt_str=False):
with torch.no_grad():
# Prepare data for evaluation
eval_samples = self.gen.sample(cfg.samples_num, cfg.batch_size, self.dis)
gen_data = GenDataIter(eval_samples)
gen_tokens = tensor_to_tokens(eval_samples, self.idx2word_dict)
gen_tokens_s = tensor_to_tokens(self.gen.sample(200, cfg.batch_size, self.dis), self.idx2word_dict)
# Reset metrics
self.bleu.reset(test_text=gen_tokens, real_text=self.test_data.tokens)
self.nll_gen.reset(self.gen, self.train_data.loader, leak_dis=self.dis)
self.nll_div.reset(self.gen, gen_data.loader, leak_dis=self.dis)
self.self_bleu.reset(test_text=gen_tokens_s, real_text=gen_tokens)
self.ppl.reset(gen_tokens)
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), metric.get_score()) for metric in self.all_metrics])
else:
return [metric.get_score() for metric in self.all_metrics]
def _save(self, phase, epoch):
torch.save(self.gen.state_dict(), cfg.save_model_root + 'gen_{}_{:05d}.pt'.format(phase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_{}_{:05d}.txt'.format(phase, epoch)
samples = self.gen.sample(cfg.batch_size, cfg.batch_size, self.dis)
write_tokens(save_sample_path, tensor_to_tokens(samples, self.idx2word_dict))
| 8,968 | 44.527919 | 114 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import numpy as np
import torch
import torch.nn as nn
import config as cfg
from metrics.bleu import BLEU
from metrics.clas_acc import ACC
from metrics.nll import NLL
from metrics.ppl import PPL
from utils.cat_data_loader import CatClasDataIter
from utils.data_loader import GenDataIter
from utils.helpers import Signal, create_logger, get_fixed_temperature
from utils.text_process import load_dict, write_tokens, tensor_to_tokens
class BasicInstructor:
def __init__(self, opt):
self.log = create_logger(__name__, silent=False, to_disk=True,
log_file=cfg.log_filename if cfg.if_test
else [cfg.log_filename, cfg.save_root + 'log.txt'])
self.sig = Signal(cfg.signal_file)
self.opt = opt
self.show_config()
self.clas = None
# load dictionary
self.word2idx_dict, self.idx2word_dict = load_dict(cfg.dataset)
# Dataloader
try:
self.train_data = GenDataIter(cfg.train_data)
self.test_data = GenDataIter(cfg.test_data, if_test_data=True)
except:
pass
try:
self.train_data_list = [GenDataIter(cfg.cat_train_data.format(i)) for i in range(cfg.k_label)]
self.test_data_list = [GenDataIter(cfg.cat_test_data.format(i), if_test_data=True) for i in
range(cfg.k_label)]
self.clas_data_list = [GenDataIter(cfg.cat_test_data.format(str(i)), if_test_data=True) for i in
range(cfg.k_label)]
self.train_samples_list = [self.train_data_list[i].target for i in range(cfg.k_label)]
self.clas_samples_list = [self.clas_data_list[i].target for i in range(cfg.k_label)]
except:
pass
# Criterion
self.mle_criterion = nn.NLLLoss()
self.dis_criterion = nn.CrossEntropyLoss()
self.clas_criterion = nn.CrossEntropyLoss()
# Optimizer
self.clas_opt = None
# Metrics
self.bleu = BLEU('BLEU', gram=[2, 3, 4, 5], if_use=cfg.use_bleu)
self.nll_gen = NLL('NLL_gen', if_use=cfg.use_nll_gen, gpu=cfg.CUDA)
self.nll_div = NLL('NLL_div', if_use=cfg.use_nll_div, gpu=cfg.CUDA)
self.self_bleu = BLEU('Self-BLEU', gram=[2, 3, 4], if_use=cfg.use_self_bleu)
self.clas_acc = ACC(if_use=cfg.use_clas_acc)
self.ppl = PPL(self.train_data, self.test_data, n_gram=5, if_use=cfg.use_ppl)
self.all_metrics = [self.bleu, self.nll_gen, self.nll_div, self.self_bleu, self.ppl]
def _run(self):
print('Nothing to run in Basic Instructor!')
pass
def _test(self):
pass
def init_model(self):
if cfg.dis_pretrain:
self.log.info(
'Load pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
self.log.info('Load MLE pre-trained generator: {}'.format(cfg.pretrained_gen_path))
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path))
if cfg.CUDA:
self.gen = self.gen.cuda()
self.dis = self.dis.cuda()
def train_gen_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
hidden = model.init_hidden(data_loader.batch_size)
pred = model.forward(inp, hidden)
loss = criterion(pred, target.view(-1))
self.optimize(optimizer, loss, model)
total_loss += loss.item()
return total_loss / len(data_loader)
def train_dis_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
total_acc = 0
total_num = 0
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
self.optimize(optimizer, loss, model)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
def train_classifier(self, epochs):
"""
Classifier for calculating the classification accuracy metric of category text generation.
        Note: the train and test data for the classifier are swapped relative to the generator,
        because the classifier is used to measure the classification accuracy of samples from a
        generator trained on self.train_samples_list.
        Since synthetic (oracle) data has no test split, the synthetic data experiments
        don't need a classifier.
"""
import copy
# Prepare data for Classifier
clas_data = CatClasDataIter(self.clas_samples_list)
eval_clas_data = CatClasDataIter(self.train_samples_list)
max_acc = 0
best_clas = None
for epoch in range(epochs):
c_loss, c_acc = self.train_dis_epoch(self.clas, clas_data.loader, self.clas_criterion,
self.clas_opt)
_, eval_acc = self.eval_dis(self.clas, eval_clas_data.loader, self.clas_criterion)
if eval_acc > max_acc:
best_clas = copy.deepcopy(self.clas.state_dict()) # save the best classifier
max_acc = eval_acc
self.log.info('[PRE-CLAS] epoch %d: c_loss = %.4f, c_acc = %.4f, eval_acc = %.4f, max_eval_acc = %.4f',
epoch, c_loss, c_acc, eval_acc, max_acc)
self.clas.load_state_dict(copy.deepcopy(best_clas)) # Reload the best classifier
@staticmethod
def eval_dis(model, data_loader, criterion):
total_loss = 0
total_acc = 0
total_num = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
@staticmethod
def optimize_multi(opts, losses):
for i, (opt, loss) in enumerate(zip(opts, losses)):
opt.zero_grad()
loss.backward(retain_graph=True if i < len(opts) - 1 else False)
opt.step()
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
def show_config(self):
self.log.info(100 * '=')
self.log.info('> training arguments:')
for arg in vars(self.opt):
self.log.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
self.log.info(100 * '=')
def cal_metrics(self, fmt_str=False):
"""
Calculate metrics
:param fmt_str: if return format string for logging
"""
with torch.no_grad():
# Prepare data for evaluation
eval_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)
gen_data = GenDataIter(eval_samples)
gen_tokens = tensor_to_tokens(eval_samples, self.idx2word_dict)
gen_tokens_s = tensor_to_tokens(self.gen.sample(200, 200), self.idx2word_dict)
# Reset metrics
self.bleu.reset(test_text=gen_tokens, real_text=self.test_data.tokens)
self.nll_gen.reset(self.gen, self.train_data.loader)
self.nll_div.reset(self.gen, gen_data.loader)
self.self_bleu.reset(test_text=gen_tokens_s, real_text=gen_tokens)
self.ppl.reset(gen_tokens)
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), metric.get_score()) for metric in self.all_metrics])
else:
return [metric.get_score() for metric in self.all_metrics]
def cal_metrics_with_label(self, label_i):
assert type(label_i) == int, 'missing label'
with torch.no_grad():
# Prepare data for evaluation
eval_samples = self.gen.sample(cfg.samples_num, 8 * cfg.batch_size, label_i=label_i)
gen_data = GenDataIter(eval_samples)
gen_tokens = tensor_to_tokens(eval_samples, self.idx2word_dict)
gen_tokens_s = tensor_to_tokens(self.gen.sample(200, 200, label_i=label_i), self.idx2word_dict)
clas_data = CatClasDataIter([eval_samples], label_i)
# Reset metrics
self.bleu.reset(test_text=gen_tokens, real_text=self.test_data_list[label_i].tokens)
self.nll_gen.reset(self.gen, self.train_data_list[label_i].loader, label_i)
self.nll_div.reset(self.gen, gen_data.loader, label_i)
self.self_bleu.reset(test_text=gen_tokens_s, real_text=gen_tokens)
self.clas_acc.reset(self.clas, clas_data.loader)
self.ppl.reset(gen_tokens)
return [metric.get_score() for metric in self.all_metrics]
def comb_metrics(self, fmt_str=False):
all_scores = [self.cal_metrics_with_label(label_i) for label_i in range(cfg.k_label)]
all_scores = np.array(all_scores).T.tolist() # each row for each metric
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), score)
for (metric, score) in zip(self.all_metrics, all_scores)])
return all_scores
def _save(self, phase, epoch):
"""Save model state dict and generator's samples"""
if phase != 'ADV':
torch.save(self.gen.state_dict(), cfg.save_model_root + 'gen_{}_{:05d}.pt'.format(phase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_{}_{:05d}.txt'.format(phase, epoch)
samples = self.gen.sample(cfg.batch_size, cfg.batch_size)
write_tokens(save_sample_path, tensor_to_tokens(samples, self.idx2word_dict))
def update_temperature(self, i, N):
self.gen.temperature.data = torch.Tensor([get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)])
if cfg.CUDA:
self.gen.temperature.data = self.gen.temperature.data.cuda()
| 11,155 | 40.626866 | 115 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/relgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : relgan_instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.RelGAN_D import RelGAN_D
from models.RelGAN_G import RelGAN_G
from utils.helpers import get_fixed_temperature, get_losses
from torch import autograd
class RelGANInstructor(BasicInstructor):
def __init__(self, opt):
super(RelGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = RelGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,
cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
self.dis = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
# ===PRE-TRAINING (GENERATOR)===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pretrain_generator: {}'.format(cfg.pretrained_gen_path))
# # ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
progress = tqdm(range(cfg.ADV_train_epoch))
for adv_epoch in progress:
self.sig.update()
if self.sig.adv_sig:
g_loss = self.adv_train_generator(cfg.ADV_g_step) # Generator
d_loss = self.adv_train_discriminator(cfg.ADV_d_step) # Discriminator
self.update_temperature(adv_epoch, cfg.ADV_train_epoch) # update temperature
progress.set_description(
'g_loss: %.4f, d_loss: %.4f, temperature: %.4f' % (g_loss, d_loss, self.gen.temperature))
# TEST
if adv_epoch % cfg.adv_log_step == 0:
self.log.info('[ADV] epoch %d: g_loss: %.4f, d_loss: %.4f, %s' % (
adv_epoch, g_loss, d_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
progress.close()
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
# ===Train===
pre_loss = self.train_gen_epoch(self.gen, self.train_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info('[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (
epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
total_loss = 0
for step in range(g_step):
real_samples = self.train_data.random_batch()['target']
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
if cfg.CUDA:
real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()
real_samples = F.one_hot(real_samples, cfg.vocab_size).float()
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
g_loss, _ = get_losses(d_out_real, d_out_fake, cfg.loss_type)
self.optimize(self.gen_adv_opt, g_loss, self.gen)
total_loss += g_loss.item()
return total_loss / g_step if g_step != 0 else 0
def calc_gradient_penalty(self, real_data, fake_data):
BATCH_SIZE = real_data.shape[0]
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(real_data.shape)
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
# disc_interpolates = netD(interpolates)
disc_interpolates = self.dis(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.contiguous().view(gradients.size(0), -1)
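        # WGAN-GP style penalty: push the gradient norm of D at the interpolated points towards 1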
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def adv_train_discriminator(self, d_step):
total_loss = 0
for step in range(d_step):
real_samples = self.train_data.random_batch()['target']
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
if cfg.CUDA:
real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()
real_samples = F.one_hot(real_samples, cfg.vocab_size).float()
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
_, d_loss = get_losses(d_out_real, d_out_fake, cfg.loss_type)
if cfg.GP:
gradient_penalty = self.calc_gradient_penalty(real_samples.data, gen_samples.data)
d_loss = d_loss+cfg.LAMBDA*gradient_penalty
self.optimize(self.dis_opt, d_loss, self.dis)
total_loss += d_loss.item()
return total_loss / d_step if d_step != 0 else 0
def update_temperature(self, i, N):
self.gen.temperature = get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
| 7,150 | 40.33526 | 115 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/jsdgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : JSDGAN_instructor.py
# @Time : Created at 2019/11/25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.JSDGAN_G import JSDGAN_G
class JSDGANInstructor(BasicInstructor):
def __init__(self, opt):
super(JSDGANInstructor, self).__init__(opt)
# generator
self.gen = JSDGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,
cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
def init_model(self):
if cfg.gen_pretrain:
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path))
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path, map_location='cuda:{}'.format(cfg.device)))
if cfg.CUDA:
self.gen = self.gen.cuda()
def _run(self):
# ===PRE-TRAINING===
# TRAIN GENERATOR
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
for adv_epoch in range(cfg.ADV_train_epoch):
g_loss = self.adv_train_generator(cfg.ADV_g_step) # Generator
if adv_epoch % cfg.adv_log_step == 0:
self.log.info('[ADV] epoch %d: g_loss = %.4f, %s' % (adv_epoch, g_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_loss = self.train_gen_epoch(self.gen, self.train_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
        The gen is trained by directly minimising the JSD-based loss on batches drawn
        from the training data; no separate discriminator is involved.
"""
global inp, target
total_loss = 0
for step in range(g_step):
for i, data in enumerate(self.train_data.loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
# ===Train===
adv_loss = self.gen.JSD_loss(inp, target)
self.optimize(self.gen_opt, adv_loss, self.gen)
total_loss += adv_loss.item()
return total_loss
| 3,564 | 34.65 | 120 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/maligan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : maligan_instructor.py
# @Time : Created at 2019/11/29
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.MaliGAN_D import MaliGAN_D
from models.MaliGAN_G import MaliGAN_G
from utils.data_loader import GenDataIter, DisDataIter
# noinspection PyUnresolvedReferences
class MaliGANInstructor(BasicInstructor):
def __init__(self, opt):
super(MaliGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = MaliGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA)
self.dis = MaliGAN_D(cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
# ===PRE-TRAINING===
# TRAIN GENERATOR
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
# ===TRAIN DISCRIMINATOR====
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s' % (self.cal_metrics(fmt_str=True)))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_loss = self.train_gen_epoch(self.gen, self.train_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
The gen is trained by MLE-like objective.
"""
total_g_loss = 0
for step in range(g_step):
inp, target = GenDataIter.prepare(self.gen.sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)
# ===Train===
rewards = self.get_mali_reward(target)
adv_loss = self.gen.adv_loss(inp, target, rewards)
self.optimize(self.gen_adv_opt, adv_loss)
total_g_loss += adv_loss.item()
# ===Test===
self.log.info('[ADV-GEN]: g_loss = %.4f, %s' % (total_g_loss, self.cal_metrics(fmt_str=True)))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs.
"""
# prepare loader for validate
global d_loss, train_acc
for step in range(d_step):
# prepare loader for training
pos_samples = self.train_data.target # not re-sample the Oracle data
neg_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)
dis_data = DisDataIter(pos_samples, neg_samples)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f,' % (
phase, step, d_loss, train_acc))
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
def get_mali_reward(self, samples):
rewards = []
for _ in range(cfg.rollout_num):
dis_out = self.dis(samples)[:, 1]
rewards.append(dis_out)
rewards = torch.mean(torch.stack(rewards, dim=0), dim=0) # batch_size
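        # MaliGAN importance weight r_D = D / (1 - D), renormalised over the batch and
        # mean-centred as a simple variance-reducing baseline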
rewards = torch.div(rewards, 1 - rewards)
rewards = torch.div(rewards, torch.sum(rewards))
rewards -= torch.mean(rewards)
rewards = rewards.unsqueeze(1).expand(samples.size()) # batch_size * seq_len
return rewards
| 6,211 | 39.337662 | 119 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/seqgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : seqgan_instructor.py
# @Time : Created at 2019-06-05
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.SeqGAN_D import SeqGAN_D
from models.SeqGAN_G import SeqGAN_G
from utils import rollout
from utils.data_loader import GenDataIter, DisDataIter
class SeqGANInstructor(BasicInstructor):
def __init__(self, opt):
super(SeqGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = SeqGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA)
self.dis = SeqGAN_D(cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
# ===PRE-TRAINING===
# TRAIN GENERATOR
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
# ===TRAIN DISCRIMINATOR====
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s' % (self.cal_metrics(fmt_str=True)))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_loss = self.train_gen_epoch(self.gen, self.train_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
The gen is trained using policy gradients, using the reward from the discriminator.
        Training is done for g_step batches.
"""
rollout_func = rollout.ROLLOUT(self.gen, cfg.CUDA)
total_g_loss = 0
for step in range(g_step):
inp, target = GenDataIter.prepare(self.gen.sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)
# ===Train===
rewards = rollout_func.get_reward(target, cfg.rollout_num, self.dis)
adv_loss = self.gen.batchPGLoss(inp, target, rewards)
self.optimize(self.gen_adv_opt, adv_loss)
total_g_loss += adv_loss.item()
# ===Test===
self.log.info('[ADV-GEN]: g_loss = %.4f, %s' % (total_g_loss, self.cal_metrics(fmt_str=True)))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
Samples are drawn d_step times, and the discriminator is trained for d_epoch d_epoch.
"""
# prepare loader for validate
global d_loss, train_acc
for step in range(d_step):
# prepare loader for training
pos_samples = self.train_data.target
neg_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)
dis_data = DisDataIter(pos_samples, neg_samples)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f,' % (
phase, step, d_loss, train_acc))
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
| 5,817 | 40.557143 | 119 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/trgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : trgan_instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.RelGAN_D import RelGAN_D
from models.RelGAN_G import RelGAN_G
from utils.helpers import get_fixed_temperature, get_losses
from torch import autograd
import os
class TRGANInstructor(BasicInstructor):
def __init__(self, opt):
super(TRGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = RelGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,
cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
self.dis = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
gpu=cfg.CUDA)
self.dis_D = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
self.dis_D_opt = optim.Adam(self.dis_D.parameters(), lr=cfg.dis_D_lr)
def init_model(self):
if cfg.oracle_pretrain:
if not os.path.exists(cfg.oracle_state_dict_path):
create_oracle()
self.oracle.load_state_dict(torch.load(cfg.oracle_state_dict_path))
if cfg.dis_pretrain:
self.log.info(
'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path))
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path, map_location='cuda:{}'.format(cfg.device)))
if cfg.CUDA:
self.oracle = self.oracle.cuda()
self.gen = self.gen.cuda()
self.dis = self.dis.cuda()
self.dis_D = self.dis_D.cuda()
def _run(self):
# ===PRE-TRAINING (GENERATOR)===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
# # ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
progress = tqdm(range(cfg.ADV_train_epoch))
for adv_epoch in progress:
self.sig.update()
if self.sig.adv_sig:
g_loss = self.adv_train_generator(cfg.ADV_g_step) # Generator
d_loss = self.adv_train_discriminator(cfg.ADV_d_step) # Discriminator
self.update_temperature(adv_epoch, cfg.ADV_train_epoch) # update temperature
progress.set_description(
'g_loss: %.4f, d_loss: %.4f, temperature: %.4f' % (g_loss, d_loss, self.gen.temperature))
# TEST
if adv_epoch % cfg.adv_log_step == 0:
self.log.info('[ADV] epoch %d: g_loss: %.4f, d_loss: %.4f, %s' % (
adv_epoch, g_loss, d_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
progress.close()
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
# ===Train===
pre_loss = self.train_gen_epoch(self.gen, self.train_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
criterion = nn.BCELoss()
total_loss = 0
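        # dis_D scores how "real" the current generator's samples look; P0 = (1 - D0) / D0 is a
        # density-ratio proxy for the pre-update generator, computed once under no_grad and held
        # fixed so the following g_step updates can be measured against it PPO-style.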
with torch.no_grad():
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
if cfg.CUDA:
gen_samples = gen_samples.cuda()
D0 = torch.sigmoid(self.dis_D(gen_samples))
P0 = (1.-D0)/torch.clamp(D0, min = 1e-7)
for step in range(g_step):
            real_samples = F.one_hot(self.train_data.random_batch()['target'], cfg.vocab_size).float()
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
real_label = torch.full((D0.shape[0],), 1.)
fake_label = torch.full((D0.shape[0],), 0.)
if cfg.CUDA:
real_samples, gen_samples, real_label, fake_label = real_samples.cuda(), gen_samples.cuda(), real_label.cuda(), fake_label.cuda()
# print(self.dis_D(real_samples).shape, real_label.shape)
errDD_real = criterion(torch.sigmoid(self.dis_D(real_samples)), real_label)
errDD_fake = criterion(torch.sigmoid(self.dis_D(gen_samples.detach())), fake_label)
self.optimize(self.dis_D_opt, errDD_real+errDD_fake, self.dis_D)
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True).cuda()
real_samples = F.one_hot(self.train_data.random_batch()['target'], cfg.vocab_size).float().cuda()
D1 = torch.sigmoid(self.dis_D(gen_samples))
P1 = (1.-D1)
ratio = (P1/torch.clamp(D1*P0, min = 1e-7))
ratio_clipped = torch.clamp(ratio, 1.0 - cfg.clip_param, 1.0 + cfg.clip_param)
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
surr1 = ratio * d_out_fake
surr2 = ratio_clipped * d_out_fake
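            # Pessimistic clipped surrogate: the ratio is clamped to [1 - clip_param, 1 + clip_param];
            # take the smaller of the unclipped/clipped terms when the critic score is positive and
            # the larger otherwise, mirroring PPO's clipped objective.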
target = torch.where(d_out_fake>0, torch.min(surr1, surr2), torch.max(surr1, surr2))
g_loss, _ = get_losses(d_out_real, target, cfg.loss_type)
# g_loss = -d_out_fake.mean()
self.optimize(self.gen_adv_opt, g_loss, self.gen)
total_loss += g_loss.item()
return total_loss / g_step if g_step != 0 else 0
def calc_gradient_penalty(self, real_data, fake_data):
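        # WGAN-GP style penalty: evaluate the discriminator on random interpolates between real and
        # fake batches and penalize deviations of the gradient norm from 1 (two-sided penalty).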
BATCH_SIZE = real_data.shape[0]
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(real_data.shape)
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
# disc_interpolates = netD(interpolates)
disc_interpolates = self.dis(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.contiguous().view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def adv_train_discriminator(self, d_step):
total_loss = 0
for step in range(d_step):
real_samples = self.train_data.random_batch()['target']
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
if cfg.CUDA:
real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
_, d_loss = get_losses(d_out_real, d_out_fake, cfg.loss_type)
if cfg.GP:
gradient_penalty = self.calc_gradient_penalty(real_samples.data, gen_samples.data)
d_loss = d_loss+cfg.LAMBDA*gradient_penalty
# print(d_loss.shape)
self.optimize(self.dis_opt, d_loss, self.dis)
total_loss += d_loss.item()
return total_loss / d_step if d_step != 0 else 0
def update_temperature(self, i, N):
self.gen.temperature = get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
"""Add clip_grad_norm_"""
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
| 9,711 | 42.747748 | 145 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/real_data/sentigan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : sentigan_instructor.py
# @Time : Created at 2019-07-09
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.real_data.instructor import BasicInstructor
from models.SentiGAN_D import SentiGAN_D, SentiGAN_C
from models.SentiGAN_G import SentiGAN_G
from utils import rollout
from utils.cat_data_loader import CatClasDataIter
from utils.data_loader import GenDataIter
from utils.text_process import tensor_to_tokens, write_tokens
class SentiGANInstructor(BasicInstructor):
def __init__(self, opt):
super(SentiGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen_list = [SentiGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA) for _ in range(cfg.k_label)]
self.dis = SentiGAN_D(cfg.k_label, cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.clas = SentiGAN_C(cfg.k_label, cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.extend_vocab_size,
cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt_list = [optim.Adam(gen.parameters(), lr=cfg.gen_lr) for gen in self.gen_list]
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
self.clas_opt = optim.Adam(self.clas.parameters(), lr=cfg.clas_lr)
# Metrics
self.all_metrics.append(self.clas_acc)
def init_model(self):
if cfg.dis_pretrain:
self.log.info(
'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
for i in range(cfg.k_label):
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path + '%d' % i))
self.gen_list[i].load_state_dict(torch.load(cfg.pretrained_gen_path + '%d' % i))
if cfg.clas_pretrain:
self.log.info('Load pretrained classifier: {}'.format(cfg.pretrained_clas_path))
self.clas.load_state_dict(torch.load(cfg.pretrained_clas_path, map_location='cuda:%d' % cfg.device))
if cfg.CUDA:
for i in range(cfg.k_label):
self.gen_list[i] = self.gen_list[i].cuda()
self.dis = self.dis.cuda()
self.clas = self.clas.cuda()
def _run(self):
# ===Pre-train Classifier with real data===
if cfg.use_clas_acc:
self.log.info('Start training Classifier...')
self.train_classifier(cfg.PRE_clas_epoch)
# ===PRE-TRAIN GENERATOR===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
for i in range(cfg.k_label):
torch.save(self.gen_list[i].state_dict(), cfg.pretrained_gen_path + '%d' % i)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path + '%d' % i))
# ===TRAIN DISCRIMINATOR====
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s', self.comb_metrics(fmt_str=True))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
for i in range(cfg.k_label):
pre_loss = self.train_gen_epoch(self.gen_list[i], self.train_data_list[i].loader,
self.mle_criterion, self.gen_opt_list[i])
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
if i == cfg.k_label - 1:
self.log.info('[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (
epoch, pre_loss, self.comb_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
        The gen is trained using policy gradients, with rewards from the discriminator.
        Training is done for g_step batches.
"""
for i in range(cfg.k_label):
rollout_func = rollout.ROLLOUT(self.gen_list[i], cfg.CUDA)
total_g_loss = 0
for step in range(g_step):
inp, target = GenDataIter.prepare(self.gen_list[i].sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)
# ===Train===
rewards = rollout_func.get_reward(target, cfg.rollout_num, self.dis, current_k=i)
adv_loss = self.gen_list[i].batchPGLoss(inp, target, rewards)
self.optimize(self.gen_opt_list[i], adv_loss)
total_g_loss += adv_loss.item()
# ===Test===
self.log.info('[ADV-GEN]: %s', self.comb_metrics(fmt_str=True))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs.
"""
# prepare loader for validate
global d_loss, train_acc
for step in range(d_step):
# prepare loader for training
real_samples = []
fake_samples = []
for i in range(cfg.k_label):
real_samples.append(self.train_samples_list[i])
fake_samples.append(self.gen_list[i].sample(cfg.samples_num // cfg.k_label, 8 * cfg.batch_size))
dis_samples_list = [torch.cat(fake_samples, dim=0)] + real_samples
dis_data = CatClasDataIter(dis_samples_list)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f' % (
phase, step, d_loss, train_acc))
if cfg.if_save and not cfg.if_test and phase == 'MLE':
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
def cal_metrics_with_label(self, label_i):
assert type(label_i) == int, 'missing label'
with torch.no_grad():
# Prepare data for evaluation
eval_samples = self.gen_list[label_i].sample(cfg.samples_num, 8 * cfg.batch_size)
gen_data = GenDataIter(eval_samples)
gen_tokens = tensor_to_tokens(eval_samples, self.idx2word_dict)
gen_tokens_s = tensor_to_tokens(self.gen_list[label_i].sample(200, 200), self.idx2word_dict)
clas_data = CatClasDataIter([eval_samples], label_i)
# Reset metrics
self.bleu.reset(test_text=gen_tokens, real_text=self.test_data_list[label_i].tokens)
self.nll_gen.reset(self.gen_list[label_i], self.train_data_list[label_i].loader)
self.nll_div.reset(self.gen_list[label_i], gen_data.loader)
self.self_bleu.reset(test_text=gen_tokens_s, real_text=gen_tokens)
self.clas_acc.reset(self.clas, clas_data.loader)
self.ppl.reset(gen_tokens)
return [metric.get_score() for metric in self.all_metrics]
def _save(self, phase, epoch):
"""Save model state dict and generator's samples"""
for i in range(cfg.k_label):
if phase != 'ADV':
torch.save(self.gen_list[i].state_dict(),
cfg.save_model_root + 'gen{}_{}_{:05d}.pt'.format(i, phase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_d{}_{}_{:05d}.txt'.format(i, phase, epoch)
samples = self.gen_list[i].sample(cfg.batch_size, cfg.batch_size)
write_tokens(save_sample_path, tensor_to_tokens(samples, self.idx2word_dict))
| 9,725 | 44.877358 | 120 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/leakgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : leakgan_instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.oracle_data.instructor import BasicInstructor
from models.LeakGAN_D import LeakGAN_D
from models.LeakGAN_G import LeakGAN_G
from utils import rollout
from utils.data_loader import GenDataIter, DisDataIter
from utils.text_process import write_tensor
class LeakGANInstructor(BasicInstructor):
def __init__(self, opt):
super(LeakGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = LeakGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, cfg.goal_size, cfg.step_size, cfg.CUDA)
self.dis = LeakGAN_D(cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# optimizer
mana_params, work_params = self.gen.split_params()
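        # LeakGAN keeps separate Adam optimizers for the Manager and Worker parameter groups;
        # both losses are stepped jointly through optimize_multi during pre-training and ADV training.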
mana_opt = optim.Adam(mana_params, lr=cfg.gen_lr)
work_opt = optim.Adam(work_params, lr=cfg.gen_lr)
self.gen_opt = [mana_opt, work_opt]
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
for inter_num in range(cfg.inter_epoch):
self.log.info('>>> Interleaved Round %d...' % inter_num)
self.sig.update() # update signal
if self.sig.pre_sig:
# ===DISCRIMINATOR PRE-TRAINING===
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===GENERATOR MLE TRAINING===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
else:
self.log.info('>>> Stop by pre_signal! Skip to adversarial training...')
break
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s' % (str(self.cal_metrics(fmt_str=True))))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pretraining for the gen
- gen_opt: [mana_opt, work_opt]
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_mana_loss = 0
pre_work_loss = 0
# ===Train===
for i, data in enumerate(self.oracle_data.loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
mana_loss, work_loss = self.gen.pretrain_loss(target, self.dis)
self.optimize_multi(self.gen_opt, [mana_loss, work_loss])
pre_mana_loss += mana_loss.data.item()
pre_work_loss += work_loss.data.item()
pre_mana_loss = pre_mana_loss / len(self.oracle_data.loader)
pre_work_loss = pre_work_loss / len(self.oracle_data.loader)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info('[MLE-GEN] epoch %d : pre_mana_loss = %.4f, pre_work_loss = %.4f, %s' % (
epoch, pre_mana_loss, pre_work_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step, current_k=0):
"""
        The gen is trained using policy gradients, with rewards from the discriminator.
        Training is done for g_step batches.
"""
rollout_func = rollout.ROLLOUT(self.gen, cfg.CUDA)
adv_mana_loss = 0
adv_work_loss = 0
for step in range(g_step):
with torch.no_grad():
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, self.dis,
train=True) # !!! train=True, the only place
inp, target = GenDataIter.prepare(gen_samples, gpu=cfg.CUDA)
# ===Train===
rewards = rollout_func.get_reward_leakgan(target, cfg.rollout_num, self.dis,
current_k).cpu() # reward with MC search
mana_loss, work_loss = self.gen.adversarial_loss(target, rewards, self.dis)
# update parameters
self.optimize_multi(self.gen_opt, [mana_loss, work_loss])
adv_mana_loss += mana_loss.data.item()
adv_work_loss += work_loss.data.item()
# ===Test===
self.log.info('[ADV-GEN] adv_mana_loss = %.4f, adv_work_loss = %.4f, %s' % (
adv_mana_loss / g_step, adv_work_loss / g_step, self.cal_metrics(fmt_str=True)))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs.
"""
# prepare loader for validate
global d_loss, train_acc
pos_val = self.oracle.sample(8 * cfg.batch_size, cfg.batch_size)
neg_val = self.gen.sample(8 * cfg.batch_size, cfg.batch_size, self.dis)
dis_eval_data = DisDataIter(pos_val, neg_val)
for step in range(d_step):
# prepare loader for training
pos_samples = self.oracle.sample(cfg.samples_num, cfg.batch_size) # re-sample the Oracle Data
neg_samples = self.gen.sample(cfg.samples_num, cfg.batch_size, self.dis)
dis_data = DisDataIter(pos_samples, neg_samples)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
_, eval_acc = self.eval_dis(self.dis, dis_eval_data.loader, self.dis_criterion)
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f, eval_acc = %.4f,' % (
phase, step, d_loss, train_acc, eval_acc))
def cal_metrics(self, fmt_str=False):
# Prepare data for evaluation
gen_data = GenDataIter(self.gen.sample(cfg.samples_num, cfg.batch_size, self.dis))
# Reset metrics
self.nll_oracle.reset(self.oracle, gen_data.loader)
self.nll_gen.reset(self.gen, self.oracle_data.loader, leak_dis=self.dis)
self.nll_div.reset(self.gen, gen_data.loader, leak_dis=self.dis)
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), metric.get_score()) for metric in self.all_metrics])
else:
return [metric.get_score() for metric in self.all_metrics]
def _save(self, phase, epoch):
torch.save(self.gen.state_dict(), cfg.save_model_root + 'gen_{}_{:05d}.pt'.format(phase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_{}_{:05d}.txt'.format(phase, epoch)
samples = self.gen.sample(cfg.batch_size, cfg.batch_size, self.dis)
write_tensor(save_sample_path, samples)
| 8,922 | 44.294416 | 114 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import numpy as np
import os
import torch
import torch.nn as nn
import config as cfg
from metrics.nll import NLL
from models.Oracle import Oracle
from utils.data_loader import GenDataIter
from utils.data_utils import create_multi_oracle
from utils.helpers import Signal, create_logger, create_oracle, get_fixed_temperature
from utils.text_process import write_tensor
class BasicInstructor:
def __init__(self, opt):
self.log = create_logger(__name__, silent=False, to_disk=True,
log_file=cfg.log_filename if cfg.if_test
else [cfg.log_filename, cfg.save_root + 'log.txt'])
self.sig = Signal(cfg.signal_file)
self.opt = opt
# oracle, generator, discriminator
self.oracle = Oracle(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA)
self.oracle_list = [Oracle(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA) for _ in range(cfg.k_label)]
self.dis = None
self.clas = None
self.show_config()
self.check_oracle() # Create Oracle models if not exist
# DataLoader
self.oracle_samples = torch.load(cfg.oracle_samples_path.format(cfg.samples_num))
self.oracle_samples_list = [torch.load(cfg.multi_oracle_samples_path.format(i, cfg.samples_num))
for i in range(cfg.k_label)]
self.oracle_data = GenDataIter(self.oracle_samples)
self.oracle_data_list = [GenDataIter(self.oracle_samples_list[i]) for i in range(cfg.k_label)]
# Criterion
self.mle_criterion = nn.NLLLoss()
self.dis_criterion = nn.CrossEntropyLoss()
# Metrics
self.nll_oracle = NLL('NLL_oracle', if_use=cfg.use_nll_oracle, gpu=cfg.CUDA)
self.nll_gen = NLL('NLL_gen', if_use=cfg.use_nll_gen, gpu=cfg.CUDA)
self.nll_div = NLL('NLL_div', if_use=cfg.use_nll_div, gpu=cfg.CUDA)
self.all_metrics = [self.nll_oracle, self.nll_gen, self.nll_div]
def _run(self):
print('Nothing to run in Basic Instructor!')
pass
def _test(self):
pass
def init_model(self):
if cfg.oracle_pretrain:
if not os.path.exists(cfg.oracle_state_dict_path):
create_oracle()
self.oracle.load_state_dict(torch.load(cfg.oracle_state_dict_path))
if cfg.dis_pretrain:
self.log.info(
'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path))
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path, map_location='cuda:{}'.format(cfg.device)))
if cfg.CUDA:
self.oracle = self.oracle.cuda()
self.gen = self.gen.cuda()
self.dis = self.dis.cuda()
def train_gen_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
hidden = model.init_hidden(data_loader.batch_size)
pred = model.forward(inp, hidden)
loss = criterion(pred, target.view(-1))
self.optimize(optimizer, loss, model)
total_loss += loss.item()
return total_loss / len(data_loader)
def train_dis_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
total_acc = 0
total_num = 0
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
self.optimize(optimizer, loss, model)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
@staticmethod
def eval_dis(model, data_loader, criterion):
total_loss = 0
total_acc = 0
total_num = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
@staticmethod
def optimize_multi(opts, losses):
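        # Back-propagate several losses that share parts of the same graph: retain the graph for
        # every optimizer except the last so each loss can still call backward().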
for i, (opt, loss) in enumerate(zip(opts, losses)):
opt.zero_grad()
loss.backward(retain_graph=True if i < len(opts) - 1 else False)
opt.step()
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
def show_config(self):
"""Show parser parameters settings"""
self.log.info(100 * '=')
self.log.info('> training arguments:')
for arg in vars(self.opt):
self.log.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
self.log.info(100 * '=')
def cal_metrics(self, fmt_str=False):
"""
Calculate metrics
:param fmt_str: if return format string for logging
"""
with torch.no_grad():
# Prepare data for evaluation
gen_data = GenDataIter(self.gen.sample(cfg.samples_num, 4 * cfg.batch_size))
# Reset metrics
self.nll_oracle.reset(self.oracle, gen_data.loader)
self.nll_gen.reset(self.gen, self.oracle_data.loader)
self.nll_div.reset(self.gen, gen_data.loader)
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), metric.get_score()) for metric in self.all_metrics])
else:
return [metric.get_score() for metric in self.all_metrics]
def cal_metrics_with_label(self, label_i):
assert type(label_i) == int, 'missing label'
with torch.no_grad():
# Prepare data for evaluation
eval_samples = self.gen.sample(cfg.samples_num, 8 * cfg.batch_size, label_i=label_i)
gen_data = GenDataIter(eval_samples)
# Reset metrics
self.nll_oracle.reset(self.oracle_list[label_i], gen_data.loader, label_i)
self.nll_gen.reset(self.gen, self.oracle_data_list[label_i].loader, label_i)
self.nll_div.reset(self.gen, gen_data.loader, label_i)
return [metric.get_score() for metric in self.all_metrics]
def comb_metrics(self, fmt_str=False):
all_scores = [self.cal_metrics_with_label(label_i) for label_i in range(cfg.k_label)]
all_scores = np.array(all_scores).T.tolist() # each row for each metric
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), score)
for (metric, score) in zip(self.all_metrics, all_scores)])
return all_scores
def _save(self, phase, epoch):
"""Save model state dict and generator's samples"""
if phase != 'ADV':
torch.save(self.gen.state_dict(), cfg.save_model_root + 'gen_{}_{:05d}.pt'.format(phase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_{}_{:05d}.txt'.format(phase, epoch)
samples = self.gen.sample(cfg.batch_size, cfg.batch_size)
write_tensor(save_sample_path, samples)
def update_temperature(self, i, N):
self.gen.temperature.data = torch.Tensor([get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)])
if cfg.CUDA:
self.gen.temperature.data = self.gen.temperature.data.cuda()
def check_oracle(self):
if not cfg.oracle_pretrain:
create_oracle()
create_multi_oracle(cfg.k_label)
# General text generation Oracle model
if not os.path.exists(cfg.oracle_samples_path.format(cfg.samples_num)) or not cfg.oracle_pretrain:
create_oracle()
# Category text generation Oracle models
for i in range(cfg.k_label):
if not os.path.exists(cfg.multi_oracle_samples_path.format(i, cfg.samples_num)):
create_multi_oracle(cfg.k_label)
break
# Load Oracle state dict
self.oracle.load_state_dict(torch.load(cfg.oracle_state_dict_path))
for i in range(cfg.k_label):
oracle_path = cfg.multi_oracle_state_dict_path.format(i)
self.oracle_list[i].load_state_dict(torch.load(oracle_path))
| 9,609 | 39.041667 | 116 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/relgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : relgan_instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import config as cfg
from instructor.oracle_data.instructor import BasicInstructor
from models.RelGAN_D import RelGAN_D
from models.RelGAN_G import RelGAN_G
from utils.helpers import get_fixed_temperature, get_losses
from torch import autograd
class RelGANInstructor(BasicInstructor):
def __init__(self, opt):
super(RelGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = RelGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,
cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
self.dis = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
# ===PRE-TRAINING (GENERATOR)===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
# # ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
progress = tqdm(range(cfg.ADV_train_epoch))
for adv_epoch in progress:
self.sig.update()
if self.sig.adv_sig:
g_loss = self.adv_train_generator(cfg.ADV_g_step) # Generator
d_loss = self.adv_train_discriminator(cfg.ADV_d_step) # Discriminator
self.update_temperature(adv_epoch, cfg.ADV_train_epoch) # update temperature
progress.set_description(
'g_loss: %.4f, d_loss: %.4f, temperature: %.4f' % (g_loss, d_loss, self.gen.temperature))
# TEST
if adv_epoch % cfg.adv_log_step == 0:
self.log.info('[ADV] epoch %d: g_loss: %.4f, d_loss: %.4f, %s' % (
adv_epoch, g_loss, d_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
progress.close()
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
# ===Train===
pre_loss = self.train_gen_epoch(self.gen, self.oracle_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
total_loss = 0
for step in range(g_step):
real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float()
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
# print(gen_samples)
# print(real_samples)
if cfg.CUDA:
real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
g_loss, _ = get_losses(d_out_real, d_out_fake, cfg.loss_type)
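            # get_losses returns a (generator loss, discriminator loss) pair for the configured
            # adversarial loss_type; only the generator part is used in this step.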
self.optimize(self.gen_adv_opt, g_loss, self.gen)
total_loss += g_loss.item()
return total_loss / g_step if g_step != 0 else 0
def calc_gradient_penalty(self, real_data, fake_data):
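        # Gradient penalty (WGAN-GP): interpolate between real and fake data with a random alpha
        # and push the gradient norm of the discriminator output at the interpolates towards 1.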
BATCH_SIZE = real_data.shape[0]
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(real_data.shape)
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
# disc_interpolates = netD(interpolates)
disc_interpolates = self.dis(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.contiguous().view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def adv_train_discriminator(self, d_step):
total_loss = 0
for step in range(d_step):
real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float()
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
if cfg.CUDA:
real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
_, d_loss = get_losses(d_out_real, d_out_fake, cfg.loss_type)
if cfg.GP:
gradient_penalty = self.calc_gradient_penalty(real_samples.data, gen_samples.data)
d_loss = d_loss+cfg.LAMBDA*gradient_penalty
# print(d_loss.shape)
self.optimize(self.dis_opt, d_loss, self.dis)
total_loss += d_loss.item()
return total_loss / d_step if d_step != 0 else 0
def update_temperature(self, i, N):
self.gen.temperature = get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
"""Add clip_grad_norm_"""
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None and cfg.clip:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
| 7,248 | 40.1875 | 119 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/jsdgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : JSDGAN_instructor.py
# @Time : Created at 2019/11/16
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import os
import torch
import torch.optim as optim
import config as cfg
from instructor.oracle_data.instructor import BasicInstructor
from models.JSDGAN_G import JSDGAN_G
from utils.helpers import create_oracle
class JSDGANInstructor(BasicInstructor):
def __init__(self, opt):
super(JSDGANInstructor, self).__init__(opt)
# generator
self.gen = JSDGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,
cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
def init_model(self):
if cfg.oracle_pretrain:
if not os.path.exists(cfg.oracle_state_dict_path):
create_oracle()
self.oracle.load_state_dict(torch.load(cfg.oracle_state_dict_path))
if cfg.gen_pretrain:
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path))
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path, map_location='cuda:{}'.format(cfg.device)))
if cfg.CUDA:
self.oracle = self.oracle.cuda()
self.gen = self.gen.cuda()
def _run(self):
# ===PRE-TRAINING===
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
for adv_epoch in range(cfg.ADV_train_epoch):
g_loss = self.adv_train_generator(cfg.ADV_g_step) # Generator
if adv_epoch % cfg.adv_log_step == 0:
self.log.info('[ADV] epoch %d: g_loss = %.4f, %s' % (adv_epoch, g_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_loss = self.train_gen_epoch(self.gen, self.oracle_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
        The gen is trained by directly minimizing the JSD-based loss over the oracle data;
        no discriminator reward or policy gradient is used here.
"""
global inp, target
total_loss = 0
for step in range(g_step):
for i, data in enumerate(self.oracle_data.loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
# ===Train===
adv_loss = self.gen.JSD_loss(inp, target)
self.optimize(self.gen_opt, adv_loss, self.gen)
total_loss += adv_loss.item()
return total_loss
| 3,844 | 35.273585 | 120 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/maligan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : maligan_instructor.py
# @Time : Created at 2019/10/17
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.oracle_data.instructor import BasicInstructor
from models.MaliGAN_D import MaliGAN_D
from models.MaliGAN_G import MaliGAN_G
from utils.data_loader import GenDataIter, DisDataIter
class MaliGANInstructor(BasicInstructor):
def __init__(self, opt):
super(MaliGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = MaliGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA)
self.dis = MaliGAN_D(cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
# ===PRE-TRAINING===
# TRAIN GENERATOR
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
# ===TRAIN DISCRIMINATOR====
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s' % (self.cal_metrics(fmt_str=True)))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_loss = self.train_gen_epoch(self.gen, self.oracle_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
The gen is trained by MLE-like objective.
"""
total_g_loss = 0
for step in range(g_step):
inp, target = GenDataIter.prepare(self.gen.sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)
# ===Train===
rewards = self.get_mali_reward(target)
adv_loss = self.gen.adv_loss(inp, target, rewards)
self.optimize(self.gen_adv_opt, adv_loss)
total_g_loss += adv_loss.item()
# ===Test===
self.log.info('[ADV-GEN]: g_loss = %.4f, %s' % (total_g_loss, self.cal_metrics(fmt_str=True)))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs.
"""
# prepare loader for validate
global d_loss, train_acc
pos_val = self.oracle.sample(8 * cfg.batch_size, 4 * cfg.batch_size)
neg_val = self.gen.sample(8 * cfg.batch_size, 4 * cfg.batch_size)
dis_eval_data = DisDataIter(pos_val, neg_val)
for step in range(d_step):
# prepare loader for training
pos_samples = self.oracle_samples # not re-sample the Oracle data
neg_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)
dis_data = DisDataIter(pos_samples, neg_samples)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
_, eval_acc = self.eval_dis(self.dis, dis_eval_data.loader, self.dis_criterion)
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f, eval_acc = %.4f,' % (
phase, step, d_loss, train_acc, eval_acc))
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
def get_mali_reward(self, samples):
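        # MaliGAN reward: average D(x) over rollout_num passes, convert it to the importance
        # weight D/(1-D), renormalize the weights over the batch, then center them as a baseline
        # before broadcasting to sequence length.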
rewards = []
for _ in range(cfg.rollout_num):
dis_out = self.dis(samples)[:, 1]
rewards.append(dis_out)
rewards = torch.mean(torch.stack(rewards, dim=0), dim=0) # batch_size
rewards = torch.div(rewards, 1 - rewards)
rewards = torch.div(rewards, torch.sum(rewards))
rewards -= torch.mean(rewards)
rewards = rewards.unsqueeze(1).expand(samples.size()) # batch_size * seq_len
return rewards
| 6,497 | 40.388535 | 119 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/seqgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : seqgan_instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.optim as optim
import config as cfg
from instructor.oracle_data.instructor import BasicInstructor
from models.SeqGAN_D import SeqGAN_D
from models.SeqGAN_G import SeqGAN_G
from utils import rollout
from utils.data_loader import GenDataIter, DisDataIter
class SeqGANInstructor(BasicInstructor):
def __init__(self, opt):
super(SeqGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = SeqGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA)
self.dis = SeqGAN_D(cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def _run(self):
# ===PRE-TRAINING===
# TRAIN GENERATOR
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
# ===TRAIN DISCRIMINATOR====
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s' % (self.cal_metrics(fmt_str=True)))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
pre_loss = self.train_gen_epoch(self.gen, self.oracle_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
        The gen is trained using policy gradients, with rewards from the discriminator.
        Training is done for g_step batches.
"""
rollout_func = rollout.ROLLOUT(self.gen, cfg.CUDA)
total_g_loss = 0
for step in range(g_step):
inp, target = GenDataIter.prepare(self.gen.sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)
# ===Train===
rewards = rollout_func.get_reward(target, cfg.rollout_num, self.dis)
adv_loss = self.gen.batchPGLoss(inp, target, rewards)
self.optimize(self.gen_adv_opt, adv_loss)
total_g_loss += adv_loss.item()
# ===Test===
self.log.info('[ADV-GEN]: g_loss = %.4f, %s' % (total_g_loss, self.cal_metrics(fmt_str=True)))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs.
"""
# prepare loader for validate
global d_loss, train_acc
pos_val = self.oracle.sample(8 * cfg.batch_size, 4 * cfg.batch_size)
neg_val = self.gen.sample(8 * cfg.batch_size, 4 * cfg.batch_size)
dis_eval_data = DisDataIter(pos_val, neg_val)
for step in range(d_step):
# prepare loader for training
pos_samples = self.oracle_samples # not re-sample the Oracle data
neg_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)
dis_data = DisDataIter(pos_samples, neg_samples)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
_, eval_acc = self.eval_dis(self.dis, dis_eval_data.loader, self.dis_criterion)
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f, eval_acc = %.4f,' % (
phase, step, d_loss, train_acc, eval_acc))
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
| 6,175 | 41.593103 | 119 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/trgan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : relgan_instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import config as cfg
from instructor.oracle_data.instructor import BasicInstructor
from models.RelGAN_D import RelGAN_D
from models.RelGAN_G import RelGAN_G
from utils.helpers import create_oracle, get_fixed_temperature, get_losses
from torch import autograd
import os
class TRGANInstructor(BasicInstructor):
def __init__(self, opt):
super(TRGANInstructor, self).__init__(opt)
# generator, discriminator
self.gen = RelGAN_G(cfg.mem_slots, cfg.num_heads, cfg.head_size, cfg.gen_embed_dim, cfg.gen_hidden_dim,
cfg.vocab_size, cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
self.dis = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
gpu=cfg.CUDA)
self.dis_D = RelGAN_D(cfg.dis_embed_dim, cfg.max_seq_len, cfg.num_rep, cfg.vocab_size, cfg.padding_idx,
gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_lr)
self.gen_adv_opt = optim.Adam(self.gen.parameters(), lr=cfg.gen_adv_lr)
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
self.dis_D_opt = optim.Adam(self.dis_D.parameters(), lr=cfg.dis_D_lr)
def init_model(self):
if cfg.oracle_pretrain:
if not os.path.exists(cfg.oracle_state_dict_path):
create_oracle()
self.oracle.load_state_dict(torch.load(cfg.oracle_state_dict_path))
if cfg.dis_pretrain:
self.log.info(
'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path))
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path, map_location='cuda:{}'.format(cfg.device)))
if cfg.CUDA:
self.oracle = self.oracle.cuda()
self.gen = self.gen.cuda()
self.dis = self.dis.cuda()
self.dis_D = self.dis_D.cuda()
def _run(self):
# ===PRE-TRAINING (GENERATOR)===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.gen.state_dict(), cfg.pretrained_gen_path)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path))
# # ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
progress = tqdm(range(cfg.ADV_train_epoch))
for adv_epoch in progress:
self.sig.update()
if self.sig.adv_sig:
g_loss = self.adv_train_generator(cfg.ADV_g_step) # Generator
d_loss = self.adv_train_discriminator(cfg.ADV_d_step) # Discriminator
self.update_temperature(adv_epoch, cfg.ADV_train_epoch) # update temperature
progress.set_description(
'g_loss: %.4f, d_loss: %.4f, temperature: %.4f' % (g_loss, d_loss, self.gen.temperature))
# TEST
if adv_epoch % cfg.adv_log_step == 0:
self.log.info('[ADV] epoch %d: g_loss: %.4f, d_loss: %.4f, %s' % (
adv_epoch, g_loss, d_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
progress.close()
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
# ===Train===
pre_loss = self.train_gen_epoch(self.gen, self.oracle_data.loader, self.mle_criterion, self.gen_opt)
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
self.log.info(
'[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (epoch, pre_loss, self.cal_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
criterion = nn.BCELoss()
total_loss = 0
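        # Freeze a density-ratio estimate of the pre-update generator: D0 is dis_D's real-probability
        # on current samples and P0 = (1 - D0) / D0 is reused as the reference ("old policy") term of
        # the clipped ratio computed inside the loop below.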
with torch.no_grad():
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
if cfg.CUDA:
gen_samples = gen_samples.cuda()
D0 = torch.sigmoid(self.dis_D(gen_samples))
P0 = (1.-D0)/torch.clamp(D0, min = 1e-7)
for step in range(g_step):
real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float()
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
real_label = torch.full((D0.shape[0],), 1.)
fake_label = torch.full((D0.shape[0],), 0.)
if cfg.CUDA:
real_samples, gen_samples, real_label, fake_label = real_samples.cuda(), gen_samples.cuda(), real_label.cuda(), fake_label.cuda()
# print(self.dis_D(real_samples).shape, real_label.shape)
errDD_real = criterion(torch.sigmoid(self.dis_D(real_samples)), real_label)
errDD_fake = criterion(torch.sigmoid(self.dis_D(gen_samples.detach())), fake_label)
self.optimize(self.dis_D_opt, errDD_real+errDD_fake, self.dis_D)
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True).cuda()
real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float().cuda()
D1 = torch.sigmoid(self.dis_D(gen_samples))
P1 = (1.-D1)
ratio = (P1/torch.clamp(D1*P0, min = 1e-7))
ratio_clipped = torch.clamp(ratio, 1.0 - cfg.clip_param, 1.0 + cfg.clip_param)
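            # ratio compares the refreshed estimate (1 - D1)/D1 against the frozen P0 and is clipped
            # to [1 - clip_param, 1 + clip_param], as in PPO's trust-region surrogate.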
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
surr1 = ratio * d_out_fake
surr2 = ratio_clipped * d_out_fake
target = torch.where(d_out_fake>0, torch.min(surr1, surr2), torch.max(surr1, surr2))
g_loss, _ = get_losses(d_out_real, target, cfg.loss_type)
# g_loss = -d_out_fake.mean()
self.optimize(self.gen_adv_opt, g_loss, self.gen)
total_loss += g_loss.item()
return total_loss / g_step if g_step != 0 else 0
def calc_gradient_penalty(self, real_data, fake_data):
BATCH_SIZE = real_data.shape[0]
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(real_data.shape)
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
# disc_interpolates = netD(interpolates)
disc_interpolates = self.dis(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.contiguous().view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
def adv_train_discriminator(self, d_step):
total_loss = 0
for step in range(d_step):
real_samples = F.one_hot(self.oracle_data.random_batch()['target'], cfg.vocab_size).float()
gen_samples = self.gen.sample(cfg.batch_size, cfg.batch_size, one_hot=True)
if cfg.CUDA:
real_samples, gen_samples = real_samples.cuda(), gen_samples.cuda()
# ===Train===
d_out_real = self.dis(real_samples)
d_out_fake = self.dis(gen_samples)
_, d_loss = get_losses(d_out_real, d_out_fake, cfg.loss_type)
if cfg.GP:
gradient_penalty = self.calc_gradient_penalty(real_samples.data, gen_samples.data)
d_loss = d_loss+cfg.LAMBDA*gradient_penalty
# print(d_loss.shape)
self.optimize(self.dis_opt, d_loss, self.dis)
total_loss += d_loss.item()
return total_loss / d_step if d_step != 0 else 0
def update_temperature(self, i, N):
self.gen.temperature = get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
"""Add clip_grad_norm_"""
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
| 9,786 | 43.085586 | 145 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/instructor/oracle_data/sentigan_instructor.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : sentigan_instructor.py
# @Time : Created at 2019-07-26
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import os
import torch
import torch.optim as optim
import config as cfg
from instructor.oracle_data.instructor import BasicInstructor
from models.Oracle import Oracle
from models.SentiGAN_D import SentiGAN_D
from models.SentiGAN_G import SentiGAN_G
from utils import rollout
from utils.cat_data_loader import CatClasDataIter
from utils.data_loader import GenDataIter
from utils.data_utils import create_multi_oracle
from utils.text_process import write_tensor
class SentiGANInstructor(BasicInstructor):
def __init__(self, opt):
super(SentiGANInstructor, self).__init__(opt)
# generator, discriminator
self.oracle_list = [Oracle(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA) for _ in range(cfg.k_label)]
self.gen_list = [SentiGAN_G(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size, cfg.max_seq_len,
cfg.padding_idx, gpu=cfg.CUDA) for _ in range(cfg.k_label)]
self.dis = SentiGAN_D(cfg.k_label, cfg.dis_embed_dim, cfg.vocab_size, cfg.padding_idx, gpu=cfg.CUDA)
self.init_model()
# Optimizer
self.gen_opt_list = [optim.Adam(gen.parameters(), lr=cfg.gen_lr) for gen in self.gen_list]
self.dis_opt = optim.Adam(self.dis.parameters(), lr=cfg.dis_lr)
def init_model(self):
if cfg.oracle_pretrain:
for i in range(cfg.k_label):
oracle_path = cfg.multi_oracle_state_dict_path.format(i)
if not os.path.exists(oracle_path):
create_multi_oracle(cfg.k_label)
self.oracle_list[i].load_state_dict(torch.load(oracle_path))
if cfg.dis_pretrain:
self.log.info(
'Load pretrained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path))
if cfg.gen_pretrain:
for i in range(cfg.k_label):
self.log.info('Load MLE pretrained generator gen: {}'.format(cfg.pretrained_gen_path + '%d' % i))
self.gen_list[i].load_state_dict(torch.load(cfg.pretrained_gen_path + '%d' % i))
if cfg.CUDA:
for i in range(cfg.k_label):
self.oracle_list[i] = self.oracle_list[i].cuda()
self.gen_list[i] = self.gen_list[i].cuda()
self.dis = self.dis.cuda()
def _run(self):
# ===PRE-TRAIN GENERATOR===
if not cfg.gen_pretrain:
self.log.info('Starting Generator MLE Training...')
self.pretrain_generator(cfg.MLE_train_epoch)
if cfg.if_save and not cfg.if_test:
for i in range(cfg.k_label):
torch.save(self.gen_list[i].state_dict(), cfg.pretrained_gen_path + '%d' % i)
print('Save pre-trained generator: {}'.format(cfg.pretrained_gen_path + '%d' % i))
# ===TRAIN DISCRIMINATOR====
if not cfg.dis_pretrain:
self.log.info('Starting Discriminator Training...')
self.train_discriminator(cfg.d_step, cfg.d_epoch)
if cfg.if_save and not cfg.if_test:
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
print('Save pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
# ===ADVERSARIAL TRAINING===
self.log.info('Starting Adversarial Training...')
self.log.info('Initial generator: %s', self.comb_metrics(fmt_str=True))
for adv_epoch in range(cfg.ADV_train_epoch):
self.log.info('-----\nADV EPOCH %d\n-----' % adv_epoch)
self.sig.update()
if self.sig.adv_sig:
self.adv_train_generator(cfg.ADV_g_step) # Generator
self.train_discriminator(cfg.ADV_d_step, cfg.ADV_d_epoch, 'ADV') # Discriminator
if adv_epoch % cfg.adv_log_step == 0:
if cfg.if_save and not cfg.if_test:
self._save('ADV', adv_epoch)
else:
self.log.info('>>> Stop by adv_signal! Finishing adversarial training...')
break
def _test(self):
print('>>> Begin test...')
self._run()
pass
def pretrain_generator(self, epochs):
"""
Max Likelihood Pre-training for the generator
"""
for epoch in range(epochs):
self.sig.update()
if self.sig.pre_sig:
for i in range(cfg.k_label):
pre_loss = self.train_gen_epoch(self.gen_list[i], self.oracle_data_list[i].loader,
self.mle_criterion, self.gen_opt_list[i])
# ===Test===
if epoch % cfg.pre_log_step == 0 or epoch == epochs - 1:
if i == cfg.k_label - 1:
self.log.info('[MLE-GEN] epoch %d : pre_loss = %.4f, %s' % (
epoch, pre_loss, self.comb_metrics(fmt_str=True)))
if cfg.if_save and not cfg.if_test:
self._save('MLE', epoch)
else:
self.log.info('>>> Stop by pre signal, skip to adversarial training...')
break
def adv_train_generator(self, g_step):
"""
The gen is trained using policy gradients, using the reward from the discriminator.
        Training is done for g_step batches per label.
"""
for i in range(cfg.k_label):
rollout_func = rollout.ROLLOUT(self.gen_list[i], cfg.CUDA)
total_g_loss = 0
for step in range(g_step):
inp, target = GenDataIter.prepare(self.gen_list[i].sample(cfg.batch_size, cfg.batch_size), gpu=cfg.CUDA)
# ===Train===
rewards = rollout_func.get_reward(target, cfg.rollout_num, self.dis)
adv_loss = self.gen_list[i].batchPGLoss(inp, target, rewards)
self.optimize(self.gen_opt_list[i], adv_loss)
total_g_loss += adv_loss.item()
# ===Test===
self.log.info('[ADV-GEN]: %s', self.comb_metrics(fmt_str=True))
def train_discriminator(self, d_step, d_epoch, phase='MLE'):
"""
Training the discriminator on real_data_samples (positive) and generated samples from gen (negative).
        Samples are drawn d_step times, and the discriminator is trained for d_epoch epochs each time.
"""
# prepare loader for validate
global d_loss, train_acc
for step in range(d_step):
# prepare loader for training
real_samples = []
fake_samples = []
for i in range(cfg.k_label):
real_samples.append(self.oracle_samples_list[i])
fake_samples.append(self.gen_list[i].sample(cfg.samples_num // cfg.k_label, 8 * cfg.batch_size))
dis_samples_list = [torch.cat(fake_samples, dim=0)] + real_samples
dis_data = CatClasDataIter(dis_samples_list)
for epoch in range(d_epoch):
# ===Train===
d_loss, train_acc = self.train_dis_epoch(self.dis, dis_data.loader, self.dis_criterion,
self.dis_opt)
# ===Test===
self.log.info('[%s-DIS] d_step %d: d_loss = %.4f, train_acc = %.4f' % (
phase, step, d_loss, train_acc))
if cfg.if_save and not cfg.if_test and phase == 'MLE':
torch.save(self.dis.state_dict(), cfg.pretrained_dis_path)
def cal_metrics_with_label(self, label_i):
assert type(label_i) == int, 'missing label'
# Prepare data for evaluation
eval_samples = self.gen_list[label_i].sample(cfg.samples_num, 8 * cfg.batch_size)
gen_data = GenDataIter(eval_samples)
# Reset metrics
self.nll_oracle.reset(self.oracle_list[label_i], gen_data.loader)
self.nll_gen.reset(self.gen_list[label_i], self.oracle_data_list[label_i].loader)
self.nll_div.reset(self.gen_list[label_i], gen_data.loader)
return [metric.get_score() for metric in self.all_metrics]
def _save(self, phase, epoch):
"""Save model state dict and generator's samples"""
for i in range(cfg.k_label):
torch.save(self.gen_list[i].state_dict(),
cfg.save_model_root + 'gen{}_{}_{:05d}.pt'.format(i, phase, epoch))
save_sample_path = cfg.save_samples_root + 'samples_d{}_{}_{:05d}.txt'.format(i, phase, epoch)
samples = self.gen_list[i].sample(cfg.batch_size, cfg.batch_size)
write_tensor(save_sample_path, samples)
| 9,006 | 43.589109 | 120 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/utils/data_loader.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : data_loader.py
# @Time : Created at 2019-05-31
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import random
from torch.utils.data import Dataset, DataLoader
from utils.text_process import *
class GANDataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class GenDataIter:
def __init__(self, samples, if_test_data=False, shuffle=None):
self.batch_size = cfg.batch_size
self.max_seq_len = cfg.max_seq_len
self.start_letter = cfg.start_letter
        self.shuffle = cfg.data_shuffle if shuffle is None else shuffle
if cfg.if_real_data:
self.word2idx_dict, self.idx2word_dict = load_dict(cfg.dataset)
if if_test_data: # used for the classifier
self.word2idx_dict, self.idx2word_dict = load_test_dict(cfg.dataset)
self.loader = DataLoader(
dataset=GANDataset(self.__read_data__(samples)),
batch_size=self.batch_size,
shuffle=self.shuffle,
drop_last=True)
self.input = self._all_data_('input')
self.target = self._all_data_('target')
def __read_data__(self, samples):
"""
input: same as target, but start with start_letter.
"""
# global all_data
if isinstance(samples, torch.Tensor): # Tensor
inp, target = self.prepare(samples)
all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
elif isinstance(samples, str): # filename
inp, target = self.load_data(samples)
all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
else:
all_data = None
return all_data
def random_batch(self):
"""Randomly choose a batch from loader, please note that the data should not be shuffled."""
idx = random.randint(0, len(self.loader) - 1)
return list(self.loader)[idx]
def _all_data_(self, col):
return torch.cat([data[col].unsqueeze(0) for data in self.loader.dataset.data], 0)
@staticmethod
def prepare(samples, gpu=False):
"""Add start_letter to samples as inp, target same as samples"""
inp = torch.zeros(samples.size()).long()
target = samples
inp[:, 0] = cfg.start_letter
inp[:, 1:] = target[:, :cfg.max_seq_len - 1]
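        # Example (added, illustrative): with max_seq_len = 4 and a sample
        # [w1, w2, w3, w4], this yields
        #   inp    = [start_letter, w1, w2, w3]
        #   target = [w1, w2, w3, w4]
        # i.e. inp is the target shifted right by one position.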
if gpu:
return inp.cuda(), target.cuda()
return inp, target
def load_data(self, filename):
"""Load real data from local file"""
self.tokens = get_tokenlized(filename)
samples_index = tokens_to_tensor(self.tokens, self.word2idx_dict)
return self.prepare(samples_index)
class DisDataIter:
def __init__(self, pos_samples, neg_samples, shuffle=None):
self.batch_size = cfg.batch_size
self.max_seq_len = cfg.max_seq_len
self.start_letter = cfg.start_letter
        self.shuffle = cfg.data_shuffle if shuffle is None else shuffle
self.loader = DataLoader(
dataset=GANDataset(self.__read_data__(pos_samples, neg_samples)),
batch_size=self.batch_size,
shuffle=self.shuffle,
drop_last=True)
def __read_data__(self, pos_samples, neg_samples):
"""
input: same as target, but start with start_letter.
"""
inp, target = self.prepare(pos_samples, neg_samples)
all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
return all_data
def random_batch(self):
idx = random.randint(0, len(self.loader) - 1)
return list(self.loader)[idx]
def prepare(self, pos_samples, neg_samples, gpu=False):
"""Build inp and target"""
inp = torch.cat((pos_samples, neg_samples), dim=0).long().detach() # !!!need .detach()
target = torch.ones(inp.size(0)).long()
target[pos_samples.size(0):] = 0
# shuffle
perm = torch.randperm(inp.size(0))
inp = inp[perm]
target = target[perm]
if gpu:
return inp.cuda(), target.cuda()
return inp, target
| 4,339 | 32.90625 | 100 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/utils/data_utils.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : data_utils.py
# @Time : Created at 2019-03-16
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
from time import strftime, localtime
import torch.nn as nn
from metrics.nll import NLL
from models.Oracle import Oracle
from utils.data_loader import GenDataIter
from utils.text_process import *
def create_multi_oracle(number):
for i in range(number):
print('Creating Oracle %d...' % i)
oracle = Oracle(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size,
cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
if cfg.CUDA:
oracle = oracle.cuda()
large_samples = oracle.sample(cfg.samples_num, 4 * cfg.batch_size)
small_samples = oracle.sample(cfg.samples_num // 2, 4 * cfg.batch_size)
torch.save(oracle.state_dict(), cfg.multi_oracle_state_dict_path.format(i))
torch.save(large_samples, cfg.multi_oracle_samples_path.format(i, cfg.samples_num))
torch.save(small_samples, cfg.multi_oracle_samples_path.format(i, cfg.samples_num // 2))
oracle_data = GenDataIter(large_samples)
mle_criterion = nn.NLLLoss()
groud_truth = NLL.cal_nll(oracle, oracle_data.loader, mle_criterion)
        print('Oracle %d Ground Truth: %.4f' % (i, groud_truth))
def create_specific_oracle(from_a, to_b, num=1, save_path='../pretrain/'):
for i in range(num):
while True:
oracle = Oracle(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size,
cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
if cfg.CUDA:
oracle = oracle.cuda()
big_samples = oracle.sample(cfg.samples_num, 8 * cfg.batch_size)
small_samples = oracle.sample(cfg.samples_num // 2, 8 * cfg.batch_size)
oracle_data = GenDataIter(big_samples)
mle_criterion = nn.NLLLoss()
groud_truth = NLL.cal_nll(oracle, oracle_data.loader, mle_criterion)
if from_a <= groud_truth <= to_b:
dir_path = save_path + 'oracle_data_gt{:.2f}_{}'.format(groud_truth,
strftime("%m%d_%H%M%S", localtime()))
if not os.path.exists(dir_path):
os.mkdir(dir_path)
print('save ground truth: ', groud_truth)
# prefix = 'oracle{}_lstm_gt{:.2f}_{}'.format(i, groud_truth, strftime("%m%d", localtime()))
prefix = dir_path + '/oracle_lstm'
torch.save(oracle.state_dict(), '{}.pt'.format(prefix))
torch.save(big_samples, '{}_samples_{}.pt'.format(prefix, cfg.samples_num))
torch.save(small_samples, '{}_samples_{}.pt'.format(prefix, cfg.samples_num // 2))
break
def create_many_oracle(from_a, to_b, num=1, save_path='../pretrain/'):
for i in range(num):
while True:
oracle = Oracle(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size,
cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
if cfg.CUDA:
oracle = oracle.cuda()
big_samples = oracle.sample(cfg.samples_num, 8 * cfg.batch_size)
small_samples = oracle.sample(cfg.samples_num // 2, 8 * cfg.batch_size)
oracle_data = GenDataIter(big_samples)
mle_criterion = nn.NLLLoss()
groud_truth = NLL.cal_nll(oracle, oracle_data.loader, mle_criterion)
if from_a <= groud_truth <= to_b:
print('save ground truth: ', groud_truth)
prefix = 'oracle_lstm'
torch.save(oracle.state_dict(), save_path + '{}.pt'.format(prefix))
torch.save(big_samples, save_path + '{}_samples_{}.pt'.format(prefix, cfg.samples_num))
torch.save(small_samples, save_path + '{}_samples_{}.pt'.format(prefix, cfg.samples_num // 2))
break
def _save(data, filename):
with open(filename, 'w') as fout:
for d in data:
fout.write(d['reviewText'] + '\n')
fout.write(str(d['overall']) + '\n')
def _count(filename):
with open(filename, 'r') as fin:
data = fin.read().strip().split('\n')
return len(data) / 2
def clean_amazon_long_sentence():
data_root = '/home/sysu2018/Documents/william/amazon_dataset/'
all_files = os.listdir(data_root)
print('|\ttype\t|\torigin\t|\tclean_40\t|\tclean_20\t|\tfinal_40\t|\tfinal_20\t|')
print('|----------|----------|----------|----------|----------|----------|')
for file in all_files:
filename = data_root + file
if os.path.isdir(filename):
continue
clean_save_40 = []
clean_save_20 = []
final_save_40 = []
final_save_20 = []
with open(filename, 'r') as fin:
raw_data = fin.read().strip().split('\n')
for line in raw_data:
review = eval(line)['reviewText']
if len(review.split()) <= 40:
clean_save_40.append(eval(line))
if len(review.split('.')) <= 2: # one sentence
final_save_40.append(eval(line))
if len(review.split()) <= 20:
clean_save_20.append(eval(line))
if len(review.split('.')) <= 2: # one sentence
final_save_20.append(eval(line))
save_filename = data_root + 'clean_40/' + file.lower().split('_5')[0] + '.txt'
_save(clean_save_40, save_filename)
# a = _count(save_filename)
save_filename = data_root + 'clean_20/' + file.lower().split('_5')[0] + '.txt'
_save(clean_save_20, save_filename)
# b = _count(save_filename)
save_filename = data_root + 'final_40/' + file.lower().split('_5')[0] + '.txt'
_save(final_save_40, save_filename)
# c = _count(save_filename)
save_filename = data_root + 'final_20/' + file.lower().split('_5')[0] + '.txt'
_save(final_save_20, save_filename)
# d = _count(save_filename)
print('|\t%s\t|\t%d\t|\t%d\t|\t%d\t|\t%d\t|\t%d\t|' % (
file.lower().split('_5')[0], len(raw_data),
len(clean_save_40), len(clean_save_20),
len(final_save_40), len(final_save_20)))
# print('|\t%s\t|\t%d\t|\t%d\t|\t%d\t|\t%d\t|\t%d\t|' % (
# file.lower().split('_5')[0], len(raw_data), a, b, c, d))
def mean(x, y):
return round((2 * x * y) / (x + y), 3)
def mean_list(x, y):
res = []
for i, j in zip(x, y):
res.append(round(mean(i, j), 3))
return res
if __name__ == '__main__':
pass
| 6,818 | 39.589286 | 110 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/utils/cat_data_loader.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : cat_data_loader.py
# @Time : Created at 2019-05-31
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import random
from torch.utils.data import Dataset, DataLoader
from utils.text_process import *
class GANDataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class CatGenDataIter:
def __init__(self, samples_list, shuffle=None):
self.batch_size = cfg.batch_size
self.max_seq_len = cfg.max_seq_len
self.start_letter = cfg.start_letter
        self.shuffle = cfg.data_shuffle if shuffle is None else shuffle
if cfg.if_real_data:
self.word2idx_dict, self.idx2word_dict = load_dict(cfg.dataset)
self.loader = DataLoader(
dataset=GANDataset(self.__read_data__(samples_list)),
batch_size=self.batch_size,
shuffle=self.shuffle,
drop_last=True)
self.input = self._all_data_('input')
self.target = self._all_data_('target')
self.label = self._all_data_('label') # from 0 to k-1, different from Discriminator label
def __read_data__(self, samples_list):
"""
input: same as target, but start with start_letter.
"""
inp, target, label = self.prepare(samples_list)
all_data = [{'input': i, 'target': t, 'label': l} for (i, t, l) in zip(inp, target, label)]
return all_data
def random_batch(self):
"""Randomly choose a batch from loader, please note that the data should not be shuffled."""
idx = random.randint(0, len(self.loader) - 1)
return list(self.loader)[idx]
def _all_data_(self, col):
return torch.cat([data[col].unsqueeze(0) for data in self.loader.dataset.data], 0)
def prepare(self, samples_list, gpu=False):
"""Add start_letter to samples as inp, target same as samples"""
all_samples = torch.cat(samples_list, dim=0).long()
target = all_samples
inp = torch.zeros(all_samples.size()).long()
inp[:, 0] = self.start_letter
inp[:, 1:] = target[:, :self.max_seq_len - 1]
label = torch.zeros(all_samples.size(0)).long()
for idx in range(len(samples_list)):
start = sum([samples_list[i].size(0) for i in range(idx)])
label[start: start + samples_list[idx].size(0)] = idx
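        # Note (added): every row originating from samples_list[idx] is labelled idx
        # (0 .. k-1); the joint shuffle below then mixes the k sources together.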
# shuffle
perm = torch.randperm(inp.size(0))
inp = inp[perm].detach()
target = target[perm].detach()
label = label[perm].detach()
if gpu:
return inp.cuda(), target.cuda(), label.cuda()
return inp, target, label
def load_data(self, filename):
"""Load real data from local file"""
self.tokens = get_tokenlized(filename)
samples_index = tokens_to_tensor(self.tokens, self.word2idx_dict)
return self.prepare(samples_index)
class CatClasDataIter:
"""Classifier data loader, handle for multi label data"""
def __init__(self, samples_list, given_target=None, shuffle=None):
"""
- samples_list: list of tensors, [label_0, label_1, ..., label_k]
"""
self.batch_size = cfg.batch_size
self.max_seq_len = cfg.max_seq_len
self.start_letter = cfg.start_letter
        self.shuffle = cfg.data_shuffle if shuffle is None else shuffle
self.loader = DataLoader(
dataset=GANDataset(self.__read_data__(samples_list, given_target)),
batch_size=self.batch_size,
shuffle=self.shuffle,
drop_last=True)
self.input = self._all_data_('input')
self.target = self._all_data_('target')
def __read_data__(self, samples_list, given_target=None):
inp, target = self.prepare(samples_list, given_target)
all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
return all_data
def random_batch(self):
idx = random.randint(0, len(self.loader) - 1)
return list(self.loader)[idx]
# return next(iter(self.loader))
def _all_data_(self, col):
return torch.cat([data[col].unsqueeze(0) for data in self.loader.dataset.data], 0)
@staticmethod
def prepare(samples_list, given_target=None, detach=True, gpu=False):
"""
Build inp and target
:param samples_list: list of tensors, [label_0, label_1, ..., label_k]
:param given_target: given a target, len(samples_list) = 1
:param detach: if detach input
:param gpu: if use cuda
:returns inp, target:
- inp: sentences
- target: label index, 0-label_0, 1-label_1, ..., k-label_k
"""
if len(samples_list) == 1 and given_target is not None:
inp = samples_list[0]
if detach:
inp = inp.detach()
target = torch.LongTensor([given_target] * inp.size(0))
if len(inp.size()) == 2: # samples token, else samples onehot
inp = inp.long()
else:
inp = torch.cat(samples_list, dim=0) # !!!need .detach()
if detach:
inp = inp.detach()
target = torch.zeros(inp.size(0)).long()
if len(inp.size()) == 2: # samples token, else samples onehot
inp = inp.long()
for idx in range(1, len(samples_list)):
start = sum([samples_list[i].size(0) for i in range(idx)])
target[start: start + samples_list[idx].size(0)] = idx
# shuffle
perm = torch.randperm(inp.size(0))
inp = inp[perm]
target = target[perm]
if gpu:
return inp.cuda(), target.cuda()
return inp, target
| 5,925 | 35.134146 | 100 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/utils/text_process.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : text_process.py
# @Time : Created at 2019-05-14
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import nltk
import numpy as np
import os
import torch
import config as cfg
def get_tokenlized(file):
"""tokenlize the file"""
tokenlized = list()
with open(file) as raw:
for text in raw:
text = nltk.word_tokenize(text.lower())
tokenlized.append(text)
return tokenlized
def get_word_list(tokens):
"""get word set"""
word_set = list()
for sentence in tokens:
for word in sentence:
word_set.append(word)
return list(set(word_set))
def get_dict(word_set):
"""get word2idx_dict and idx2word_dict"""
word2idx_dict = dict()
idx2word_dict = dict()
index = 2
word2idx_dict[cfg.padding_token] = str(cfg.padding_idx) # padding token
idx2word_dict[str(cfg.padding_idx)] = cfg.padding_token
word2idx_dict[cfg.start_token] = str(cfg.start_letter) # start token
idx2word_dict[str(cfg.start_letter)] = cfg.start_token
for word in word_set:
word2idx_dict[word] = str(index)
idx2word_dict[str(index)] = word
index += 1
return word2idx_dict, idx2word_dict
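# Example (added, illustrative): with the conventions used in init_dict below
# (padding index 0, start letter 1), real vocabulary words receive string ids
# starting at '2', and both dictionaries map through strings rather than ints.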
def text_process(train_text_loc, test_text_loc=None):
"""get sequence length and dict size"""
train_tokens = get_tokenlized(train_text_loc)
if test_text_loc is None:
test_tokens = list()
else:
test_tokens = get_tokenlized(test_text_loc)
word_set = get_word_list(train_tokens + test_tokens)
word2idx_dict, idx2word_dict = get_dict(word_set)
if test_text_loc is None:
sequence_len = len(max(train_tokens, key=len))
else:
sequence_len = max(len(max(train_tokens, key=len)), len(max(test_tokens, key=len)))
return sequence_len, len(word2idx_dict)
# ============================================
def init_dict(dataset):
"""
Initialize dictionaries of dataset, please note that '0': padding_idx, '1': start_letter.
Finally save dictionary files locally.
"""
tokens = get_tokenlized('dataset/{}.txt'.format(dataset))
word_set = get_word_list(tokens)
word2idx_dict, idx2word_dict = get_dict(word_set)
with open('dataset/{}_wi_dict.txt'.format(dataset), 'w') as dictout:
dictout.write(str(word2idx_dict))
with open('dataset/{}_iw_dict.txt'.format(dataset), 'w') as dictout:
dictout.write(str(idx2word_dict))
print('total tokens: ', len(word2idx_dict))
def load_dict(dataset):
"""Load dictionary from local files"""
iw_path = 'dataset/{}_iw_dict.txt'.format(dataset)
wi_path = 'dataset/{}_wi_dict.txt'.format(dataset)
    if not os.path.exists(iw_path) or not os.path.exists(wi_path):  # initialize dictionaries if either file is missing
init_dict(dataset)
with open(iw_path, 'r') as dictin:
idx2word_dict = eval(dictin.read().strip())
with open(wi_path, 'r') as dictin:
word2idx_dict = eval(dictin.read().strip())
return word2idx_dict, idx2word_dict
def load_test_dict(dataset):
"""Build test data dictionary, extend from train data. For the classifier."""
word2idx_dict, idx2word_dict = load_dict(dataset) # train dict
# tokens = get_tokenlized('dataset/testdata/{}_clas_test.txt'.format(dataset))
tokens = get_tokenlized('dataset/testdata/{}_test.txt'.format(dataset))
word_set = get_word_list(tokens)
index = len(word2idx_dict) # current index
# extend dict with test data
for word in word_set:
if word not in word2idx_dict:
word2idx_dict[word] = str(index)
idx2word_dict[str(index)] = word
index += 1
return word2idx_dict, idx2word_dict
def tensor_to_tokens(tensor, dictionary):
"""transform Tensor to word tokens"""
tokens = []
for sent in tensor:
sent_token = []
for word in sent.tolist():
if word == cfg.padding_idx:
break
sent_token.append(dictionary[str(word)])
tokens.append(sent_token)
return tokens
def tokens_to_tensor(tokens, dictionary):
"""transform word tokens to Tensor"""
global i
tensor = []
for sent in tokens:
sent_ten = []
for i, word in enumerate(sent):
if word == cfg.padding_token:
break
sent_ten.append(int(dictionary[str(word)]))
while i < cfg.max_seq_len - 1:
sent_ten.append(cfg.padding_idx)
i += 1
tensor.append(sent_ten[:cfg.max_seq_len])
return torch.LongTensor(tensor)
def padding_token(tokens):
"""pad sentences with padding_token"""
global i
pad_tokens = []
for sent in tokens:
sent_token = []
for i, word in enumerate(sent):
if word == cfg.padding_token:
break
sent_token.append(word)
while i < cfg.max_seq_len - 1:
sent_token.append(cfg.padding_token)
i += 1
pad_tokens.append(sent_token)
return pad_tokens
def write_tokens(filename, tokens):
"""Write word tokens to a local file (For Real data)"""
with open(filename, 'w') as fout:
for sent in tokens:
fout.write(' '.join(sent))
fout.write('\n')
def write_tensor(filename, tensor):
"""Write Tensor to a local file (For Oracle data)"""
with open(filename, 'w') as fout:
for sent in tensor:
fout.write(' '.join([str(i) for i in sent.tolist()]))
fout.write('\n')
def process_cat_text():
import random
dataset = 'mr'
test_ratio = 0.3
seq_len = 15
pos_file = 'dataset/{}/{}{}_cat1.txt'.format(dataset, dataset, seq_len)
neg_file = 'dataset/{}/{}{}_cat0.txt'.format(dataset, dataset, seq_len)
pos_sent = open(pos_file, 'r').readlines()
neg_sent = open(neg_file, 'r').readlines()
pos_len = int(test_ratio * len(pos_sent))
neg_len = int(test_ratio * len(neg_sent))
random.shuffle(pos_sent)
random.shuffle(neg_sent)
all_sent_test = pos_sent[:pos_len] + neg_sent[:neg_len]
all_sent_train = pos_sent[pos_len:] + neg_sent[neg_len:]
random.shuffle(all_sent_test)
random.shuffle(all_sent_train)
f_pos_train = open('dataset/{}{}_cat1.txt'.format(dataset, seq_len), 'w')
f_neg_train = open('dataset/{}{}_cat0.txt'.format(dataset, seq_len), 'w')
f_pos_test = open('dataset/testdata/{}{}_cat1_test.txt'.format(dataset, seq_len), 'w')
f_neg_test = open('dataset/testdata/{}{}_cat0_test.txt'.format(dataset, seq_len), 'w')
for p_s in pos_sent[:pos_len]:
f_pos_test.write(p_s)
for n_s in neg_sent[:neg_len]:
f_neg_test.write(n_s)
for p_s in pos_sent[pos_len:]:
f_pos_train.write(p_s)
for n_s in neg_sent[neg_len:]:
f_neg_train.write(n_s)
with open('dataset/testdata/{}{}_test.txt'.format(dataset, seq_len), 'w') as fout:
for sent in all_sent_test:
fout.write(sent)
with open('dataset/{}{}.txt'.format(dataset, seq_len), 'w') as fout:
for sent in all_sent_train:
fout.write(sent)
f_pos_train.close()
f_neg_train.close()
f_pos_test.close()
f_neg_test.close()
def combine_amazon_text():
cat0_name = 'app'
cat1_name = 'book'
root_path = 'dataset/'
cat0_train = open(root_path + cat0_name + '.txt', 'r').readlines()
cat0_test = open(root_path + cat0_name + '_test.txt', 'r').readlines()
cat1_train = open(root_path + cat1_name + '.txt', 'r').readlines()
cat1_test = open(root_path + cat1_name + '_test.txt', 'r').readlines()
with open(root_path + 'amazon_{}_{}.txt'.format(cat0_name, cat1_name), 'w') as fout:
for sent in cat0_train:
fout.write(sent)
for sent in cat1_train:
fout.write(sent)
with open(root_path + 'testdata/amazon_{}_{}_test.txt'.format(cat0_name, cat1_name), 'w') as fout:
for sent in cat0_test:
fout.write(sent)
for sent in cat1_test:
fout.write(sent)
def extend_clas_train_data():
data_name = 'mr'
dataset = 'mr20'
neg_filter_file = 'dataset/{}/{}_cat0.txt'.format(data_name, dataset) # include train and test for generator
pos_filter_file = 'dataset/{}/{}_cat1.txt'.format(data_name, dataset)
neg_test_file = 'dataset/testdata/{}_cat0_test.txt'.format(dataset)
pos_test_file = 'dataset/testdata/{}_cat1_test.txt'.format(dataset)
neg_all_file = 'dataset/{}/{}_cat0.txt'.format(data_name, data_name)
pos_all_file = 'dataset/{}/{}_cat1.txt'.format(data_name, data_name)
neg_filter = open(neg_filter_file, 'r').readlines()
pos_filter = open(pos_filter_file, 'r').readlines()
neg_test = open(neg_test_file, 'r').readlines()
pos_test = open(pos_test_file, 'r').readlines()
neg_all = open(neg_all_file, 'r').readlines()
pos_all = open(pos_all_file, 'r').readlines()
# print('neg filter:', len(neg_filter))
# print('neg test:', len(neg_test))
# print('neg all:', len(neg_all))
# print('pos filter:', len(pos_filter))
# print('pos test:', len(pos_test))
# print('pos all:', len(pos_all))
print('neg before:', len(neg_test))
for line in neg_all:
if line not in neg_filter:
neg_test.append(line)
print('neg after:', len(neg_test))
print('pos before:', len(pos_test))
for line in pos_all:
if line not in pos_filter:
pos_test.append(line)
print('pos after:', len(pos_test))
with open('dataset/testdata/{}_cat0_clas_test.txt'.format(dataset), 'w') as fout:
for line in neg_test:
fout.write(line)
with open('dataset/testdata/{}_cat1_clas_test.txt'.format(dataset), 'w') as fout:
for line in pos_test:
fout.write(line)
with open('dataset/testdata/{}_clas_test.txt'.format(dataset), 'w') as fout:
for line in neg_test:
fout.write(line)
for line in pos_test:
fout.write(line)
def load_word_vec(path, word2idx_dict=None, type='glove'):
"""Load word embedding from local file"""
fin = open(path, 'r', encoding='utf-8', newline='\n', errors='ignore')
if type == 'glove':
word2vec_dict = {}
for line in fin:
tokens = line.rstrip().split()
if word2idx_dict is None or tokens[0] in word2idx_dict.keys():
word2vec_dict[tokens[0]] = np.asarray(tokens[1:], dtype='float32')
elif type == 'word2vec':
import gensim
word2vec_dict = gensim.models.KeyedVectors.load_word2vec_format(path, binary=True)
else:
raise NotImplementedError('No such type: %s' % type)
return word2vec_dict
def build_embedding_matrix(dataset):
"""Load or build Glove embedding matrix."""
embed_filename = 'dataset/glove_embedding_300d_{}.pt'.format(dataset)
if os.path.exists(embed_filename):
print('Loading embedding:', embed_filename)
embedding_matrix = torch.load(embed_filename)
else:
print('Loading Glove word vectors...')
word2idx_dict, _ = load_dict(dataset)
embedding_matrix = np.random.random((len(word2idx_dict) + 2, 300)) # 2 for padding token and start token
fname = '../glove.42B.300d.txt' # Glove file
# fname = '../GoogleNews-vectors-negative300.bin' # Google Word2Vec file
word2vec_dict = load_word_vec(fname, word2idx_dict=word2idx_dict, type='glove')
print('Building embedding matrix:', embed_filename)
for word, i in word2idx_dict.items():
if word in word2vec_dict:
# words not found in embedding index will be randomly initialized.
embedding_matrix[int(i)] = word2vec_dict[word]
embedding_matrix = torch.FloatTensor(embedding_matrix)
torch.save(embedding_matrix, embed_filename)
return embedding_matrix
if __name__ == '__main__':
os.chdir('../')
# process_cat_text()
# load_test_dict('mr15')
# extend_clas_train_data()
pass
| 12,120 | 33.240113 | 113 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/utils/helpers.py | import logging
import sys
from time import strftime, gmtime
import numpy as np
import torch
import torch.nn as nn
from metrics.nll import NLL
from utils.data_loader import GenDataIter
from torch.nn import functional as F
class Signal:
"""Running signal to control training process"""
def __init__(self, signal_file):
self.signal_file = signal_file
self.pre_sig = True
self.adv_sig = True
self.update()
def update(self):
signal_dict = self.read_signal()
self.pre_sig = signal_dict['pre_sig']
self.adv_sig = signal_dict['adv_sig']
def read_signal(self):
with open(self.signal_file, 'r') as fin:
return eval(fin.read())
def create_logger(name, silent=False, to_disk=False, log_file=None):
"""Create a new logger"""
# setup logger
log = logging.getLogger(name)
log.setLevel(logging.DEBUG)
log.propagate = False
formatter = logging.Formatter(fmt='%(message)s', datefmt='%Y/%m/%d %I:%M:%S')
if not silent:
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
log.addHandler(ch)
if to_disk:
log_file = log_file if log_file is not None else strftime("log/log_%m%d_%H%M.txt", gmtime())
if type(log_file) == list:
for filename in log_file:
fh = logging.FileHandler(filename, mode='w')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
log.addHandler(fh)
if type(log_file) == str:
fh = logging.FileHandler(log_file, mode='w')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
log.addHandler(fh)
return log
def create_oracle():
"""Create a new Oracle model and Oracle's samples"""
import config as cfg
from models.Oracle import Oracle
print('Creating Oracle...')
oracle = Oracle(cfg.gen_embed_dim, cfg.gen_hidden_dim, cfg.vocab_size,
cfg.max_seq_len, cfg.padding_idx, gpu=cfg.CUDA)
if cfg.CUDA:
oracle = oracle.cuda()
torch.save(oracle.state_dict(), cfg.oracle_state_dict_path)
big_samples = oracle.sample(cfg.samples_num, 4 * cfg.batch_size)
# large
torch.save(big_samples, cfg.oracle_samples_path.format(cfg.samples_num))
# small
torch.save(oracle.sample(cfg.samples_num // 2, 4 * cfg.batch_size),
cfg.oracle_samples_path.format(cfg.samples_num // 2))
oracle_data = GenDataIter(big_samples)
mle_criterion = nn.NLLLoss()
groud_truth = NLL.cal_nll(oracle, oracle_data.loader, mle_criterion)
    print('NLL_Oracle Ground Truth: %.4f' % groud_truth)
def get_fixed_temperature(temper, i, N, adapt):
    """A function to set up different temperature control policies"""
    import config as cfg
    N = 5000  # fixed schedule horizon; note that this overrides the N argument
if adapt == 'no':
temper_var_np = 1.0 # no increase, origin: temper
elif adapt == 'lin':
temper_var_np = 1 + i / (N - 1) * (temper - 1) # linear increase
elif adapt == 'exp':
temper_var_np = temper ** (i / N) # exponential increase
elif adapt == 'log':
temper_var_np = 1 + (temper - 1) / np.log(N) * np.log(i + 1) # logarithm increase
elif adapt == 'sigmoid':
temper_var_np = (temper - 1) * 1 / (1 + np.exp((N / 2 - i) * 20 / N)) + 1 # sigmoid increase
elif adapt == 'quad':
temper_var_np = (temper - 1) / (N - 1) ** 2 * i ** 2 + 1
elif adapt == 'sqrt':
temper_var_np = (temper - 1) / np.sqrt(N - 1) * np.sqrt(i) + 1
else:
raise Exception("Unknown adapt type!")
return temper_var_np * cfg.temp_scale
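# Example (added, illustrative): with temper = 100 and the hard-coded N = 5000,
# 'lin' moves the temperature linearly from 1 to 100 over 5000 steps, while 'exp'
# follows 100 ** (i / 5000); the result is finally multiplied by cfg.temp_scale.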
def get_losses(d_out_real, d_out_fake, loss_type='JS'):
"""Get different adversarial losses according to given loss_type"""
bce_loss = nn.BCEWithLogitsLoss()
if loss_type == 'standard': # the non-satuating GAN loss
d_loss_real = bce_loss(d_out_real, torch.ones_like(d_out_real))
d_loss_fake = bce_loss(d_out_fake, torch.zeros_like(d_out_fake))
d_loss = d_loss_real + d_loss_fake
g_loss = bce_loss(d_out_fake, torch.ones_like(d_out_fake))
elif loss_type == 'JS': # the vanilla GAN loss
d_loss_real = bce_loss(d_out_real, torch.ones_like(d_out_real))
d_loss_fake = bce_loss(d_out_fake, torch.zeros_like(d_out_fake))
d_loss = d_loss_real + d_loss_fake
g_loss = -d_loss_fake
elif loss_type == 'KL': # the GAN loss implicitly minimizing KL-divergence
d_loss_real = bce_loss(d_out_real, torch.ones_like(d_out_real))
d_loss_fake = bce_loss(d_out_fake, torch.zeros_like(d_out_fake))
d_loss = d_loss_real + d_loss_fake
g_loss = torch.mean(-d_out_fake)
elif loss_type == 'hinge': # the hinge loss
        d_loss_real = torch.mean(F.relu(1.0 - d_out_real))
        d_loss_fake = torch.mean(F.relu(1.0 + d_out_fake))
        d_loss = d_loss_real + d_loss_fake
        g_loss = -torch.mean(d_out_fake)
    elif loss_type == 'tv':  # the total variation distance
        d_loss = torch.mean(torch.tanh(d_out_fake) - torch.tanh(d_out_real))
        g_loss = torch.mean(-torch.tanh(d_out_fake))
elif loss_type == 'rsgan': # relativistic standard GAN
d_loss = bce_loss(d_out_real - d_out_fake, torch.ones_like(d_out_real))
g_loss = bce_loss(d_out_fake - d_out_real, torch.ones_like(d_out_fake))
    elif loss_type == 'WGAN':  # Wasserstein GAN critic loss
d_loss = torch.mean(d_out_fake - d_out_real)
g_loss = -torch.mean(d_out_fake)
    elif loss_type == 'WWGAN':  # weighted Wasserstein GAN loss (importance-weighted fake term)
with torch.no_grad():
W = d_out_fake.shape[0] * F.softmax(d_out_fake.data, dim = 0)
# loss_d_clas = (W*soft_logits).mean() - clas_logits.mean() + gp
d_loss = torch.mean(W*d_out_fake - d_out_real)
g_loss = -torch.mean(d_out_fake)
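        # Note (added): W is a softmax over critic scores scaled by the batch size
        # (so it sums to the batch size); computed under no_grad, it acts as a constant
        # that up-weights the fake samples the critic currently rates highest.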
else:
raise NotImplementedError("Divergence '%s' is not implemented" % loss_type)
return g_loss, d_loss
def truncated_normal_(tensor, mean=0, std=1):
"""
Implemented by @ruotianluo
See https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/15
"""
size = tensor.shape
tmp = tensor.new_empty(size + (4,)).normal_()
valid = (tmp < 2) & (tmp > -2)
ind = valid.max(-1, keepdim=True)[1]
tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))
tensor.data.mul_(std).add_(mean)
return tensor
| 6,456 | 34.872222 | 101 | py |
PPOGAN | PPOGAN-master/unsupervised_text_generation/utils/rollout.py | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : rollout.py
# @Time : Created at 2019-03-15
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import copy
import torch
import torch.nn.functional as F
class ROLLOUT:
def __init__(self, gen, gpu=True):
self.gen = gen
self.old_model = copy.deepcopy(gen)
self.max_seq_len = gen.max_seq_len
self.vocab_size = gen.vocab_size
self.step_size = gen.step_size if gen.name == 'leakgan' else 0
self.goal_out_size = gen.goal_out_size if gen.name == 'leakgan' else 0
self.gpu = gpu
def rollout_mc_search(self, sentences, given_num):
"""
        Fill up remaining tokens with Monte Carlo (MC) search.
:param sentences: size of batch_size * max_seq_len
:param given_num:
:return:
"""
batch_size = sentences.size(0)
# get current state
hidden = self.gen.init_hidden(batch_size)
# for i in range(given_num):
inp = sentences[:, :given_num]
out, hidden = self.gen.forward(inp, hidden, need_hidden=True)
out = out.view(batch_size, -1, self.vocab_size)[:, -1]
samples = torch.zeros(batch_size, self.max_seq_len).long()
samples[:, :given_num] = sentences[:, :given_num]
if self.gpu:
samples = samples.cuda()
# MC search
for i in range(given_num, self.max_seq_len):
out = torch.multinomial(torch.exp(out), 1)
samples[:, i] = out.view(-1).data
inp = out.view(-1)
out, hidden = self.gen.forward(inp, hidden, need_hidden=True)
return samples
def rollout_mc_search_leakgan(self, sentences, dis, given_num):
batch_size, seq_len = sentences.size()
goal_array = torch.zeros((batch_size, seq_len + 1, self.goal_out_size))
work_hidden = self.gen.init_hidden(batch_size)
mana_hidden = self.gen.init_hidden(batch_size)
real_goal = self.gen.goal_init[:batch_size, :]
out = 0
if self.gpu:
goal_array = goal_array.cuda()
real_goal = real_goal.cuda()
# get current state
for i in range(given_num):
# Get feature.
dis_inp = torch.zeros(batch_size, seq_len).long()
dis_inp[:, :i + 1] = sentences[:, :i + 1] # cut sentences
leak_inp = sentences[:, i]
if self.gpu:
dis_inp = dis_inp.cuda()
leak_inp = leak_inp.cuda()
feature = dis.get_feature(dis_inp).unsqueeze(0)
# Get output of one token
# cur_goal: batch_size * 1 * goal_out_size
out, cur_goal, work_hidden, mana_hidden = self.gen(i, leak_inp, work_hidden, mana_hidden,
feature, real_goal, train=True)
# Save goal and update last_goal
goal_array[:, i, :] = cur_goal.squeeze(1)
if i > 0 and i % self.step_size == 0:
real_goal = torch.sum(goal_array[:, i - 3:i + 1, :], dim=1)
if i / self.step_size == 1:
real_goal += self.gen.goal_init[:batch_size, :]
samples = torch.zeros(batch_size, self.max_seq_len).long()
samples[:, :given_num] = sentences[:, :given_num]
# MC search
for i in range(given_num, self.max_seq_len):
# Sample one token
out = torch.multinomial(torch.exp(out), 1).view(-1) # [num_samples] (sampling from each row)
samples[:, i] = out.data
# Get feature
dis_inp = samples
if self.gpu:
dis_inp = dis_inp.cuda()
feature = dis.get_feature(dis_inp).unsqueeze(0)
leak_inp = out
# Get output of one token
# cur_goal: batch_size * 1 * goal_out_size
out, cur_goal, work_hidden, mana_hidden = self.gen(i, leak_inp, work_hidden, mana_hidden,
feature, real_goal, train=True)
# Save goal and update last_goal
goal_array[:, i, :] = cur_goal.squeeze(1)
if i > 0 and i % self.step_size == 0:
real_goal = torch.sum(goal_array[:, i - 3:i + 1, :], dim=1)
if i / self.step_size == 1:
real_goal += self.gen.goal_init[:batch_size, :]
if self.gpu:
samples = samples.cuda()
return samples
def get_reward(self, sentences, rollout_num, dis, current_k=0):
"""
get reward via Monte Carlo search
:param sentences: size of batch_size * max_seq_len
:param rollout_num:
:param dis:
:param current_k: current training gen
:return: reward: [batch_size]
"""
with torch.no_grad():
batch_size = sentences.size(0)
rewards = torch.zeros([rollout_num * self.max_seq_len, batch_size]).float()
if self.gpu:
rewards = rewards.cuda()
idx = 0
for i in range(rollout_num):
for given_num in range(1, self.max_seq_len + 1):
samples = self.rollout_mc_search(sentences, given_num)
out = dis.forward(samples)
out = F.softmax(out, dim=-1)
reward = out[:, current_k + 1]
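                    # Note (added): assuming the discriminator's column 0 is the 'fake'
                    # class, column current_k + 1 is the probability of real class
                    # current_k, which is used directly as the reward signal.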
rewards[idx] = reward
idx += 1
# rewards = torch.mean(rewards, dim=0)
rewards = torch.mean(rewards.view(batch_size, self.max_seq_len, rollout_num), dim=-1)
return rewards
def get_reward_leakgan(self, sentences, rollout_num, dis, current_k):
"""
get reward via Monte Carlo search for LeakGAN
:param sentences: size of batch_size * max_seq_len
:param rollout_num:
:param dis:
:param current_k: current training gen
:return: reward: batch_size * (max_seq_len / step_size)
"""
with torch.no_grad():
batch_size = sentences.size(0)
rewards = torch.zeros([rollout_num * (self.max_seq_len // self.step_size), batch_size]).float()
if self.gpu:
rewards = rewards.cuda()
idx = 0
for i in range(rollout_num):
for t in range(self.max_seq_len // self.step_size):
given_num = t * self.step_size + 1 # 1, 5, 9, ..
samples = self.rollout_mc_search_leakgan(sentences, dis, given_num)
out = dis(samples)
out = F.softmax(out, dim=-1)
reward = out[:, current_k + 1]
rewards[idx] = reward
idx += 1
rewards = rewards.view(batch_size, self.max_seq_len // self.step_size, rollout_num)
rewards = torch.mean(rewards, dim=-1)
return rewards
def get_token_reward(self, sentences, rollout_num, dis, current_k, given_num):
"""
get reward of each token in sequence via Monte Carlo search
"""
with torch.no_grad():
batch_size = sentences.size(0)
rewards = torch.zeros([rollout_num, batch_size]).float()
idx = 0
for i in range(rollout_num):
samples = self.rollout_mc_search(sentences, given_num)
out = dis(samples)
out = F.softmax(out, dim=-1)
reward = out[:, current_k + 1]
rewards[idx] = reward
idx += 1
rewards = torch.Tensor(rewards).cuda()
rewards = torch.sum(rewards, dim=0) / rollout_num
return rewards
| 7,763 | 36.873171 | 107 | py |
PPOGAN | PPOGAN-master/cifar10/main.py | from printlib import print_normal as print
import argparse
import os, sys
parser = argparse.ArgumentParser()
parser.add_argument('--output_dir', type=str, default="./",
help='GPUs to use.')
parser.add_argument('--snapshot_dir', type=str, default="./",
help='snapshot')
parser.add_argument('--tb_dir', type=str, default="./",
help='tensorboard')
parser.add_argument('--id', type=str, help='unique identifier')
opt = parser.parse_args()
sys.stderr = open(os.path.join(opt.output_dir,'err.txt'), 'w')
import yaml
CONF = yaml.load(open(os.path.join(opt.output_dir,'conf.yml')), Loader=yaml.FullLoader)
print(caption = "STARTED")
print(CONF)
LAMBDA = CONF['LAMBDA']
nz = CONF['nz']
import random
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.nn import functional as F
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision
from torch.utils.tensorboard import SummaryWriter
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
from utils import BenchMark as BM
from utils import CheckPoint
from torch import autograd
writer = SummaryWriter(log_dir=os.path.join(opt.tb_dir, opt.id))
random.seed(CONF['SEED'])
torch.manual_seed(CONF['SEED'])
cudnn.benchmark = True
Dset=CONF['Dset']
DataRoot = CONF['DataRoot']
ImageSize=CONF['ImageSize']
batchSize=CONF['BS']
if Dset in ['imagenet', 'folder', 'lfw']:
# folder dataset
dataset = dset.ImageFolder(root=DataRoot,
transform=transforms.Compose([
transforms.Resize(ImageSize),
transforms.CenterCrop(ImageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
elif Dset == 'lsun':
dataset = dset.LSUN(root=DataRoot, classes=['bedroom_train'],
transform=transforms.Compose([
transforms.Resize(ImageSize),
transforms.CenterCrop(ImageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
elif Dset == 'cifar10':
dataset = dset.CIFAR10(root=DataRoot, download=True,
transform=transforms.Compose([
transforms.Resize(ImageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
elif Dset == 'mnist':
dataset = dset.MNIST(root=DataRoot, download=True,
transform=transforms.Compose([
transforms.Resize(ImageSize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
]))
nc=1
elif Dset == 'fake':
dataset = dset.FakeData(image_size=(3, ImageSize, ImageSize),
transform=transforms.ToTensor())
nc=3
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batchSize,
shuffle=True, num_workers=2)
device = torch.device("cuda:0")
print(caption = "Preparing FID")
bm=BM(dataloader, CONF, 2048, device, nz)
channels = 3
class ResBlockGenerator(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResBlockGenerator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
nn.init.kaiming_uniform_(self.conv1.weight.data, mode='fan_in', nonlinearity='relu')
nn.init.kaiming_uniform_(self.conv2.weight.data, mode='fan_in', nonlinearity='relu')
self.model = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.ReLU(),
nn.Upsample(scale_factor=2),
self.conv1,
nn.BatchNorm2d(out_channels),
nn.ReLU(),
self.conv2
)
self.bypass = nn.Sequential()
if stride != 1:
self.bypass = nn.Upsample(scale_factor=2)
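            # Note (added): the main path always contains an Upsample(scale_factor=2);
            # stride != 1 adds a matching Upsample on the bypass so the residual addition
            # stays shape-compatible (the generator below only uses stride=2 blocks).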
def forward(self, x):
return self.model(x) + self.bypass(x)
class ResBlockDiscriminator(nn.Module):
def __init__(self, in_channels, out_channels, shape, stride=1, bn=False):
super(ResBlockDiscriminator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
nn.init.kaiming_uniform_(self.conv1.weight.data, mode='fan_out', nonlinearity='relu')
nn.init.kaiming_uniform_(self.conv2.weight.data, mode='fan_out', nonlinearity='relu')
norm = lambda c : (nn.Sequential() if not bn else nn.BatchNorm2d(c))
if stride == 1:
self.model = nn.Sequential(
norm(in_channels),
nn.ReLU(),
self.conv1,
norm(out_channels),
nn.ReLU(),
self.conv2,
)
else:
self.model = nn.Sequential(
norm(in_channels),
nn.ReLU(),
self.conv1,
norm(out_channels),
nn.ReLU(),
self.conv2,
nn.AvgPool2d(2, stride=stride, padding=0)
)
self.bypass = nn.Sequential()
if stride != 1:
self.bypass_conv = nn.Conv2d(in_channels,out_channels, 1, 1, padding=0)
            nn.init.xavier_uniform_(self.bypass_conv.weight.data, nn.init.calculate_gain('linear'))
self.bypass = nn.Sequential(
self.bypass_conv,
nn.AvgPool2d(2, stride=stride, padding=0)
)
def forward(self, x):
return self.model(x) + self.bypass(x)
# special ResBlock just for the first layer of the discriminator
class FirstResBlockDiscriminator(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(FirstResBlockDiscriminator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
self.bypass_conv = nn.Conv2d(in_channels, out_channels, 1, 1, padding=0)
nn.init.kaiming_uniform_(self.conv1.weight.data, mode='fan_out', nonlinearity='relu')
nn.init.kaiming_uniform_(self.conv2.weight.data, mode='fan_out', nonlinearity='relu')
        nn.init.xavier_uniform_(self.bypass_conv.weight.data, nn.init.calculate_gain('linear'))
# we don't want to apply ReLU activation to raw image before convolution transformation.
self.model = nn.Sequential(
self.conv1,
nn.ReLU(),
self.conv2,
nn.AvgPool2d(2)
)
self.bypass = nn.Sequential(
nn.AvgPool2d(2),
self.bypass_conv,
)
def forward(self, x):
return self.model(x) + self.bypass(x)
GEN_SIZE=CONF['G_SIZE']
DISC_SIZE=int(CONF['G_SIZE']*CONF['D_SCALE'])
class Generator(nn.Module):
def __init__(self, z_dim):
super(Generator, self).__init__()
self.z_dim = z_dim
self.dense = nn.Linear(self.z_dim, 4 * 4 * GEN_SIZE)
self.final = nn.Conv2d(GEN_SIZE, channels, 3, stride=1, padding=1)
        nn.init.xavier_uniform_(self.dense.weight.data, nn.init.calculate_gain('linear'))
        nn.init.xavier_uniform_(self.final.weight.data, nn.init.calculate_gain('tanh'))
self.model = nn.Sequential(
ResBlockGenerator(GEN_SIZE, GEN_SIZE, stride=2),
ResBlockGenerator(GEN_SIZE, GEN_SIZE, stride=2),
ResBlockGenerator(GEN_SIZE, GEN_SIZE, stride=2),
nn.BatchNorm2d(GEN_SIZE),
nn.ReLU(),
self.final,
nn.Tanh())
def forward(self, z):
return self.model(self.dense(z).view(-1, GEN_SIZE, 4, 4))
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
# self.model = nn.Sequential(
self.r1 = FirstResBlockDiscriminator(channels, DISC_SIZE, stride=2)
self.r2 = ResBlockDiscriminator(DISC_SIZE, DISC_SIZE, 16, stride=2)
self.r3 = ResBlockDiscriminator(DISC_SIZE, DISC_SIZE, 8)
self.r4 = ResBlockDiscriminator(DISC_SIZE, DISC_SIZE, 8)
# nn.ReLU(),
self.pool = nn.AvgPool2d(8)
# )
self.fc = nn.Linear(DISC_SIZE, 1)
        nn.init.xavier_uniform_(self.fc.weight.data, nn.init.calculate_gain('linear'))
def forward(self, x, d1=0.2, d2=0.5, d3=0.5):
x = F.dropout(self.r2(self.r1(x)), p=d1)
x = F.dropout(self.r3(x), p=d2)
o2 = self.pool(F.relu(F.dropout(self.r4(x), p=d3), inplace=True))
return self.fc(o2.view(-1,DISC_SIZE)), o2.squeeze()
class Discriminator_D(nn.Module):
def __init__(self):
super(Discriminator_D, self).__init__()
self.model = nn.Sequential(
FirstResBlockDiscriminator(channels, DISC_SIZE, stride=2),
ResBlockDiscriminator(DISC_SIZE, DISC_SIZE, 16, stride=2, bn = True),
ResBlockDiscriminator(DISC_SIZE, DISC_SIZE, 8, bn = True),
ResBlockDiscriminator(DISC_SIZE, DISC_SIZE, 8, bn = True),
nn.ReLU(),
nn.AvgPool2d(8),
)
self.fc = nn.Linear(DISC_SIZE, 1)
        nn.init.xavier_uniform_(self.fc.weight.data, nn.init.calculate_gain('linear'))
def forward(self, x):
        return torch.sigmoid(self.fc(self.model(x).view(-1, DISC_SIZE)))
netG = Generator(nz).cuda()
netD = Discriminator().cuda()
D = Discriminator_D().cuda()
print(netG)
print(netD)
print(D)
criterion = nn.BCELoss()
def calc_gradient_penalty(netD, real_data, fake_data, BATCH_SIZE):
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(BATCH_SIZE, 3, ImageSize, ImageSize)
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)[0]
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
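    # Note (added): unlike the text-generation variant of this function above, LAMBDA
    # is folded into the penalty here, and the training loop backpropagates it directly.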
return gradient_penalty
fixed_noise = torch.randn(batchSize, nz, device=device)
real_label = 1
fake_label = 0
optimizerD = optim.Adam(netD.parameters(), lr=CONF['LR'], betas=(0.0,0.9))
schedulerD = optim.lr_scheduler.LambdaLR(optimizerD, lr_lambda=[lambda epoch: max(0., 1.-epoch/(CONF['niter']*len(dataloader)))])
optimizerD_D = optim.Adam(D.parameters(), lr=CONF['LR']/CONF['PPO_iters'], betas=(0.0,0.9))
schedulerD_D = optim.lr_scheduler.LambdaLR(optimizerD_D, lr_lambda=[lambda epoch: max(0., 1.-epoch/(CONF['niter']*len(dataloader)))])
optimizerG = optim.Adam(netG.parameters(), lr=CONF['LR']/CONF['PPO_iters'], betas=(0.0,0.9))
schedulerG = optim.lr_scheduler.LambdaLR(optimizerG, lr_lambda=[lambda epoch: max(0., 1.-epoch/(CONF['niter']*len(dataloader)))])
niter = CONF['niter']
# c=0
one = torch.FloatTensor([1])
mone = one * -1
one, mone = one.cuda(), mone.cuda()
from tqdm import tqdm_notebook
bFID = 1000.0
bIS = 0.0
mean = 0.0
G_progress = 0
diff = 0
while G_progress < niter*len(dataloader):
data_iter = iter(dataloader)
i=0
while i<len(dataloader):
j = 0
while j < CONF['Diters'] and i < len(dataloader):
            data = next(data_iter)
i += 1
j += 1
############################
# (1) Update D network: maximize D(x) - D(G(z))
###########################
# train with real
netD.zero_grad()
real_cpu = data[0].to(device)
batch_size = real_cpu.size(0)
D_real1_1, D_real1_2 = netD(real_cpu)
errD_real = D_real1_1.mean()
errD_real.backward(mone,retain_graph=True)
D_x = D_real1_1.mean().item()
D_real2_1, D_real2_2 = netD(real_cpu)
ct_penalty = CONF["LAMBDA2"]*((D_real1_1-D_real2_1)**2)
ct_penalty = ct_penalty + CONF["LAMBDA2"]*0.1*((D_real1_2-D_real2_2)**2).mean(dim=1)
ct_penalty = torch.max(0. * (ct_penalty-CONF["Factor_M"]),ct_penalty-CONF["Factor_M"])
ct_penalty = ct_penalty.mean()
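            # Note (added): a CT-GAN style consistency term: the critic is evaluated twice
            # on the same real batch (dropout makes the passes stochastic) and the squared
            # discrepancy of outputs and features above the margin Factor_M is penalised,
            # weighted by LAMBDA2.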
ct_penalty.backward()
# train with fake
noise = torch.randn(batch_size, nz, device=device)
fake = netG(noise)
D_fake1_1, _ = netD(fake.detach())
with torch.no_grad():
W = batch_size * F.softmax(D_fake1_1.data, dim = 0)
errD_fake = (W*D_fake1_1).mean()
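            # Note (added): as in the 'WWGAN' loss of the text experiments, W re-weights
            # the fake term by a softmax over critic scores (summing to batch_size) and is
            # held constant via no_grad.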
errD_fake.backward(one)
gradient_penalty = calc_gradient_penalty(netD, real_cpu.data, fake.data, batch_size)
gradient_penalty.backward()
D_G_z1 = D_fake1_1.mean().item()
errD = errD_real - errD_fake
errD_GP = -(errD_real - errD_fake) + gradient_penalty
optimizerD.step()
############################
# (2) Update G network: maximize D(G(z))
###########################
j = 0
with torch.no_grad():
D0 = (D(fake.data)).data
P0 = (1.-D0)/torch.clamp(D0, min = 1e-7)
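            # Note (added): D is a separate sigmoid discriminator used only to estimate a
            # density ratio: under an (assumed) optimal D, (1 - D(x)) / D(x) is proportional
            # to p_G(x) / p_data(x), so P1 / (D1 * P0) below approximates p_G_new / p_G_old
            # for the PPO-style clipped generator update.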
while j < CONF['PPO_iters'] and i < len(dataloader):
            data = next(data_iter)
i += 1
j += 1
D.zero_grad()
real_tmp = data[0].to(device)
batch_size = real_tmp.size(0)
            label = torch.full((batch_size,), real_label, dtype=torch.float, device=device)
output = D(real_tmp)
errDD_real = criterion(output, label)
errDD_real.backward()
label.fill_(fake_label)
Noise = torch.randn(batch_size, nz, device=device)
fake = netG(Noise)
output = D(fake.detach())
errDD_fake = criterion(output, label)
errDD_fake.backward()
nn.utils.clip_grad_norm_(D.parameters(), CONF['max_grad_norm'])
optimizerD_D.step()
netG.zero_grad()
fake = netG(noise)
D1 = D(netG(noise))
P1 = (1.-D1)
ratio = (P1/torch.clamp(D1*P0, min = 1e-7))
adv_targ, _ = netD(fake)
surr1 = ratio * adv_targ
ratio_clipped = torch.clamp(ratio, 1.0 - CONF['clip_param'], 1.0 + CONF['clip_param'])
surr2 = ratio_clipped * adv_targ
target = torch.where(adv_targ>0, torch.min(surr1, surr2), torch.max(surr1, surr2))
errG = target.mean()
errG.backward(mone)
D_G_z2 = errG.item()
optimizerG.step()
writer.add_histogram('R_{}'.format(j), ratio.data.cpu().numpy(), global_step=G_progress)
writer.add_histogram('R_clip_{}'.format(j), ratio_clipped.data.cpu().numpy(), global_step=G_progress)
writer.add_histogram('Adv_{}'.format(j), adv_targ.data.cpu().numpy(), global_step=G_progress)
writer.add_histogram('L_{}'.format(j), target.data.cpu().numpy(), global_step=G_progress)
writer.add_scalar("Loss_D", errD.item(), global_step=G_progress)
writer.add_scalar("Loss_G", errG.item(), global_step=G_progress)
writer.add_scalar("Loss_D_GP", errD_GP.item(), global_step=G_progress)
writer.add_scalars("stats", {'D(G)_1':D_G_z1, 'D(G)_2':D_G_z2}, global_step=G_progress)
if G_progress % 1000 == 0:
netG.eval()
(mean, std), FID = bm.calculate_fid_IS(netG)
if bIS < mean:
bIS = mean
torch.save(netG, os.path.join(opt.output_dir,"G.cpt"))
torch.save(netD, os.path.join(opt.output_dir,"D.cpt"))
torch.save(D, os.path.join(opt.output_dir,"D_D.cpt"))
print(value = bIS, caption = "{} : {}".format(opt.id, bIS))
if FID < bFID:
bFID = FID
writer.add_scalar("Inception_mean", mean, global_step=G_progress)
writer.add_scalar("FID", FID, global_step=G_progress)
netG.train()
G_progress+=1
schedulerD.step()
schedulerG.step()
schedulerD_D.step()
writer.add_image("real_sample", torchvision.utils.make_grid(real_cpu, nrow=8, normalize = True), global_step=G_progress)
writer.add_image("fake_sample", torchvision.utils.make_grid(fake.data, nrow=8, normalize = True), global_step=G_progress)
print(progress = "{}/{}".format(niter*len(dataloader),niter*len(dataloader)), caption = "Done... Best IS:{}".format(bIS)) | 17,320 | 37.749441 | 133 | py |
PPOGAN | PPOGAN-master/cifar10/fid-score.py | #!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
try:
    from scipy.misc import imread
except ImportError:  # scipy.misc.imread was removed in newer SciPy; imageio is a drop-in replacement
    from imageio import imread
from torch.nn.functional import adaptive_avg_pool2d
try:
from tqdm import tqdm
except ImportError:
    # If tqdm is not available, provide a mock version of it
def tqdm(x): return x
from inception import InceptionV3
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('path', type=str, nargs=2,
help=('Path to the generated images or '
'to .npz statistic files'))
parser.add_argument('--batch-size', type=int, default=50,
help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
help=('Dimensionality of Inception features to use. '
'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
help='GPU to use (leave blank for CPU only)')
def get_activations(files, model, batch_size=50, dims=2048,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
if len(files) % batch_size != 0:
print(('Warning: number of images is not a multiple of the '
'batch size. Some samples are going to be ignored.'))
if batch_size > len(files):
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = len(files)
n_batches = len(files) // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, dims))
for i in tqdm(range(n_batches)):
if verbose:
print('\rPropagating batch %d/%d' % (i + 1, n_batches),
end='', flush=True)
start = i * batch_size
end = start + batch_size
images = np.array([imread(str(f)).astype(np.float32)
for f in files[start:end]])
# Reshape to (n_images, 3, height, width)
images = images.transpose((0, 3, 1, 2))
images /= 255
batch = torch.from_numpy(images).type(torch.FloatTensor)
if cuda:
batch = batch.cuda()
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1)
if verbose:
print(' done')
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
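# Quick sanity check of the formula above: with identity covariances the distance
# reduces to ||mu1 - mu2||^2, e.g.
#   calculate_frechet_distance(np.zeros(2), np.eye(2), np.array([3., 4.]), np.eye(2))
# evaluates to 25.0, since Tr(I + I - 2*sqrt(I*I)) = 0 and 3^2 + 4^2 = 25.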
def calculate_activation_statistics(files, model, batch_size=50,
dims=2048, cuda=False, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : The images numpy array is split into batches with
batch size batch_size. A reasonable batch size
depends on the hardware.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the
number of calculated batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations(files, model, batch_size, dims, cuda, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
"""Calculates the FID of two paths"""
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,
dims, cuda)
m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,
dims, cuda)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
# if __name__ == '__main__':
# args = parser.parse_args()
# os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# fid_value = calculate_fid_given_paths(args.path,
# args.batch_size,
# args.gpu != '',
# args.dims)
# print('FID: ', fid_value) | 9,861 | 36.785441 | 79 | py |
PPOGAN | PPOGAN-master/cifar10/inception.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
class InceptionV3(nn.Module):
"""Pretrained InceptionV3 network returning feature maps"""
# Index of default block of inception to return,
# corresponds to output of final average pooling
DEFAULT_BLOCK_INDEX = 3
# Maps feature dimensionality to their output blocks indices
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
        192: 1, # Second max pooling features
768: 2, # Pre-aux classifier features
2048: 3 # Final average pooling features
}
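    # For example, the 2048-d pool3 features normally used for FID are selected with
    #   InceptionV3([InceptionV3.BLOCK_INDEX_BY_DIM[2048]])
    # and the network then expects input images scaled to the (0, 1) range.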
def __init__(self,
output_blocks=[DEFAULT_BLOCK_INDEX],
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True):
"""Build pretrained InceptionV3
Parameters
----------
output_blocks : list of int
Indices of blocks to return features of. Possible values are:
- 0: corresponds to output of first max pooling
- 1: corresponds to output of second max pooling
- 2: corresponds to output which is fed to aux classifier
- 3: corresponds to output of final average pooling
resize_input : bool
If true, bilinearly resizes input to width and height 299 before
feeding input to model. As the network without fully connected
layers is fully convolutional, it should be able to handle inputs
of arbitrary size, so resizing might not be strictly needed
normalize_input : bool
If true, scales the input from range (0, 1) to the range the
pretrained Inception network expects, namely (-1, 1)
requires_grad : bool
If true, parameters of the model require gradients. Possibly useful
for finetuning the network
use_fid_inception : bool
If true, uses the pretrained Inception model used in Tensorflow's
FID implementation. If false, uses the pretrained Inception model
available in torchvision. The FID Inception model has different
weights and a slightly different structure from torchvision's
Inception model. If you want to compute FID scores, you are
strongly advised to set this parameter to true to get comparable
results.
"""
super(InceptionV3, self).__init__()
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
assert self.last_needed_block <= 3, \
'Last possible output block index is 3'
self.blocks = nn.ModuleList()
if use_fid_inception:
inception = fid_inception_v3()
else:
inception = models.inception_v3(pretrained=True)
# Block 0: input to maxpool1
block0 = [
inception.Conv2d_1a_3x3,
inception.Conv2d_2a_3x3,
inception.Conv2d_2b_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block0))
# Block 1: maxpool1 to maxpool2
if self.last_needed_block >= 1:
block1 = [
inception.Conv2d_3b_1x1,
inception.Conv2d_4a_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block1))
# Block 2: maxpool2 to aux classifier
if self.last_needed_block >= 2:
block2 = [
inception.Mixed_5b,
inception.Mixed_5c,
inception.Mixed_5d,
inception.Mixed_6a,
inception.Mixed_6b,
inception.Mixed_6c,
inception.Mixed_6d,
inception.Mixed_6e,
]
self.blocks.append(nn.Sequential(*block2))
# Block 3: aux classifier to final avgpool
if self.last_needed_block >= 3:
block3 = [
inception.Mixed_7a,
inception.Mixed_7b,
inception.Mixed_7c,
nn.AdaptiveAvgPool2d(output_size=(1, 1))
]
self.blocks.append(nn.Sequential(*block3))
for param in self.parameters():
param.requires_grad = requires_grad
def forward(self, inp):
"""Get Inception feature maps
Parameters
----------
inp : torch.autograd.Variable
Input tensor of shape Bx3xHxW. Values are expected to be in
range (0, 1)
Returns
-------
        torch.Tensor with the activations of the first requested output
        block (this variant returns a single tensor rather than a list)
"""
outp = []
x = inp
if self.resize_input:
x = F.interpolate(x,
size=(299, 299),
mode='bilinear',
align_corners=False)
if self.normalize_input:
x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
for idx, block in enumerate(self.blocks):
x = block(x)
if idx in self.output_blocks:
outp.append(x)
if idx == self.last_needed_block:
break
return outp[0]
def fid_inception_v3():
"""Build pretrained Inception model for FID computation
The Inception model for FID computation uses a different set of weights
and has a slightly different structure than torchvision's Inception.
This method first constructs torchvision's Inception and then patches the
necessary parts that are different in the FID Inception model.
"""
inception = models.inception_v3(num_classes=1008,
aux_logits=False,
pretrained=False)
inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
inception.Mixed_7b = FIDInceptionE_1(1280)
inception.Mixed_7c = FIDInceptionE_2(2048)
state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
inception.load_state_dict(state_dict)
return inception
class FIDInceptionA(models.inception.InceptionA):
"""InceptionA block patched for FID computation"""
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionC(models.inception.InceptionC):
"""InceptionC block patched for FID computation"""
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_1(models.inception.InceptionE):
"""First InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: Tensorflow's average pool does not use the padded zero's in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_2(models.inception.InceptionE):
"""Second InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: The FID Inception model uses max pooling instead of average
# pooling. This is likely an error in this specific Inception
# implementation, as other Inception models use average pooling here
# (which matches the description in the paper).
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1) | 11,625 | 36.503226 | 126 | py |
PPOGAN | PPOGAN-master/cifar10/utils.py | import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision
from torch.utils.tensorboard import SummaryWriter
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torchvision.models.inception import inception_v3
from torch.nn.functional import adaptive_avg_pool2d
import pathlib
from scipy import linalg
from inception import *
from tqdm import tqdm
import math
from inception_score import IS
import pickle, os, time
import numpy as np
class BenchMark:
def __init__(self, dset, CONF, dims, device, nz):
self.Dset=CONF['Dset']
self.IS = IS(os.path.join(CONF['DataRoot'], 'imagenet_inceptionNet'))
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
self.device = device
self.model = InceptionV3([block_idx]).to(device)
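        # The real-data activation statistics (m1, s1) are cached on disk per dataset;
        # a sibling "<name>.occ" directory acts as a crude lock so only one process
        # computes the cache while any concurrent runs wait for it to appear.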
while(os.path.exists(os.path.join(CONF['DataRoot'], CONF['Dset']+"_FID_tmp.pkl.occ"))):
time.sleep(60)
if not os.path.exists(os.path.join(CONF['DataRoot'], CONF['Dset']+"_FID_tmp.pkl")):
os.makedirs(os.path.join(CONF['DataRoot'], CONF['Dset']+"_FID_tmp.pkl.occ"))
self.m1, self.s1 = self.calculate_activation_statistics(dset, self.model, device, dims=dims)
pickle.dump({'mean': self.m1, 'std': self.s1}, open(os.path.join(CONF['DataRoot'], CONF['Dset']+"_FID_tmp.pkl"), "wb"))
os.rmdir(os.path.join(CONF['DataRoot'], CONF['Dset']+"_FID_tmp.pkl.occ"))
else:
tmp = pickle.load(open(os.path.join(CONF['DataRoot'], CONF['Dset']+"_FID_tmp.pkl"), "rb"))
self.m1, self.s1 = (tmp['mean'],tmp['std'])
self.nz = nz
def gen_dataset(self, model, num_samples=50000, batch_size=50):
D=[]
with torch.no_grad():
for i in range(num_samples//batch_size):
noise = torch.randn(batch_size, self.nz, device=self.device)
fake = model(noise)
D.append(fake.data.cpu())
D=torch.cat(D,0)
return D
###############################################
# FID #
###############################################
def get_activations(self, dset, model, dims=2048, Dset = True):
model.eval()
pred_arr = []
for D in dset:
batch = D[0] if Dset else D
batch = batch.to(self.device)
if self.Dset=='cifar10':
batch = ((batch*0.5)+0.5)
batch_size = batch.shape[0]
pred = model(batch)
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr.append(pred.cpu().numpy().reshape(batch_size, -1))
return np.concatenate(pred_arr)
def calculate_frechet_distance(self, mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
def calculate_activation_statistics(self, dset, model, batch_size=50,
dims=2048, cuda=False, verbose=False, Dset = True):
act = self.get_activations(dset, model, dims, Dset = Dset)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def compute_statistics_of_data(self, data, dim_feature=2048, num_samples=50000, batch_size=50):
with torch.no_grad():
dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False, num_workers=2)
m, s = self.calculate_activation_statistics(dataloader, self.model, batch_size, dim_feature, Dset=False)
return m, s
    def calculate_fid(self, model, dim_feature=2048, num_samples=50000, batch_size=50):
D = self.gen_dataset(model, num_samples, batch_size)
dataloader = torch.utils.data.DataLoader(D, batch_size=batch_size, shuffle=False, num_workers=2)
m2, s2 = self.calculate_activation_statistics(dataloader, self.model, batch_size, dim_feature, Dset=False)
return self.calculate_frechet_distance(self.m1,self.s1,m2,s2)
###############################################
# IS #
###############################################
def calculate_IS(self, model, num_samples=50000, batch_size=50):
D = self.gen_dataset(model, num_samples, batch_size)
if self.Dset=='cifar10':
D = (((D.cpu().numpy()*0.5)+0.5)*255).astype('int32')
else:
raise Exception("Not Implemented")
return self.IS.get_inception_score(D.transpose(0,2,3,1), 10)
###############################################
# BOTH #
###############################################
def calculate_fid_IS(self, model, dim_feature=2048, num_samples=50000, batch_size=128):
D = self.gen_dataset(model, num_samples, batch_size)
dataloader = torch.utils.data.DataLoader(D, batch_size=batch_size, shuffle=False, num_workers=2)
m2, s2 = self.calculate_activation_statistics(dataloader, self.model, batch_size, dim_feature, Dset=False)
fid = self.calculate_frechet_distance(self.m1,self.s1,m2,s2)
if self.Dset=='cifar10':
D = ((D.cpu().numpy()+1.)*255/2).astype('int32')
else:
raise Exception("Not Implemented")
return self.IS.get_inception_score(D.transpose(0,2,3,1), 10), fid
class CheckPoint(object):
def __init__(self, checkpoints, epoch_length):
L=[]
for r,v in checkpoints:
L.append((math.ceil(epoch_length*r), v))
self.checkpoints=L
self.epoch = 0
def step(self, value):
self.epoch+=1
epoch = self.epoch
if len(self.checkpoints)==0:
return True
e, v = self.checkpoints[0]
if epoch>=e:
self.checkpoints=self.checkpoints[1:]
return v<=value
else:
return True
def update(self, epoch, value):
self.epoch = epoch
if len(self.checkpoints)==0:
return True
e, v = self.checkpoints[0]
if epoch>=e:
self.checkpoints=self.checkpoints[1:]
return v<=value
else:
return True
def next(self):
if len(self.checkpoints)==0:
return 0, 0
return self.checkpoints[0] | 8,919 | 38.821429 | 131 | py |
lsa-zsl | lsa-zsl-main/src/test.py | import random
from datetime import datetime
import torch.backends.cudnn as cudnn
import classifiers.classifier_images as classifier
import datasets.image_util as util
import networks.models as model
from utils import *
log_name = (
"logs/"
+ datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ "_"
+ opt.dataset
+ "_transductive"
)
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with" " --cuda")
# load data
data = util.DATA_LOADER(opt)
print("# of training samples: ", data.ntrain)
logger = util.Logger(log_name)
logger.write("Params : %s \n" % (vars(opt)))
best_gzsl_acc = 0
best_zsl_acc = 0
path = "models/GZSL/" + opt.dataset
netG = model.Generator(opt)
netG.load_state_dict(torch.load(path + "/" + "generator.pt"))
netG.eval()
print(netG)
netE = model.Encoder(opt)
netE.load_state_dict(torch.load(path + "/" + "encoder.pt"))
netE.eval()
print(netE)
if opt.cuda:
netG.cuda()
netE.cuda()
syn_feature, syn_label = generate_syn_feature(
    netG, data.unseenclasses, data.attribute, 700
)
# Generalized zero-shot learning
if opt.gzsl:
# Concatenate real seen features with synthesized unseen features
train_X = torch.cat((data.train_feature, syn_feature), 0)
train_Y = torch.cat((data.train_label, syn_label), 0)
nclass = opt.nclass_all
# Train GZSL classifier
gzsl_cls = classifier.CLASSIFIER(
train_X,
train_Y,
data,
nclass,
opt.cuda,
opt.classifier_lr,
0.5,
100,
opt.syn_num,
generalized=True,
) # netDec
if best_gzsl_acc < gzsl_cls.H:
best_acc_seen, best_acc_unseen, best_gzsl_acc = (
gzsl_cls.acc_seen,
gzsl_cls.acc_unseen,
gzsl_cls.H,
)
logger.write(
"GZSL: seen=%.4f, unseen=%.4f, h=%.4f\n"
% (gzsl_cls.acc_seen, gzsl_cls.acc_unseen, gzsl_cls.H)
)
# Zero-shot learning
# Train ZSL classifier
zsl_cls = classifier.CLASSIFIER(
syn_feature,
util.map_label(syn_label, data.unseenclasses),
data,
data.unseenclasses.size(0),
opt.cuda,
opt.classifier_lr,
0.5,
100,
opt.syn_num,
generalized=False,
)
acc = zsl_cls.acc
if best_zsl_acc < acc:
best_zsl_acc = acc
best_zsl_cls = zsl_cls.model.state_dict()
logger.write("ZSL: unseen accuracy=%.4f\n" % (acc))
# reset G to training mode
logger.write("Dataset %s\n" % (opt.dataset))
logger.write("the best ZSL unseen accuracy is %s\n" % (best_zsl_acc))
if opt.gzsl:
logger.write("the best GZSL seen accuracy is %.4f\n" % (best_acc_seen))
logger.write("the best GZSL unseen accuracy is %.4f\n" % (best_acc_unseen))
logger.write("the best GZSL H is %.4f\n" % (best_gzsl_acc))
| 3,058 | 27.324074 | 87 | py |
lsa-zsl | lsa-zsl-main/src/train_images_inductive.py | import os
import random
from datetime import datetime
import loguru
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import classifiers.classifier_images as classifier
import datasets.image_util as util
import networks.models as model
from utils import *
log_name = (
"logs/"
+ datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ "_"
+ opt.dataset
+ "_inductive"
)
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
loguru.logger.info("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
loguru.logger.info(
"WARNING: You have a CUDA device, so you should probably run with" " --cuda"
)
# load data
data = util.DATA_LOADER(opt)
loguru.logger.info("# of training samples: ", data.ntrain)
logger = util.Logger(log_name)
logger.write("Params : %s \n" % (vars(opt)))
netG = model.Generator(opt)
netD = model.Discriminator_D1(opt)
netE = model.Encoder(opt)
cls = model.LinearClassifier(2048, opt.nclass_all)
criterion = nn.CrossEntropyLoss()
loguru.logger.info(netG)
loguru.logger.info(netD)
###########
# Init Tensors
input_res = torch.FloatTensor(opt.batch_size, opt.resSize)
input_att = torch.FloatTensor(
opt.batch_size, opt.attSize
) # attSize class-embedding size
input_label = torch.LongTensor(opt.batch_size)
input_res_unpair = torch.FloatTensor(opt.batch_size, opt.resSize)
input_att_unpair = torch.FloatTensor(opt.batch_size, opt.attSize)
input_label_unpair = torch.LongTensor(opt.batch_size)
noise = torch.FloatTensor(opt.batch_size, opt.nz)
noise_mix = torch.FloatTensor(opt.batch_size * 2, opt.nz)
zeros_mix = torch.zeros(opt.nclass_all, opt.nz)
one = torch.tensor(1, dtype=torch.float)
mone = one * -1
input_all_attributes = torch.FloatTensor(opt.nclass_all, opt.attSize)
##########
# Cuda
use_cuda = False
if opt.cuda:
netD.cuda()
netG.cuda()
netE.cuda()
cls.cuda()
use_cuda = True
input_res = input_res.cuda()
noise, input_att = noise.cuda(), input_att.cuda()
input_label = input_label.cuda()
input_res_unpair = input_res_unpair.cuda()
input_att_unpair = input_att_unpair.cuda()
input_label_unpair = input_label_unpair.cuda()
one = one.cuda()
mone = mone.cuda()
noise_mix = noise_mix.cuda()
zeros_mix = zeros_mix.cuda()
input_all_attributes = input_all_attributes.cuda()
def sample():
batch_feature, batch_label, batch_att = data.next_seen_batch(opt.batch_size)
input_res.copy_(batch_feature)
input_att.copy_(batch_att)
input_label.copy_(batch_label)
batch_feature, batch_label, batch_att = data.next_unseen_batch(opt.batch_size)
input_res_unpair.copy_(batch_feature)
input_att_unpair.copy_(batch_att)
input_label_unpair.copy_(batch_label)
optimizer = optim.Adam(netE.parameters(), lr=opt.lr)
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerCLS = optim.SGD(cls.parameters(), lr=0.005, momentum=0.9, weight_decay=1e-4)
input_all_attributes.copy_(data.attribute)
best_gzsl_acc = 0
best_zsl_acc = 0
best_generator = None
best_encoder = None
best_gzsl_cls, best_zsl_cls = None, None
for epoch in range(0, opt.nepoch):
for loop in range(0, 2):
for i in range(0, data.ntrain, opt.batch_size):
#########Discriminator training ##############
            for p in netD.parameters(): # unfreeze the discriminator
p.requires_grad = True
# Train D1 and D2
            gp_sum = 0 # accumulates the gradient penalty, used to adapt opt.lambda1 below
for iter_d in range(opt.critic_iter):
sample()
netD.zero_grad()
input_resv = Variable(input_res)
input_attv = Variable(input_att)
input_res_unpairv = Variable(input_res_unpair)
input_att_unpairv = Variable(input_att_unpair)
criticD_real = netD(input_resv, input_attv)
criticD_real = opt.gammaD * criticD_real.mean()
criticD_real.backward(mone)
if opt.encoded_noise:
means, log_var = netE(input_resv, input_attv)
std = torch.exp(0.5 * log_var)
eps = torch.randn([opt.batch_size, opt.latent_size]).cpu()
eps = Variable(eps.cuda())
z = eps * std + means # torch.Size([64, 312])
else:
noise.normal_(0, 1)
z = Variable(noise)
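                # z is either a reparameterised sample from the encoder posterior
                # (z = mu + sigma * eps, which keeps sampling differentiable w.r.t.
                # the encoder) or, without encoded noise, a draw from the N(0, I) prior.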
fake = netG(z, c=input_attv)
criticD_fake = netD(fake.detach(), input_attv)
criticD_fake = opt.gammaD * criticD_fake.mean()
criticD_fake.backward(one)
# gradient penalty
gradient_penalty = opt.gammaD * calc_gradient_penalty(
netD, input_res, fake.data, input_att
)
# if opt.lambda_mult == 1.1:
gp_sum += gradient_penalty.data
gradient_penalty.backward()
Wasserstein_D = criticD_real - criticD_fake
D_cost = (
criticD_fake - criticD_real + gradient_penalty
) # add Y here and #add vae reconstruction loss
optimizerD.step()
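            # gp_sum tracks the size of the gradient penalty over the critic steps; it is
            # normalised below and used to adapt opt.lambda1, raising the penalty weight
            # when the constraint is violated and lowering it when it is slack.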
gp_sum /= opt.gammaD * opt.lambda1 * opt.critic_iter
if (gp_sum > 1.05).sum() > 0:
opt.lambda1 *= 1.1
elif (gp_sum < 1.001).sum() > 0:
opt.lambda1 /= 1.1
#############Generator training ##############
# Train Generator
            for p in netD.parameters(): # freeze the discriminator
p.requires_grad = False
cls.zero_grad()
netE.zero_grad()
netG.zero_grad()
input_resv = Variable(input_res)
input_attv = Variable(input_att)
input_res_unpairv = Variable(input_res_unpair)
input_att_unpairv = Variable(input_att_unpair)
if opt.encoded_noise:
means, log_var = netE(input_resv, input_attv)
std = torch.exp(0.5 * log_var)
eps = torch.randn([opt.batch_size, opt.latent_size]).cpu()
eps = Variable(eps.cuda())
z = eps * std + means
recon_x = netG(z, c=input_attv)
vae_loss_seen = loss_fn(recon_x, input_resv, means, log_var)
errG = vae_loss_seen
criticG_fake = netD(recon_x, input_attv).mean()
fake = recon_x
else:
errG = 0
noise.normal_(0, 1)
noisev = Variable(noise)
fake = netG(noisev, c=input_attv)
criticG_fake = netD(fake, input_attv).mean()
G_cost = -criticG_fake
errG += opt.gammaG * G_cost
errG.backward()
if opt.encoded_noise:
optimizer.step()
optimizerG.step()
inputs, targets_a, targets_b, lam = ambiguous_data(
input_attv,
input_att_unpair,
input_label,
input_label_unpair,
use_cuda,
) # alpha = 1
inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))
noise_mix.normal_(0, 1)
z = Variable(noise_mix)
outputs = netG(z, c=inputs)
outputs = cls(outputs)
loss = ambiguous_criterion(criterion, outputs, targets_a, targets_b, lam)
loss.backward()
optimizerCLS.step()
optimizerG.step()
logger.write(
"[%d/%d] Loss_D: %.4f Loss_G: %.4f, Wasserstein_dist:%.4f,"
" vae_loss_seen:%.4f\n"
% (
epoch,
opt.nepoch,
D_cost.data.item(),
G_cost.data.item(),
Wasserstein_D.data.item(),
0,
)
)
netG.eval()
syn_feature, syn_label = generate_syn_feature(
netG, data.unseenclasses, data.attribute, 700
)
syn_feature_seen, syn_label_seen = generate_syn_feature(
netG, data.seenclasses, data.attribute, 500
)
# Generalized zero-shot learning
if opt.gzsl:
# Concatenate real seen features with synthesized unseen features
perm = torch.randperm(data.train_feature.size(0))
idx = perm[:400]
X_real_seen = data.train_feature[idx]
Y_real_seen = data.train_label[idx]
X = torch.cat((X_real_seen, syn_feature_seen), 0)
Y = torch.cat((Y_real_seen, syn_label_seen), 0)
train_X = torch.cat((X, syn_feature), 0)
train_Y = torch.cat((Y, syn_label), 0)
nclass = opt.nclass_all
# Train GZSL classifier
gzsl_cls = classifier.CLASSIFIER(
train_X,
train_Y,
data,
nclass,
opt.cuda,
opt.classifier_lr,
0.5,
25,
opt.syn_num,
generalized=True,
)
if best_gzsl_acc < gzsl_cls.H:
best_acc_seen, best_acc_unseen, best_gzsl_acc = (
gzsl_cls.acc_seen,
gzsl_cls.acc_unseen,
gzsl_cls.H,
)
best_generator, best_encoder, best_gzsl_cls = (
netG.state_dict(),
netE.state_dict(),
gzsl_cls.model.state_dict(),
)
logger.write(
"GZSL: seen=%.4f, unseen=%.4f, h=%.4f\n"
% (gzsl_cls.acc_seen, gzsl_cls.acc_unseen, gzsl_cls.H)
)
# Zero-shot learning
# Train ZSL classifier
zsl_cls = classifier.CLASSIFIER(
syn_feature,
util.map_label(syn_label, data.unseenclasses),
data,
data.unseenclasses.size(0),
opt.cuda,
opt.classifier_lr,
0.5,
25,
opt.syn_num,
generalized=False,
)
acc = zsl_cls.acc
if best_zsl_acc < acc:
best_zsl_acc = acc
best_zsl_cls = zsl_cls.model.state_dict()
logger.write("ZSL: unseen accuracy=%.4f\n" % (acc))
# reset G to training mode
netG.train()
netE.train()
#### SAVE MODELS
os.makedirs("models/GZSL/" + opt.dataset, exist_ok=True)
os.makedirs("models/ZSL/" + opt.dataset, exist_ok=True)
torch.save(best_generator, "models/GZSL/" + opt.dataset + "/generator.pt")
torch.save(best_encoder, "models/GZSL/" + opt.dataset + "/encoder.pt")
torch.save(best_gzsl_cls, "models/GZSL/" + opt.dataset + "/gzsl_cls.pt")
torch.save(best_zsl_cls, "models/ZSL/" + opt.dataset + "/zsl_cls.pt")
logger.write("Dataset %s\n" % (opt.dataset))
logger.write("the best ZSL unseen accuracy is %s\n" % (best_zsl_acc))
if opt.gzsl:
logger.write("the best GZSL seen accuracy is %.4f\n" % (best_acc_seen))
logger.write("the best GZSL unseen accuracy is %.4f\n" % (best_acc_unseen))
logger.write("the best GZSL H is %.4f\n" % (best_gzsl_acc))
| 11,148 | 34.848875 | 88 | py |
lsa-zsl | lsa-zsl-main/src/utils.py | from __future__ import print_function
import torch
import torch.autograd as autograd
import torch.nn as nn
from torch.autograd import Variable
from config_images import opt
def loss_fn(recon_x, x, mean, log_var):
BCE = torch.nn.functional.binary_cross_entropy(
recon_x + 1e-12, x.detach(), size_average=False
)
BCE = BCE.sum() / x.size(0)
KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp()) / x.size(0)
return BCE + KLD
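# loss_fn is the usual conditional-VAE objective: a summed binary cross-entropy
# reconstruction term (averaged over the batch) plus the closed-form KL divergence
# between the diagonal Gaussian posterior N(mean, exp(log_var)) and the N(0, I) prior,
# KL = -0.5 * sum(1 + log_var - mean^2 - exp(log_var)).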
def WeightedL1(pred, gt):
wt = (pred - gt).pow(2)
wt /= wt.sum(1).sqrt().unsqueeze(1).expand(wt.size(0), wt.size(1))
loss = wt * (pred - gt).abs()
return loss.sum() / loss.size(0)
def generate_syn_feature(generator, classes, attribute, num):
nclass = classes.size(0)
syn_feature = torch.FloatTensor(nclass * num, opt.resSize)
syn_label = torch.LongTensor(nclass * num)
syn_att = torch.FloatTensor(num, opt.attSize)
syn_noise = torch.FloatTensor(num, opt.nz)
if opt.cuda:
syn_att = syn_att.cuda()
syn_noise = syn_noise.cuda()
for i in range(nclass):
iclass = classes[i]
iclass_att = attribute[iclass]
syn_att.copy_(iclass_att.repeat(num, 1))
syn_noise.normal_(0, 1)
with torch.no_grad():
syn_noisev = Variable(syn_noise)
syn_attv = Variable(syn_att)
fake = generator(syn_noisev, c=syn_attv)
output = fake
syn_feature.narrow(0, i * num, num).copy_(output.data.cpu())
syn_label.narrow(0, i * num, num).fill_(iclass)
return syn_feature, syn_label
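# generate_syn_feature draws `num` synthetic visual features per class: the class
# embedding is repeated, paired with fresh Gaussian noise and decoded by the generator,
# giving (nclass * num, resSize) features and the matching (nclass * num,) label vector.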
def ambiguous_data(x_seen, x_unseen, y_seen, y_unseen, use_cuda=True):
"""Returns ambiguous inputs, pairs of targets, and lambda"""
lam = 0.5
data = torch.cat((x_seen, x_unseen))
labels = torch.cat((y_seen, y_unseen))
batch_size = data.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * data + (1 - lam) * data[index, :]
y_a, y_b = labels, labels[index]
return mixed_x, y_a, y_b, lam
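# ambiguous_data is a fixed-coefficient mixup over class embeddings: seen and unseen
# attribute vectors are concatenated, randomly permuted and blended with lam = 0.5, so
# each returned row averages two class embeddings and carries both contributing labels
# for the mixed-label loss computed by ambiguous_criterion below.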
def ambiguous_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
def calc_gradient_penalty(netD, real_data, fake_data, input_att):
alpha = torch.rand(opt.batch_size, 1)
alpha = alpha.expand(real_data.size())
if opt.cuda:
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if opt.cuda:
interpolates = interpolates.cuda()
interpolates = Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates, Variable(input_att))
ones = torch.ones(disc_interpolates.size())
if opt.cuda:
ones = ones.cuda()
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=ones,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.lambda1
return gradient_penalty
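# Standard WGAN-GP penalty: the conditional critic is evaluated at random interpolates
# between real and generated features, and lambda1 * (||grad|| - 1)^2 pushes the
# critic's gradient norm towards 1, a soft version of the 1-Lipschitz constraint
# required by the Wasserstein objective.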
# Gradient penalty for D2
def calc_gradient_penalty2(netD, real_data, fake_data):
# print real_data.size()
alpha = torch.rand(opt.batch_size, 1)
alpha = alpha.expand(real_data.size())
if opt.cuda:
alpha = alpha.cuda()
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if opt.cuda:
interpolates = interpolates.cuda()
interpolates.requires_grad_(True)
disc_interpolates = netD(interpolates)
ones = torch.ones(disc_interpolates.size())
if opt.cuda:
ones = ones.cuda()
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=ones,
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.lambda1
return gradient_penalty
| 3,946 | 31.352459 | 81 | py |
lsa-zsl | lsa-zsl-main/src/train_images_transductive.py | import os
import random
from datetime import datetime
import loguru
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import classifiers.classifier_images as classifier
import datasets.image_util as util
import networks.models as model
from utils import *
log_name = (
"logs/"
+ datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ "_"
+ opt.dataset
+ "_transductive"
)
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
loguru.logger.info("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
loguru.logger.info(
"WARNING: You have a CUDA device, so you should probably run with" " --cuda"
)
# load data
data = util.DATA_LOADER(opt)
loguru.logger.info("# of training samples: ", data.ntrain)
logger = util.Logger(log_name)
logger.write("Params : %s \n" % (vars(opt)))
netG = model.Generator(opt)
netD = model.Discriminator_D1(opt)
netD2 = model.Discriminator_D2(opt)
netE = model.Encoder(opt)
cls = model.LinearClassifier(2048, opt.nclass_all)
criterion = nn.CrossEntropyLoss()
loguru.logger.info(netG)
loguru.logger.info(netD)
###########
# Init Tensors
input_res = torch.FloatTensor(opt.batch_size, opt.resSize)
input_att = torch.FloatTensor(
opt.batch_size, opt.attSize
) # attSize class-embedding size
input_label = torch.LongTensor(opt.batch_size)
input_res_unpair = torch.FloatTensor(opt.batch_size, opt.resSize)
input_att_unpair = torch.FloatTensor(opt.batch_size, opt.attSize)
input_label_unpair = torch.LongTensor(opt.batch_size)
noise = torch.FloatTensor(opt.batch_size, opt.nz)
noise_mix = torch.FloatTensor(opt.batch_size * 2, opt.nz)
zeros_mix = torch.zeros(opt.nclass_all, opt.nz)
one = torch.tensor(1, dtype=torch.float)
mone = one * -1
input_all_attributes = torch.FloatTensor(opt.nclass_all, opt.attSize)
##########
# Cuda
use_cuda = False
if opt.cuda:
netD.cuda()
netG.cuda()
netD2.cuda()
netE.cuda()
cls.cuda()
use_cuda = True
input_res = input_res.cuda()
noise, input_att = noise.cuda(), input_att.cuda()
input_label = input_label.cuda()
input_res_unpair = input_res_unpair.cuda()
input_att_unpair = input_att_unpair.cuda()
input_label_unpair = input_label_unpair.cuda()
one = one.cuda()
mone = mone.cuda()
noise_mix = noise_mix.cuda()
zeros_mix = zeros_mix.cuda()
input_all_attributes = input_all_attributes.cuda()
def sample():
batch_feature, batch_label, batch_att = data.next_seen_batch(opt.batch_size)
input_res.copy_(batch_feature)
input_att.copy_(batch_att)
input_label.copy_(batch_label)
batch_feature, batch_label, batch_att = data.next_unseen_batch(opt.batch_size)
input_res_unpair.copy_(batch_feature)
input_att_unpair.copy_(batch_att)
input_label_unpair.copy_(batch_label)
optimizer = optim.Adam(netE.parameters(), lr=opt.lr)
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerD2 = optim.Adam(netD2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerCLS = optim.SGD(cls.parameters(), lr=0.005, momentum=0.9, weight_decay=1e-4)
input_all_attributes.copy_(data.attribute)
best_gzsl_acc = 0
best_zsl_acc = 0
best_generator = None
best_encoder = None
best_gzsl_cls, best_zsl_cls = None, None
for epoch in range(0, opt.nepoch):
for loop in range(0, 2):
for i in range(0, data.ntrain, opt.batch_size):
#########Discriminator training ##############
            for p in netD.parameters(): # unfreeze the discriminator
p.requires_grad = True
for p in netD2.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG_v update
# Train D1 and D2
            gp_sum = 0 # accumulates the gradient penalty, used to adapt opt.lambda1 below
for iter_d in range(opt.critic_iter):
sample()
netD.zero_grad()
netD2.zero_grad()
input_resv = Variable(input_res)
input_attv = Variable(input_att)
input_res_unpairv = Variable(input_res_unpair)
input_att_unpairv = Variable(input_att_unpair)
criticD_real = netD(input_resv, input_attv)
criticD_real = opt.gammaD * criticD_real.mean()
criticD_real.backward(mone)
# non-conditional D on unpaired real data
criticD_real_v_unpair = netD2(input_res_unpairv)
criticD_real_v_unpair = opt.gammaD2 * criticD_real_v_unpair.mean()
if opt.gzsl: # NO
criticD_real_v_unpair_seen = netD2(input_resv)
criticD_real_v_unpair += (
opt.gammaD2 * criticD_real_v_unpair_seen.mean()
)
criticD_real_v_unpair.backward(mone)
if opt.encoded_noise:
means, log_var = netE(input_resv, input_attv)
std = torch.exp(0.5 * log_var)
eps = torch.randn([opt.batch_size, opt.latent_size]).cpu()
eps = Variable(eps.cuda())
z = eps * std + means # torch.Size([64, 312])
else:
noise.normal_(0, 1)
z = Variable(noise)
fake = netG(z, c=input_attv)
criticD_fake = netD(fake.detach(), input_attv)
criticD_fake = opt.gammaD * criticD_fake.mean()
criticD_fake.backward(one)
# non-conditional netD_v unpair fake data
noise.normal_(0, 1)
z_D2 = Variable(noise)
fake_v_unpair = netG(z_D2, c=input_att_unpairv)
criticD_fake_v_unpair = netD2(fake_v_unpair.detach())
criticD_fake_v_unpair = opt.gammaD2 * criticD_fake_v_unpair.mean()
if opt.gzsl:
criticD_fake_v_unpair_seen = netD2(fake.detach())
criticD_fake_v_unpair += (
opt.gammaD2 * criticD_fake_v_unpair_seen.mean()
)
criticD_fake_v_unpair.backward(one)
# gradient penalty
gradient_penalty = opt.gammaD * calc_gradient_penalty(
netD, input_res, fake.data, input_att
)
gradient_penalty_v_unpair = opt.gammaD2 * calc_gradient_penalty2(
netD2, input_res_unpairv, fake_v_unpair.data
)
gradient_penalty_v_unpair.backward()
# if opt.lambda_mult == 1.1:
gp_sum += gradient_penalty.data
gradient_penalty.backward()
Wasserstein_D = criticD_real - criticD_fake
D_cost = (
criticD_fake - criticD_real + gradient_penalty
) # add Y here and #add vae reconstruction loss
optimizerD.step()
# non-conditional D, Wasserstein distance
Wasserstein_D_v2 = criticD_real_v_unpair - criticD_fake_v_unpair
D_cost_v2 = (
criticD_fake_v_unpair
- criticD_real_v_unpair
+ gradient_penalty_v_unpair
)
optimizerD2.step()
gp_sum /= opt.gammaD * opt.lambda1 * opt.critic_iter
if (gp_sum > 1.05).sum() > 0:
opt.lambda1 *= 1.1
elif (gp_sum < 1.001).sum() > 0:
opt.lambda1 /= 1.1
#############Generator training ##############
# Train Generator
            for p in netD.parameters(): # freeze the discriminator
p.requires_grad = False
for p in netD2.parameters(): # reset requires_grad
p.requires_grad = False
cls.zero_grad()
netE.zero_grad()
netG.zero_grad()
input_resv = Variable(input_res)
input_attv = Variable(input_att)
input_res_unpairv = Variable(input_res_unpair)
input_att_unpairv = Variable(input_att_unpair)
if opt.encoded_noise:
means, log_var = netE(input_resv, input_attv)
std = torch.exp(0.5 * log_var)
eps = torch.randn([opt.batch_size, opt.latent_size]).cpu()
eps = Variable(eps.cuda())
z = eps * std + means
recon_x = netG(z, c=input_attv)
vae_loss_seen = loss_fn(recon_x, input_resv, means, log_var)
errG = vae_loss_seen
criticG_fake = netD(recon_x, input_attv).mean()
fake = recon_x
else:
errG = 0
noise.normal_(0, 1)
noisev = Variable(noise)
fake = netG(noisev, c=input_attv)
criticG_fake = netD(fake, input_attv).mean()
G_cost = -criticG_fake
noise.normal_(0, 1)
z_D2 = Variable(noise)
fake_v_unpaired = netG(z_D2, c=input_att_unpairv)
criticG_fake_v_unpair = netD2(fake_v_unpaired).mean()
G_cost_v_unpair = -criticG_fake_v_unpair
errG += opt.gammaG * G_cost + opt.gammaG_D2 * G_cost_v_unpair
errG.backward()
if opt.encoded_noise:
optimizer.step()
optimizerG.step()
inputs, targets_a, targets_b, lam = ambiguous_data(
input_attv,
input_att_unpair,
input_label,
input_label_unpair,
use_cuda,
) # alpha = 1
inputs, targets_a, targets_b = map(Variable, (inputs, targets_a, targets_b))
noise_mix.normal_(0, 1)
z = Variable(noise_mix)
outputs = netG(z, c=inputs)
outputs = cls(outputs)
loss = ambiguous_criterion(criterion, outputs, targets_a, targets_b, lam)
loss.backward()
optimizerCLS.step()
optimizerG.step()
logger.write(
"[%d/%d] Loss_D: %.4f Loss_G: %.4f, Wasserstein_dist:%.4f,"
" vae_loss_seen:%.4f\n"
% (
epoch,
opt.nepoch,
D_cost.data.item(),
G_cost.data.item(),
Wasserstein_D.data.item(),
0,
)
)
netG.eval()
syn_feature, syn_label = generate_syn_feature(
netG, data.unseenclasses, data.attribute, 700
)
syn_feature_seen, syn_label_seen = generate_syn_feature(
netG, data.seenclasses, data.attribute, 500
)
# Generalized zero-shot learning
if opt.gzsl:
# Concatenate real seen features with synthesized unseen features
perm = torch.randperm(data.train_feature.size(0))
idx = perm[:400]
X_real_seen = data.train_feature[idx]
Y_real_seen = data.train_label[idx]
X = torch.cat((X_real_seen, syn_feature_seen), 0)
Y = torch.cat((Y_real_seen, syn_label_seen), 0)
train_X = torch.cat((X, syn_feature), 0)
train_Y = torch.cat((Y, syn_label), 0)
nclass = opt.nclass_all
# Train GZSL classifier
gzsl_cls = classifier.CLASSIFIER(
train_X,
train_Y,
data,
nclass,
opt.cuda,
opt.classifier_lr,
0.5,
25,
opt.syn_num,
generalized=True,
)
if best_gzsl_acc < gzsl_cls.H:
best_acc_seen, best_acc_unseen, best_gzsl_acc = (
gzsl_cls.acc_seen,
gzsl_cls.acc_unseen,
gzsl_cls.H,
)
best_generator, best_encoder, best_gzsl_cls = (
netG.state_dict(),
netE.state_dict(),
gzsl_cls.model.state_dict(),
)
logger.write(
"GZSL: seen=%.4f, unseen=%.4f, h=%.4f\n"
% (gzsl_cls.acc_seen, gzsl_cls.acc_unseen, gzsl_cls.H)
)
# Zero-shot learning
# Train ZSL classifier
zsl_cls = classifier.CLASSIFIER(
syn_feature,
util.map_label(syn_label, data.unseenclasses),
data,
data.unseenclasses.size(0),
opt.cuda,
opt.classifier_lr,
0.5,
25,
opt.syn_num,
generalized=False,
)
acc = zsl_cls.acc
if best_zsl_acc < acc:
best_zsl_acc = acc
best_zsl_cls = zsl_cls.model.state_dict()
logger.write("ZSL: unseen accuracy=%.4f\n" % (acc))
# reset G to training mode
netG.train()
netE.train()
#### SAVE MODELS
os.makedirs("models/GZSL/" + opt.dataset, exist_ok=True)
os.makedirs("models/ZSL/" + opt.dataset, exist_ok=True)
torch.save(best_generator, "models/GZSL/" + opt.dataset + "/generator.pt")
torch.save(best_encoder, "models/GZSL/" + opt.dataset + "/encoder.pt")
torch.save(best_gzsl_cls, "models/GZSL/" + opt.dataset + "/gzsl_cls.pt")
torch.save(best_zsl_cls, "models/ZSL/" + opt.dataset + "/zsl_cls.pt")
logger.write("Dataset %s\n" % (opt.dataset))
logger.write("the best ZSL unseen accuracy is %s\n" % (best_zsl_acc))
if opt.gzsl:
logger.write("the best GZSL seen accuracy is %.4f\n" % (best_acc_seen))
logger.write("the best GZSL unseen accuracy is %.4f\n" % (best_acc_unseen))
logger.write("the best GZSL H is %.4f\n" % (best_gzsl_acc))
| 13,577 | 36.821727 | 88 | py |
lsa-zsl | lsa-zsl-main/src/networks/models.py | import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find("BatchNorm") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
# Encoder
class Encoder(nn.Module):
def __init__(self, opt):
super(Encoder, self).__init__()
layer_sizes = opt.encoder_layer_sizes
latent_size = opt.latent_size
layer_sizes[0] += latent_size
self.fc1 = nn.Linear(layer_sizes[0], layer_sizes[-1])
self.fc3 = nn.Linear(layer_sizes[-1], latent_size * 2)
self.lrelu = nn.LeakyReLU(0.2, True)
self.linear_means = nn.Linear(latent_size * 2, latent_size)
self.linear_log_var = nn.Linear(latent_size * 2, latent_size)
self.apply(weights_init)
def forward(self, x, c=None):
if c is not None:
x = torch.cat((x, c), dim=-1)
x = self.lrelu(self.fc1(x))
x = self.lrelu(self.fc3(x))
means = self.linear_means(x)
log_vars = self.linear_log_var(x)
return means, log_vars
# Decoder/Generator
class Generator(nn.Module):
def __init__(self, opt):
super(Generator, self).__init__()
layer_sizes = opt.decoder_layer_sizes
latent_size = opt.latent_size
input_size = latent_size * 2
self.fc1 = nn.Linear(input_size, layer_sizes[0])
self.fc3 = nn.Linear(layer_sizes[0], layer_sizes[1])
self.lrelu = nn.LeakyReLU(0.2, True)
self.sigmoid = nn.Sigmoid()
self.apply(weights_init)
def _forward(self, z, c=None):
z = torch.cat((z, c), dim=-1)
x1 = self.lrelu(self.fc1(z))
x = self.sigmoid(self.fc3(x1))
self.out = x1
return x
def forward(self, z, a1=None, c=None, feedback_layers=None):
if feedback_layers is None:
return self._forward(z, c)
else:
z = torch.cat((z, c), dim=-1)
x1 = self.lrelu(self.fc1(z))
feedback_out = x1 + a1 * feedback_layers
x = self.sigmoid(self.fc3(feedback_out))
return x
class Discriminator_D1(nn.Module):
def __init__(self, opt):
super(Discriminator_D1, self).__init__()
self.fc1 = nn.Linear(opt.resSize + opt.attSize, opt.ndh)
self.fc2 = nn.Linear(opt.ndh, 1)
self.lrelu = nn.LeakyReLU(0.2, True)
self.apply(weights_init)
def forward(self, x, att):
h = torch.cat((x, att), 1)
self.hidden = self.lrelu(self.fc1(h))
h = self.fc2(self.hidden)
return h
class Discriminator_D2(nn.Module):
def __init__(self, opt):
super(Discriminator_D2, self).__init__()
self.fc1 = nn.Linear(opt.resSize, opt.ndh)
self.fc2 = nn.Linear(opt.ndh, 1)
# self.fc3 = nn.Linear(2048, 1)
self.lrelu = nn.LeakyReLU(0.2, True)
self.apply(weights_init)
def forward(self, x):
h = self.lrelu(self.fc1(x))
h = self.fc2(h)
return h
class LinearClassifier(nn.Module):
def __init__(self, input_dim, nclass):
super(LinearClassifier, self).__init__()
self.fc = nn.Linear(input_dim, 4096)
self.relu = nn.ReLU()
self.cls = nn.Linear(4096, nclass)
def forward(self, x):
o = self.relu(self.fc(x))
return self.cls(o)
| 3,431 | 29.371681 | 69 | py |
lsa-zsl | lsa-zsl-main/src/datasets/image_util.py | import numpy as np
import scipy.io as sio
import torch
from sklearn import preprocessing
class Logger(object):
def __init__(self, filename):
self.filename = filename
f = open(self.filename + ".log", "a")
f.close()
def write(self, message):
f = open(self.filename + ".log", "a")
f.write(message)
f.close()
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0)
elif classname.find("BatchNorm") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def map_label(label, classes):
mapped_label = torch.LongTensor(label.size())
for i in range(classes.size(0)):
mapped_label[label == classes[i]] = i
return mapped_label
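# map_label re-indexes dataset labels to a contiguous 0..n-1 range, e.g. with
# classes = tensor([3, 7, 9]) the labels [7, 3, 9] are mapped to [1, 0, 2].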
def map_label_un(label, classes, step):
mapped_label = torch.LongTensor(label.size())
for i in range(classes.size(0)):
mapped_label[label == classes[i]] = i + step
return mapped_label
class DATA_LOADER(object):
def __init__(self, opt):
self.read_matdataset(opt)
self.index_in_epoch = 0
self.epochs_completed = 0
def read_matdataset(self, opt):
matcontent = sio.loadmat(
opt.dataroot + "/" + opt.dataset + "/" + opt.image_embedding + ".mat"
)
feature = matcontent["features"].T
label = matcontent["labels"].astype(int).squeeze() - 1
matcontent = sio.loadmat(
opt.dataroot + "/" + opt.dataset + "/" + opt.class_embedding + "_splits.mat"
)
trainval_loc = matcontent["trainval_loc"].squeeze() - 1
train_loc = matcontent["train_loc"].squeeze() - 1
val_unseen_loc = matcontent["val_loc"].squeeze() - 1
test_seen_loc = matcontent["test_seen_loc"].squeeze() - 1
test_unseen_loc = matcontent["test_unseen_loc"].squeeze() - 1
self.attribute = torch.from_numpy(matcontent["att"].T).float()
self.attribute /= (
self.attribute.pow(2)
.sum(1)
.sqrt()
.unsqueeze(1)
.expand(self.attribute.size(0), self.attribute.size(1))
)
if not opt.validation:
if opt.preprocessing:
if opt.standardization:
print("standardization...")
scaler = preprocessing.StandardScaler()
else:
scaler = preprocessing.MinMaxScaler()
_train_feature = scaler.fit_transform(feature[trainval_loc])
_test_seen_feature = scaler.transform(feature[test_seen_loc])
_test_unseen_feature = scaler.transform(feature[test_unseen_loc])
self.train_feature = torch.from_numpy(_train_feature).float()
mx = self.train_feature.max()
self.train_feature.mul_(1 / mx)
self.train_label = torch.from_numpy(label[trainval_loc]).long()
self.test_unseen_feature = torch.from_numpy(
_test_unseen_feature
).float()
self.test_unseen_feature.mul_(1 / mx)
self.test_unseen_label = torch.from_numpy(label[test_unseen_loc]).long()
self.test_seen_feature = torch.from_numpy(_test_seen_feature).float()
self.test_seen_feature.mul_(1 / mx)
self.test_seen_label = torch.from_numpy(label[test_seen_loc]).long()
else:
self.train_feature = torch.from_numpy(feature[trainval_loc]).float()
self.train_label = torch.from_numpy(label[trainval_loc]).long()
self.test_unseen_feature = torch.from_numpy(
feature[test_unseen_loc]
).float()
self.test_unseen_label = torch.from_numpy(label[test_unseen_loc]).long()
self.test_seen_feature = torch.from_numpy(
feature[test_seen_loc]
).float()
self.test_seen_label = torch.from_numpy(label[test_seen_loc]).long()
else:
self.train_feature = torch.from_numpy(feature[train_loc]).float()
self.train_label = torch.from_numpy(label[train_loc]).long()
self.test_unseen_feature = torch.from_numpy(feature[val_unseen_loc]).float()
self.test_unseen_label = torch.from_numpy(label[val_unseen_loc]).long()
self.seenclasses = torch.from_numpy(np.unique(self.train_label.numpy()))
self.unseenclasses = torch.from_numpy(np.unique(self.test_unseen_label.numpy()))
self.ntrain = self.train_feature.size()[0]
self.ntest_seen = self.test_seen_feature.size()[0]
self.ntest_unseen = self.test_unseen_feature.size()[0]
self.ntrain_class = self.seenclasses.size(0)
self.ntest_class = self.unseenclasses.size(0)
self.train_class = self.seenclasses.clone()
self.allclasses = torch.arange(0, self.ntrain_class + self.ntest_class).long()
self.train_mapped_label = map_label(self.train_label, self.seenclasses)
def next_seen_batch(self, seen_batch):
idx = torch.randperm(self.ntrain)[0:seen_batch]
batch_feature = self.train_feature[idx]
batch_label = self.train_label[idx]
batch_att = self.attribute[batch_label]
return batch_feature, batch_label, batch_att
def next_unseen_batch(self, batch_size):
idx1 = torch.randperm(self.ntest_unseen)[0:batch_size]
idx2 = torch.randperm(self.ntest_unseen)[0:batch_size]
batch_feature = self.test_unseen_feature[idx1]
batch_label = self.test_unseen_label[idx2]
batch_att = self.attribute[batch_label]
return batch_feature, batch_label, batch_att
| 5,813 | 39.943662 | 88 | py |
lsa-zsl | lsa-zsl-main/src/classifiers/classifier_images.py | import copy
import pdb
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.preprocessing import MinMaxScaler
from torch.autograd import Variable
import datasets.image_util as util
class CLASSIFIER:
def __init__(
self,
_train_X,
_train_Y,
data_loader,
_nclass,
_cuda,
_lr=0.001,
_beta1=0.5,
_nepoch=20,
_batch_size=100,
generalized=True,
):
self.train_X = _train_X.clone()
self.train_Y = _train_Y.clone()
self.test_seen_feature = data_loader.test_seen_feature.clone()
self.test_seen_label = data_loader.test_seen_label
self.test_unseen_feature = data_loader.test_unseen_feature.clone()
self.test_unseen_label = data_loader.test_unseen_label
self.seenclasses = data_loader.seenclasses
self.unseenclasses = data_loader.unseenclasses
self.batch_size = _batch_size
self.nepoch = _nepoch
self.nclass = _nclass
self.input_dim = _train_X.size(1)
self.cuda = _cuda
self.model = LINEAR_LOGSOFTMAX_CLASSIFIER(self.input_dim, self.nclass)
self.model.apply(util.weights_init)
self.criterion = nn.NLLLoss()
self.input = torch.FloatTensor(_batch_size, self.input_dim)
self.label = torch.LongTensor(_batch_size)
self.lr = _lr
self.beta1 = _beta1
self.optimizer = optim.Adam(
self.model.parameters(), lr=_lr, betas=(_beta1, 0.999)
)
if self.cuda:
self.model.cuda()
self.criterion.cuda()
self.input = self.input.cuda()
self.label = self.label.cuda()
self.index_in_epoch = 0
self.epochs_completed = 0
self.ntrain = self.train_X.size()[0]
if generalized:
self.acc_seen, self.acc_unseen, self.H, self.epoch = self.fit()
else:
self.acc, self.best_model = self.fit_zsl()
def fit_zsl(self):
best_acc = 0
mean_loss = 0
last_loss_epoch = 1e8
best_model = copy.deepcopy(self.model.state_dict())
for epoch in range(self.nepoch):
for i in range(0, self.ntrain, self.batch_size):
self.model.zero_grad()
batch_input, batch_label = self.next_batch(self.batch_size)
self.input.copy_(batch_input)
self.label.copy_(batch_label)
inputv = Variable(self.input)
labelv = Variable(self.label)
output = self.model(inputv)
loss = self.criterion(output, labelv)
mean_loss += loss.item()
loss.backward()
self.optimizer.step()
acc = self.val(
self.test_unseen_feature,
self.test_unseen_label,
self.unseenclasses,
)
if acc > best_acc:
best_acc = acc
best_model = copy.deepcopy(self.model.state_dict())
return best_acc, best_model
def fit(self):
best_H = 0
best_seen = 0
best_unseen = 0
out = []
best_model = copy.deepcopy(self.model.state_dict())
for epoch in range(self.nepoch):
for i in range(0, self.ntrain, self.batch_size):
self.model.zero_grad()
batch_input, batch_label = self.next_batch(self.batch_size)
self.input.copy_(batch_input)
self.label.copy_(batch_label)
inputv = Variable(self.input)
labelv = Variable(self.label)
output = self.model(inputv)
loss = self.criterion(output, labelv)
loss.backward()
self.optimizer.step()
acc_seen = 0
acc_unseen = 0
acc_seen = self.val_gzsl(
self.test_seen_feature, self.test_seen_label, self.seenclasses
)
acc_unseen = self.val_gzsl(
self.test_unseen_feature,
self.test_unseen_label,
self.unseenclasses,
)
H = 2 * acc_seen * acc_unseen / (acc_seen + acc_unseen)
if H > best_H:
best_seen = acc_seen
best_unseen = acc_unseen
best_H = H
return best_seen, best_unseen, best_H, epoch
def next_batch(self, batch_size):
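        """Return the next training mini-batch, shuffling at the start of training and
        reshuffling whenever a batch would run past the end of the data (epoch boundary)."""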
start = self.index_in_epoch
# shuffle the data at the first epoch
if self.epochs_completed == 0 and start == 0:
perm = torch.randperm(self.ntrain)
self.train_X = self.train_X[perm]
self.train_Y = self.train_Y[perm]
# the last batch
if start + batch_size > self.ntrain:
self.epochs_completed += 1
rest_num_examples = self.ntrain - start
if rest_num_examples > 0:
X_rest_part = self.train_X[start : self.ntrain]
Y_rest_part = self.train_Y[start : self.ntrain]
# shuffle the data
perm = torch.randperm(self.ntrain)
self.train_X = self.train_X[perm]
self.train_Y = self.train_Y[perm]
# start next epoch
start = 0
self.index_in_epoch = batch_size - rest_num_examples
end = self.index_in_epoch
X_new_part = self.train_X[start:end]
Y_new_part = self.train_Y[start:end]
if rest_num_examples > 0:
return torch.cat((X_rest_part, X_new_part), 0), torch.cat(
(Y_rest_part, Y_new_part), 0
)
else:
return X_new_part, Y_new_part
else:
self.index_in_epoch += batch_size
end = self.index_in_epoch
return self.train_X[start:end], self.train_Y[start:end]
def val_gzsl(self, test_X, test_label, target_classes, penalty=0):
start = 0
ntest = test_X.size()[0]
predicted_label = torch.LongTensor(test_label.size())
for i in range(0, ntest, self.batch_size):
end = min(ntest, start + self.batch_size)
if self.cuda:
with torch.no_grad():
inputX = Variable(test_X[start:end].cuda())
else:
with torch.no_grad():
inputX = Variable(test_X[start:end])
output = self.model(inputX)
            if penalty > 0:
                # calibrated stacking: subtract the penalty from the seen-class scores
                output[:, self.seenclasses] = output[:, self.seenclasses] - penalty
_, predicted_label[start:end] = torch.max(output.data, 1)
start = end
acc = self.compute_per_class_acc_gzsl(
test_label, predicted_label, target_classes
)
return acc
def compute_per_class_acc_gzsl(self, test_label, predicted_label, target_classes):
acc_per_class = 0
for i in target_classes:
idx = test_label == i
acc_per_class += torch.sum(
test_label[idx] == predicted_label[idx]
) / torch.sum(idx)
acc_per_class /= target_classes.size(0)
return acc_per_class
def val(self, test_X, test_label, target_classes):
start = 0
ntest = test_X.size()[0]
predicted_label = torch.LongTensor(test_label.size())
for i in range(0, ntest, self.batch_size):
end = min(ntest, start + self.batch_size)
if self.cuda:
with torch.no_grad():
inputX = Variable(test_X[start:end].cuda())
else:
with torch.no_grad():
inputX = Variable(test_X[start:end])
output = self.model(inputX)
_, predicted_label[start:end] = torch.max(output.data, 1)
start = end
acc = self.compute_per_class_acc(
util.map_label(test_label, target_classes),
predicted_label,
target_classes.size(0),
)
return acc
def compute_per_class_acc(self, test_label, predicted_label, nclass):
acc_per_class = torch.FloatTensor(nclass).fill_(0)
for i in range(nclass):
idx = test_label == i
acc_per_class[i] = torch.sum(
test_label[idx] == predicted_label[idx]
) / torch.sum(idx)
return acc_per_class.mean()
class LINEAR_LOGSOFTMAX_CLASSIFIER(nn.Module):
def __init__(self, input_dim, nclass):
super(LINEAR_LOGSOFTMAX_CLASSIFIER, self).__init__()
self.fc = nn.Linear(input_dim, nclass)
self.logic = nn.LogSoftmax(dim=1)
def forward(self, x):
o = self.logic(self.fc(x))
return o
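# Minimal usage sketch (illustrative, not part of the original file; assumes a populated
# DATA_LOADER instance `data` and synthesized features/labels `syn_feature`, `syn_label`):
#   clf = CLASSIFIER(syn_feature, syn_label, data, data.ntrain_class + data.ntest_class,
#                    _cuda=True, generalized=True)
#   print(clf.acc_seen, clf.acc_unseen, clf.H)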
| 8,736 | 35.404167 | 86 | py |
SSTOD | SSTOD-main/train.py | import sys
from torch import nn
from transformers import AutoTokenizer
import ontology
from models.model import UBAR_plus
sys.path.append('..')
from evaluate import validate_metric, validation_metric_gpt
import argparse
import json
import logging
import os
import random
import time
from torch.utils.data import RandomSampler, DistributedSampler, BatchSampler, DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from config import parser, BIO_TAG
import numpy as np
import torch
import torch.nn.functional as F
from reader import data_util
from reader.DataBase import DB
from utils.optim import Optim
from utils.utils import log_first_inputs, maskedNll
import warnings
warnings.filterwarnings('ignore')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def add_special_tokens(tokenizer):
"""
    add special tokens to the gpt tokenizer
    serves a role similar to Vocab.construt(): build the dict of special tokens
    (bracketed dialogue acts plus ontology.special_tokens) and register it with the tokenizer
"""
special_tokens = []
action = ontology.all_acts
for word in action:
word = '[' + word + ']'
special_tokens.append(word)
special_tokens_list = ontology.special_tokens
special_tokens.extend(special_tokens_list)
special_tokens_dict = {'additional_special_tokens': special_tokens}
tokenizer.add_special_tokens(special_tokens_dict)
return tokenizer
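# Illustrative note (not part of the original file): after registering the special tokens,
# the model's embedding matrix must be resized to the enlarged vocabulary, e.g.
#   tokenizer = add_special_tokens(AutoTokenizer.from_pretrained(args.model_path))
#   model.resize_token_embeddings(len(tokenizer))
# (UBAR_plus.__init__ in models/model.py performs this resize.)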
def add_torch_input(inputs, device):
# to tensor and to device
contexts_tensor = torch.from_numpy(inputs['contexts_np']).long()
contexts_tensor = contexts_tensor.to(device)
inputs['contexts_tensor'] = contexts_tensor
return inputs
def add_torch_input_eval(inputs, device):
# inputs: context
inputs['context_tensor'] = torch.tensor(
[inputs['context']]).to(device)
return inputs
def calculate_loss_and_accuracy(outputs, labels):
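    """Token-level LM cross-entropy: logits are shifted against the labels, positions equal
    to pad_id (0) are ignored, and the summed loss is averaged over the non-pad targets."""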
lm_logits = outputs[0]
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
pad_id = 0
loss_fct = nn.CrossEntropyLoss(ignore_index=pad_id, reduction='sum')
loss = loss_fct(
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
# avg loss
not_ignore = shift_labels.ne(pad_id)
num_targets = not_ignore.long().sum().item()
loss /= num_targets
return loss
def train(args, dataloader, dev_dataloader, model, tokenizer, writer, optimizer, scheduler, num_train_steps, device):
log_inputs = 2
global_step = 0
all_batches = dataloader.get_batches(args.train_batch_size)
for epoch in range(args.num_train_epochs):
epoch_step = 0
tr_loss = 0.0
logging_loss = 0.0
btm = time.time()
oom_time = 0
optimizer.zero_grad()
train_iterator = dataloader.get_nontranspose_data_iterator(all_batches)
for batch_idx, batch in enumerate(train_iterator):
# train
inputs = dataloader.convert_batch_session(batch)
model.train()
if log_inputs > 0:
log_first_inputs({'input': tokenizer.decode(inputs['contexts'][0])})
log_inputs -= 1
# to tensor
inputs = add_torch_input(inputs, device)
outputs = model(inputs['contexts_tensor'])
loss = calculate_loss_and_accuracy(
outputs, labels=inputs['contexts_tensor'])
loss.backward()
tr_loss += loss.item()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
epoch_step += 1
# step, wrt gradient_accumulation_steps, clip grad norm
if (epoch_step + 1) % args.gradient_accumulation_steps == 0 or (epoch_step + 1 == num_train_steps):
optimizer.step()
if scheduler is not None:
scheduler.step()
optimizer.zero_grad()
# global_step: actual step the optimizer took
global_step += 1
logs = {}
# logging: loss, lr... after certain amount of steps
if args.report_interval > 0 and global_step % args.report_interval == 0:
                    loss_scalar = (tr_loss - logging_loss) / args.report_interval
logging_loss = tr_loss
logs['loss'] = loss_scalar
logging.info(
'Global step: {}, epoch step: {}, interval loss: {:.4f}'.format(
global_step, epoch_step, loss_scalar
))
logging.info('Train epoch time: {:.2f} min, epoch loss: {:.4f}'.format(
(time.time()-btm) / 60, tr_loss))
# save model after every epoch
save_model(args.exp_path, epoch, tr_loss/epoch_step, model, tokenizer)
def validate(args, dataloader, model, tokenizer, db, device):
logging.info("**** Running Evaluation ****")
eval_data = dataloader.data
btm = time.time()
result_collection = {}
with torch.no_grad():
for dial_idx, dialog in enumerate(tqdm(eval_data)):
pv_turn = {}
for turn_idx, turn in enumerate(dialog):
first_turn = (turn_idx == 0)
inputs = dataloader.convert_turn_eval(turn, pv_turn, first_turn)
inputs = add_torch_input_eval(inputs, device)
context_length = len(inputs['context'])
# generate kd_snippets
if args.use_true_curr_kdpn:
outputs = turn['kdpn']
kdpn_gen, decoded_kdpn = decode_generated_kdpn(tokenizer, outputs)
else:
max_len = 40
outputs = model.generate(input_ids=inputs['context_tensor'],
max_length=context_length + max_len, temperature=0.7,
pad_token_id=0,
eos_token_id=tokenizer.encode('<eos_k>')[1])
generated = outputs[0].cpu().numpy().tolist()
kdpn_gen, decoded_kdpn = decode_generated_kdpn(tokenizer, generated[context_length - 1:])
# generate correct_char and score
if args.use_true_curr_kp:
outputs = turn['cc']
decoded_cc = decode_outputs_cc(tokenizer, outputs)
else:
decoded_cc = model.generate_correct_char(decoded_kdpn) if len(decoded_kdpn) else ''
ccs = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(
'<sos_c>' + decoded_cc + '<eos_c>'))
inputs['context_tensor'] = torch.tensor([inputs['context'][:-1] + kdpn_gen + ccs
+ tokenizer.encode('<sos_b>',
add_special_tokens=False)]).to(device)
context_length = len(inputs['context_tensor'][0])
# generate bspn, act, response
outputs = model.generate(input_ids=inputs['context_tensor'],
max_length=context_length + 80, temperature=0.7,
# top_p=0.9, num_beams=4,
pad_token_id=0,
eos_token_id=tokenizer.encode('<eos_a>')[1])
generated_bsa = outputs[0].cpu().numpy().tolist()
generated_bsa = generated_bsa[context_length - 1:]
try:
bspn_gen, aspn_gen = decode_generated_bspn_act(tokenizer, generated_bsa)
except ValueError as exception:
logging.info(str(exception))
logging.info(tokenizer.decode(generated_bsa))
aspn_gen, bspn_gen = [], []
# query DB
if args.use_true_db_pointer:
db = turn['db']
else:
db_results = dataloader.kb.act_to_DBPointer(tokenizer.decode(aspn_gen[1:-1]))
db = tokenizer.convert_tokens_to_ids(
tokenizer.tokenize('<sos_db> ' + db_results + ' <eos_db>'))
inputs['context_tensor_db'] = torch.tensor(
[inputs['context'][:-1] + kdpn_gen + ccs + generated_bsa + db + tokenizer.encode(
'<sos_r>', add_special_tokens=False)]).to(
device)
context_length = len(inputs['context_tensor_db'][0])
outputs_db = model.generate(input_ids=inputs['context_tensor_db'],
max_length=context_length + 40, temperature=0.7,
# top_p=0.9, num_beams=4,
pad_token_id=0,
eos_token_id=tokenizer.encode('<eos_r>')[1])
generated_r = outputs_db[0].cpu().numpy().tolist()
generated_r = generated_r[context_length - 1:]
try:
resp_gen = decode_generated_resp(tokenizer, generated_r)
decoded = {'kdpn': kdpn_gen, 'cc': ccs, 'bspn': bspn_gen, 'aspn': aspn_gen, 'resp': resp_gen}
except ValueError as exception:
logging.info(str(exception))
logging.info(tokenizer.decode(generated_r))
decoded = {'kdpn': [], 'resp': [], 'cc': [], 'bspn': [], 'aspn': []}
turn['resp_gen'] = decoded['resp']
turn['kdpn_gen'] = turn['kdpn'] if args.use_true_curr_kdpn else decoded['kdpn']
turn['cc_gen'] = turn['cc'] if args.use_true_curr_kp else decoded['cc']
turn['bspn_gen'] = decoded['bspn']
turn['aspn_gen'] = decoded['aspn']
pv_turn['labels'] = inputs['labels'] # all true previous context
pv_turn['resp'] = decoded['resp']
pv_turn['bspn'] = decoded['bspn']
pv_turn['kdpn'] = turn['kdpn'] if args.use_true_curr_kdpn else decoded['kdpn']
pv_turn['cc'] = turn['cc'] if args.use_true_curr_kp else decoded['cc']
pv_turn['db'] = turn['db'] if args.use_true_db_pointer else db
pv_turn['aspn'] = decoded['aspn']
result_collection.update(
dataloader.inverse_transpose_turn(dialog))
results, _ = dataloader.wrap_result_lm(result_collection)
data_path = os.path.join(args.data_dir, 'test.json')
joint_acc, slot_acc, success = validation_metric_gpt(data_path, dataloader, results)
logging.info('test' + ' results: joint_acc: {:.2f}\tslot_acc: {:.2f}\tsuccess: {:.2f}'
.format(joint_acc * 100, slot_acc * 100, success * 100))
def decode_generated_kdpn(tokenizer, generated):
sos_k_id = tokenizer.encode('<sos_k>')[1]
eos_k_id = tokenizer.encode('<eos_k>')[1]
kd_id = tokenizer.encode('<kd>')[1]
if sos_k_id in generated:
sos_k_idx = generated.index(sos_k_id)
else:
sos_k_idx = 1
if eos_k_id in generated:
eos_k_idx = generated.index(eos_k_id)
else:
eos_k_idx = len(generated) - 1
kspn_gen = generated[sos_k_idx: eos_k_idx + 1]
decoded_gen = tokenizer.decode(kspn_gen[1: -1])
decoded_gen = decoded_gen.strip().split('<kd>')
if '' in decoded_gen:
decoded_gen.remove('')
return kspn_gen, decoded_gen
def decode_outputs_cc(tokenizer, outputs):
cc = []
for output in outputs:
char = tokenizer.decode(output)
if char == '<sos_c>' or char =='<eos_c>':
continue
cc.append(char)
return ' '.join(cc)
def decode_generated_bspn_act(tokenizer, generated):
"""decode generated"""
eos_b_id = tokenizer.encode('<eos_b>')[1]
eos_a_id = tokenizer.encode('<eos_a>')[1]
    # eos_a may not exist if gpt2 generated repetitive words
if eos_a_id in generated:
eos_a_idx = generated.index(eos_a_id)
else:
eos_a_idx = len(generated) - 1
logging.info('eos_a not in generated: ' + tokenizer.decode(generated))
eos_b_idx = generated.index(eos_b_id)
bspn_gen = generated[: eos_b_idx + 1]
aspn_gen = generated[eos_b_idx + 1: eos_a_idx + 1]
return bspn_gen, aspn_gen
def decode_generated_resp(tokenizer, generated):
eos_r_id = tokenizer.encode('<eos_r>')[1]
if eos_r_id in generated:
eos_r_idx = generated.index(eos_r_id)
else:
eos_r_idx = len(generated) - 1
return generated[: eos_r_idx + 1]
def save_model(exp_path, epoch, loss, model, tokenizer):
save_path = os.path.join(
exp_path, 'epoch{}_trloss{:.2f}'.format(epoch+1, loss))
if not os.path.exists(save_path):
os.mkdir(save_path)
logging.info('Saving model checkpoint to %s', save_path)
# save model
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = os.path.join(save_path, 'pytorch_model.bin')
torch.save(model_to_save.state_dict(), output_model_file)
# save tokenizer
# tokenizer.save(os.path.join(save_path, 'tokenizer.json'))
def load_model(model, save_path):
ckpt_path = os.path.join(save_path, 'pytorch_model.bin')
ckpt = torch.load(ckpt_path, map_location='cpu')
model.load_state_dict(ckpt)
def main():
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
if not torch.cuda.is_available():
device = torch.device('cpu')
n_gpu = 0
else:
device = torch.device(args.device)
n_gpu = 1
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if args.mode not in ['train', 'test']:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
# if os.path.exists(args.exp_path) and os.listdir(args.exp_path):
# raise ValueError("Output directory ({}) already exists and is not empty.".format(args.exp_path))
# os.makedirs(args.exp_path, exist_ok=True)
if args.mode == 'train' and not os.path.exists(args.exp_path):
os.mkdir(args.exp_path)
db = DB(args.db_path, args.tfidf_path)
# prepare tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(args.model_path)
add_special_tokens(tokenizer)
if args.mode == 'train':
model = UBAR_plus(args, tokenizer, device)
else:
model = UBAR_plus(args, tokenizer, device)
load_model(model, args.eval_load_path)
model.to(device)
optim = Optim(learning_rate=args.learning_rate)
writer = SummaryWriter(log_dir='./log')
# prepare dataset
train_dataset = None
num_train_steps = None
if args.mode == 'train':
from reader.Dataset import UBARDataset
train_dataset = UBARDataset(args, tokenizer, args.data_dir, 'train', db)
dev_dataset = UBARDataset(args, tokenizer, args.data_dir, 'dev', db)
num_train_steps = int(len(train_dataset) * args.num_train_epochs / args.train_batch_size / args.gradient_accumulation_steps)
optimizer, scheduler = optim.get_optimizer_scheduler(model, num_train_steps)
logger.info('start training...')
# train
model.train()
train(args,
dataloader=train_dataset,
dev_dataloader=dev_dataset,
model=model,
tokenizer=tokenizer,
writer=writer,
optimizer=optimizer,
scheduler=scheduler,
num_train_steps=num_train_steps,
device=device)
elif args.mode == 'test':
from reader.Dataset import UBARDataset
test_data = UBARDataset(args, tokenizer, args.data_dir, 'test', db)
model.eval()
validate(args, test_data, model, tokenizer, db, device)
if __name__ == '__main__':
main()
| 16,756 | 38.614657 | 132 | py |
SSTOD | SSTOD-main/models/TFIDF.py | import heapq
import json
import pickle
import re
import numpy as np
from pypinyin import lazy_pinyin
from sklearn.metrics.pairwise import cosine_similarity
import torch
from tqdm import tqdm
class TfIdf(object):
def __init__(self):
self.word_vectorizer, self.word_transformer, self.word_weight = pickle.load(open('./data/kb/db_words.pkl', 'rb'))
self.pinyin_vectorizer, self.pinyin_transformer, self.pinyin_weight = pickle.load(
open('./data/kb/db_pinyin.pkl', 'rb'))
self.db = json.load(open('./data/kb/db_key.json', 'r', encoding='utf-8'))
def search(self, sen, n: int = 1, rate=0.09):
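        """Rank database entries by a weighted mix of character-level and pinyin-level TF-IDF
        cosine similarity (`rate` weights the character view); return the top-n [entry, score] pairs."""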
x_word = [' '.join(sen)]
tf_idf = self.word_transformer.transform(self.word_vectorizer.transform(x_word))
        x_test_weight = tf_idf.toarray()  # TF-IDF weight matrix of the test samples
result_word = cosine_similarity(x_test_weight, self.word_weight)
x_pinyin = [' '.join(lazy_pinyin(sen))]
tf_idf = self.pinyin_transformer.transform(self.pinyin_vectorizer.transform(x_pinyin))
        x_test_weight = tf_idf.toarray()  # TF-IDF weight matrix of the test samples
result_pinyin = cosine_similarity(x_test_weight, self.pinyin_weight)
if n == 1:
result = result_word * rate + result_pinyin * (1 - rate)
re2 = np.argmax(result, axis=1)[0]
all_ = [[self.db[re2], result[0][re2]]]
else:
result = (result_word * rate + result_pinyin * (1 - rate)).tolist()[0]
re2 = map(result.index, heapq.nlargest(n, result))
all_ = [[self.db[i], result[idx][i]] for idx, i in enumerate(re2)]
# all_ = [self.db[i] for i in list(re2)]
return all_
def search_batch(self, batch, n: int = 1, rate=0.09):
x_word = [' '.join(sen) for sen in batch]
tf_idf = self.word_transformer.transform(self.word_vectorizer.transform(x_word))
        x_test_weight = tf_idf.toarray()  # TF-IDF weight matrix of the test samples
result_word = cosine_similarity(x_test_weight, self.word_weight)
x_pinyin = [' '.join(lazy_pinyin(sen)) for sen in batch]
tf_idf = self.pinyin_transformer.transform(self.pinyin_vectorizer.transform(x_pinyin))
        x_test_weight = tf_idf.toarray()  # TF-IDF weight matrix of the test samples
result_pinyin = cosine_similarity(x_test_weight, self.pinyin_weight)
result = result_word * rate + result_pinyin * (1 - rate)
re2 = torch.topk(torch.tensor(result), n).indices.tolist()
all_ = [[[self.db[i], result[idx][i]] for i in t_k] for idx, t_k in enumerate(re2)]
candidates_word = [word[0][0] for word in all_]
return ' '.join(candidates_word) | 2,614 | 44.877193 | 121 | py |
SSTOD | SSTOD-main/models/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModelWithLMHead, AutoModel
from models.TFIDF import TfIdf
class UBAR_plus(nn.Module):
def __init__(self, args, tokenizer, device):
super(UBAR_plus, self).__init__()
self.args = args
self.tokenizer = tokenizer
self.device = device
self.kp_model = TfIdf()
self.gpt_model = AutoModelWithLMHead.from_pretrained(args.model_path)
self.gpt_model.resize_token_embeddings(len(self.tokenizer))
def forward(self, inputs_ids):
return self.gpt_model(inputs_ids)
def generate(self, **kwargs):
return self.gpt_model.generate(**kwargs)
def generate_correct_char(self, input_ids):
"""query the database and select the most similarity chars"""
KP_results = self.kp_model.search_batch(input_ids)
if len(KP_results) > 10:
return KP_results[:10]
return KP_results
| 978 | 29.59375 | 77 | py |
SSTOD | SSTOD-main/reader/Dataset.py | import copy
import json
import logging
import os
import random
from collections import OrderedDict
from torch.utils.data import Dataset
import ontology
from reader.data_util import read_name_dialog_from_file
from utils import utils
class _BaseDataset(Dataset):
def __init__(self, args):
super(_BaseDataset, self).__init__()
self.vocab = None
self.db = None
self.args = args
self.set_stats = {}
def _bucket_by_turn(self, encoded_data):
turn_bucket = {}
for dial in encoded_data:
turn_len = len(dial)
if turn_len not in turn_bucket:
turn_bucket[turn_len] = []
turn_bucket[turn_len].append(dial)
del_l = []
for k in turn_bucket:
if k >= 5:
del_l.append(k)
logging.debug("bucket %d instance %d" % (k, len(turn_bucket[k])))
# for k in del_l:
# turn_bucket.pop(k)
return OrderedDict(sorted(turn_bucket.items(), key=lambda i: i[0]))
def _construct_mini_batch(self, data, batch_size):
all_batches = []
batch = []
for dial in data:
batch.append(dial)
if len(batch) == batch_size:
# print('batch size: %d, batch num +1'%(len(batch)))
all_batches.append(batch)
batch = []
# if remainder > 1/2 batch_size, just put them in the previous batch, otherwise form a new batch
# print('last batch size: %d, batch num +1'%(len(batch)))
if len(batch) > 0.5 * batch_size:
all_batches.append(batch)
elif len(all_batches):
all_batches[-1].extend(batch)
else:
all_batches.append(batch)
return all_batches
def transpose_batch(self, batch):
dial_batch = []
turn_num = len(batch[0])
for turn in range(turn_num):
turn_l = {}
for dial in batch:
this_turn = dial[turn]
for k in this_turn:
if k not in turn_l:
turn_l[k] = []
turn_l[k].append(this_turn[k])
dial_batch.append(turn_l)
return dial_batch
def get_nontranspose_data_iterator(self, all_batches):
for i, batch in enumerate(all_batches):
yield batch
def inverse_transpose_turn(self, turn_list):
"""
eval, one dialog at a time
"""
dialogs = {}
turn_num = len(turn_list)
dial_id = turn_list[0]['dial_id']
dialogs[dial_id] = []
for turn_idx in range(turn_num):
dial_turn = {}
turn = turn_list[turn_idx]
for key, value in turn.items():
if key=='dial_id':
continue
if key == 'pointer' and self.db is not None:
turn_domain = turn['turn_domain'][-1]
value = self.db.pointerBack(value, turn_domain)
dial_turn[key] = value
dialogs[dial_id].append(dial_turn)
return dialogs
def inverse_transpose_batch(self, turn_batch_list):
"""
:param turn_batch_list: list of transpose dial batch
"""
dialogs = {}
total_turn_num = len(turn_batch_list)
# initialize
for idx_in_batch, dial_id in enumerate(turn_batch_list[0]['dial_id']):
dialogs[dial_id] = []
for turn_n in range(total_turn_num):
dial_turn = {}
turn_batch = turn_batch_list[turn_n]
for key, v_list in turn_batch.items():
if key == 'dial_id':
continue
value = v_list[idx_in_batch]
if key == 'pointer' and self.db is not None:
turn_domain = turn_batch['turn_domain'][idx_in_batch][-1]
value = self.db.pointerBack(value, turn_domain)
dial_turn[key] = value
dialogs[dial_id].append(dial_turn)
return dialogs
def __getitem__(self, item):
pass
def __len__(self):
pass
def collate_fn(self, data_batch):
pass
class UBARDataset(_BaseDataset):
def __init__(self, args, tokenizer, data_path, mode, kb):
super(UBARDataset, self).__init__(args)
self.args = args
self.kb = kb
self.tokenizer = tokenizer
args.mode = mode
self.pad_token_id = 0
self._load_data(data_path, mode)
def _load_data(self, data_path, mode):
encoded_file_list = {'train': 'train.encoded.UBARdata.json', 'dev': 'dev.encoded.UBARdata.json',
'test': 'test.encoded.UBARdata.json'}
encoded_file = encoded_file_list[mode]
encoded_file = os.path.join(data_path, encoded_file)
if not os.path.exists(encoded_file):
logging.info('Encoding data and save the encoded data in {}'.format(encoded_file))
raw_data_path = os.path.join(data_path, mode + '.json')
dialogs = self.read_data(raw_data_path)
encoded_data = self._get_seqs(dialogs)
json.dump(encoded_data, open(encoded_file, 'w', encoding='utf-8'), indent=2, ensure_ascii=False)
self.data = copy.deepcopy(encoded_data)
random.shuffle(self.data)
else:
logging.info('loading encoded data from {}'.format(encoded_file))
encoded_data = json.load(open(encoded_file, 'r', encoding='utf-8'))
self.data = copy.deepcopy(encoded_data)
random.shuffle(self.data)
def read_data(self, path):
with open(path, 'r', encoding='utf-8') as f:
raw_data = json.load(f)
data_file = read_name_dialog_from_file(raw_data)
return data_file
def _get_seqs(self, dialogs):
data_list = []
for fn, dial in dialogs.items():
encoded_dial = []
for idx, t in enumerate(dial['log']):
enc = {}
enc['dial_id'] = fn
enc['user'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_u> ' +
t['user'] + ' <eos_u>'))
enc['resp'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_r> ' +
t['sys'] + ' <eos_r>'))
enc['bspn'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_b> ' + t['bs'] + ' <eos_b>'))
enc['aspn'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_a> ' + '[' +
t['sys_act'] + ']' + t['sys_act_param'] + ' <eos_a>'))
enc['turn_num'] = t['turn_num']
if 'db' in t:
enc['db'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_db> ' + '[SEP]'.join(t['db']) + ' <eos_db>'))
else:
enc['db'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_db> ' + ' <eos_db>'))
enc['cc'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_c> ' + t['correct_char'] + ' <eos_c>'))
enc['kdpn'] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(
'<sos_k>' + t['kdpn'] + '<eos_k>'))
encoded_dial.append(enc)
data_list.append(encoded_dial)
return data_list
def convert_batch_session(self, dial_batch):
"""
convert the whole session for training
concat [U_0, K_0, c_0, B_0, A_0, R_0, ... , U_n, K_n, c_n, B_n, A_n, R_n]
"""
inputs, outputs = {}, {}
contexts = []
cell_list = ['user', 'kdpn', 'cc', 'bspn', 'aspn', 'db', 'resp']
for idx, dial in enumerate(dial_batch):
context = []
for turn_num, turn in enumerate(dial):
for cell in cell_list:
context.extend(turn[cell])
contexts.append(context)
inputs['contexts'] = contexts
inputs['contexts_np'], inputs['lengths'] = utils.padSeqs_gpt(inputs['contexts'], self.pad_token_id)
return inputs
def get_batches(self, batch_size):
"""
compute dataset stats.
"""
global dia_count
log_str = ''
dial = self.data
turn_bucket = self._bucket_by_turn(dial)
# self._shuffle_turn_bucket(turn_bucket)
all_batches = []
num_training_steps = 0
num_turns = 0
num_dials = 0
for k in turn_bucket:
if k == 1 or k >= 17:
continue
batches = self._construct_mini_batch(turn_bucket[k], batch_size)
log_str += "turn num:%d, batch num: %d last batch len: %d\n" % (
k, len(batches), len(batches[-1]))
# print("turn num:%d, dial num:v%d, batch num: %d, "%(k, len(turn_bucket[k]), len(batches)))
num_training_steps += k * len(batches)
num_turns += k * len(turn_bucket[k])
num_dials += len(turn_bucket[k])
all_batches += batches
log_str += 'total batch num: %d\n' % len(all_batches)
# print('total batch num: %d'%len(all_batches))
return all_batches
def convert_turn_eval(self, turn, pv_turn, first_turn=False):
"""
        input: [all previous ubar context, U_t, B_t, A_t] predict R_t
        first turn: [U_t, B_t, A_t] predict R_t
        regarding the context, using the full previous ubar history can be slow; only the previous turn may be used instead
"""
inputs = {}
context_list = []
# predict_list = []
prompt = ''
# predict bspn aspn resp. db are not predicted. this part tbd.
context_list = ['user']
# predict_list = ['bspn', 'aspn','db', 'resp']
prompt = '<sos_k>'
if first_turn:
context = []
for c in context_list:
context += turn[c]
inputs['context'] = context + self.tokenizer.encode(prompt, add_special_tokens=False)
inputs['labels'] = context
else:
context = []
for c in context_list:
context += turn[c]
pv_context = pv_turn['labels'] + pv_turn['kdpn'] + pv_turn['cc'] + pv_turn['bspn'] + pv_turn['aspn'] + pv_turn['db'] + pv_turn['resp']
# prompt response, add sos_r
inputs['context'] = pv_context + context + self.tokenizer.encode(prompt, add_special_tokens=False)
inputs['labels'] = pv_context + context # use all previous ubar history
if len(inputs['context']) > 900:
# print('len exceeds 900')
inputs['context'] = inputs['context'][-900:]
return inputs
def wrap_result_lm(self, result_dict, eos_syntax=None):
results = []
eos_syntax = ontology.eos_tokens if not eos_syntax else eos_syntax
sos_syntax = ontology.sos_tokens
# ground truth bs, as, ds.. generate response
field = ['dial_id', 'turn_num', 'user', 'bspn_gen', 'resp_gen', 'resp', 'aspn_gen', 'aspn',
'bspn', 'kdpn', 'kdpn_gen', 'cc', 'cc_gen']
for dial_id, turns in result_dict.items():
for turn_idx, turn in enumerate(turns):
entry = {'dial_id': dial_id}
for key in field:
if key in ['dial_id']:
continue
v = turn.get(key, '')
if key in eos_syntax and v != '':
# remove eos tokens
v = self.tokenizer.decode(v)
v = v.split()
# remove eos/sos in span
if eos_syntax[key] in v:
v.remove(eos_syntax[key])
if sos_syntax[key] in v:
v.remove(sos_syntax[key])
v = " ".join(v)
else:
pass # v = v
entry[key] = v
results.append(entry)
return results, field
def __len__(self):
return len(self.data) | 12,361 | 36.460606 | 146 | py |
SSTOD | SSTOD-main/reader/DataBase.py | import json
import logging
import os
import pickle
import random
import re
import pypinyin
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
import ontology
class DB(object):
def __init__(self, path, tfidf_path):
self.db = []
self.read_db(path)
# self.vectorizer, self.weight = self.load_tf_idf_vector(tfidf_path)
def read_db(self, path):
with open(path, 'r', encoding='utf-8') as f:
db_file = json.load(f)
for word, descs in db_file['nameDict'].items():
word_descs = descs['word']
struct_descs = descs['pack']
for desc in word_descs:
self.db.append((word, 'word', desc))
for desc in struct_descs:
self.db.append((word, 'pack', desc))
self.word2kd = {}
for kd in self.db:
if kd[0] in self.word2kd:
self.word2kd[kd[0]].append(kd[2])
else:
self.word2kd[kd[0]] = [kd[2]]
# def _save_tf_idf_vector(self, file_path):
# logging.info('saving tfidf vector')
# all_data = [k[2] for k in self.db]
# pinyin_list = [' '.join(pypinyin.lazy_pinyin(s)) for s in all_data]
#
# vectorizer = TfidfVectorizer(analyzer='word', lowercase=False)
# tfidf = vectorizer.fit_transform(pinyin_list)
# weight = tfidf.toarray()
# pickle.dump([vectorizer, weight], open(file_path, 'wb'))
#
# return vectorizer, weight
#
# def _load_tf_idf_vector(self, file_path):
# logging.info('loading tfidf vector')
# vectorizer, weight = pickle.load(open(file_path, 'rb'))
# return vectorizer, weight
#
# def load_tf_idf_vector(self, save_path):
# if not os.path.exists(save_path):
# os.mkdir(save_path)
# file_path = os.path.join(save_path, 'tfidf.pkl')
# if os.path.exists(file_path):
# vectorizer, weight = self._load_tf_idf_vector(file_path)
# else:
# vectorizer, weight = self._save_tf_idf_vector(file_path)
# return vectorizer, weight
#
# # TOP N
# def research(self, batch, n: int=10):
# x_word = [' '.join(pypinyin.lazy_pinyin(sen)) for sen in batch]
# tf_idf = self.vectorizer.transform(x_word)
# cosine_similarities = linear_kernel(tf_idf, self.weight)
# related_docs_indices = cosine_similarities.argsort()[:, :-n-1:-1]
#
# related_docs = [[self.db[ind] for ind in related_docs_indices[b]] for b in range(len(batch))]
# return related_docs
def act_to_DBPointer(self, action):
"""
Select a knowledge for a sub-slot.
"""
if '[' in action:
act_s_idx = action.index('[')
else:
act_s_idx = 0
if ']' in action:
act_e_idx = action.index(']')
else:
act_e_idx = len(action) - 1
act = action[act_s_idx + 1:act_e_idx]
param = action[act_e_idx + 1:].strip()
        if act not in ontology.action:
return ''
db_pointer = []
if act == 'explicit_confirm':
for w in param:
if w in self.word2kd:
db = random.choice(self.word2kd[w])
db_pointer.append(db)
db_results = ';'.join(db_pointer)
if len(db_results) > 40:
db_results = db_results[:40]
return db_results
if __name__ == '__main__':
class KdDataset(Dataset):
def __init__(self, filename):
super(KdDataset, self).__init__()
self.kd, self.c_char = self.read_file(filename)
assert len(self.kd) == len(self.c_char)
def __getitem__(self, item):
return self.kd[item], self.c_char[item]
def __len__(self):
return len(self.kd)
@staticmethod
def read_file(filename):
dialogs = json.load(open(filename, 'r', encoding='utf-8'))
all_kd = []
all_char = []
for dialog in dialogs.values():
for turn in dialog['turns']:
if 'staff' in turn:
continue
kds = turn['user_label'][0]['param-knowledge']
for kd in kds:
if kd['knowledge']:
                        if re.search('[A-Za-z]', kd['knowledge'][0]['string']):
continue
all_kd.append(kd['knowledge'][0]['string'])
all_char.append(kd['correct_char'])
return all_kd, all_char
db = DB('../../data/database.json', '../utils/tfidf/')
dataset = KdDataset('../../data/test.json')
kd_loader = DataLoader(dataset, batch_size=4000, shuffle=True)
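    # NOTE: this self-test calls db.research(), which is defined above but currently
    # commented out; it is assumed to be re-enabled before running this block.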
succ_count = 0
total_count = 0
for batch in tqdm(kd_loader):
x, y_target = batch
y_predict = db.research(x, 45)
y_predict_char = [[kd[0] for kd in sample] for sample in y_predict]
for i in range(len(x)):
succ_count += (y_target[i] in y_predict_char[i])
# if y_target[i] not in y_predict_char[i]:
# print(x[i], y_target[i])
total_count += 1
print('succ rate: ', succ_count / total_count)
| 5,465 | 34.72549 | 103 | py |
SSTOD | SSTOD-main/utils/optim.py | import os
import torch
from torch import optim
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
class Optim(object):
def __init__(self, **kwargs):
params = kwargs
self.lr = params.get('learning_rate', 1e-4)
self.method = params.get('method', 'adamw')
self.weight_decay = params.get('weight_decay', 0.0)
self.lr_decay = params.get('lr_decay', 0.0)
self.warmup_steps = params.get('warmup_steps', -1)
def get_optimizer_scheduler(self, model, t_total):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
if self.method == 'sgd':
optimizer = optim.SGD(optimizer_grouped_parameters, lr=self.lr)
elif self.method == 'adam':
optimizer = optim.Adam(optimizer_grouped_parameters, lr=self.lr)
elif self.method == 'adamw':
# optimizer = optim.AdamW(optimizer_grouped_parameters, lr=self.lr)
optimizer = AdamW(optimizer_grouped_parameters, lr=self.lr)
else:
raise RuntimeError("invalid optim method: " + self.method)
num_warmup_steps = self.warmup_steps if self.warmup_steps >= 0 else int(t_total * 0.2)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
)
return optimizer, scheduler
| 1,719 | 43.102564 | 121 | py |
SSTOD | SSTOD-main/utils/utils.py | import logging
import numpy as np
import torch
from torch.autograd import Variable
from config import BIO_TAG
def log_first_inputs(log_dict):
logging.info("**** Input Examples ****")
for key, context in log_dict.items():
logging.info(key + ': ' + context)
def padSeqs_gpt(sequences, pad_id, maxlen=None):
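    """Post-pad the id sequences with pad_id to a common length (capped at 1024, GPT-2's
    context size; longer sequences are truncated from the front); returns the padded
    matrix and the original sequence lengths."""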
lengths = []
for x in sequences:
lengths.append(len(x))
num_samples = len(sequences)
    seq_maxlen = np.max(lengths)
    # maxlen = 1024
    if seq_maxlen > 1024: # gpt2.n_ctx
        # print('maxlen exceeds 1024')
        maxlen = 1024
    else:
        maxlen = seq_maxlen
# tokenizer.encode('<|endoftext|>') = ['50256']
# All labels set to ``-100`` are ignored (masked), the loss is only
# computed for labels in ``[0, ..., config.vocab_size]`` (from modeling_gpt2.GPT2LMHeadModel)
x = (np.ones((num_samples, maxlen)) * pad_id)
for idx, s in enumerate(sequences):
if not len(s):
print('empty list was found in padSeqs')
# trunc method = 'pre'
trunc = s[-maxlen:]
trunc = np.asarray(trunc)
# pad method = 'post'
x[idx, :len(trunc)] = trunc
return x, lengths
def padSeqs(sequences, maxlen=None, truncated = False, pad_method='post',
trunc_method='pre', dtype='int32', value=0.):
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
lengths = []
for x in sequences:
if not hasattr(x, '__len__'):
raise ValueError('`sequences` must be a list of iterables. '
'Found non-iterable: ' + str(x))
lengths.append(len(x))
num_samples = len(sequences)
seq_maxlen = np.max(lengths)
if maxlen is not None and truncated:
maxlen = min(seq_maxlen, maxlen)
else:
maxlen = seq_maxlen
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if not len(s):
print('empty list/array was found')
continue # empty list/array was found
if trunc_method == 'pre':
trunc = s[-maxlen:]
elif trunc_method == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % trunc_method)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if pad_method == 'post':
x[idx, :len(trunc)] = trunc
elif pad_method == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % pad_method)
return x
def maskedNll(seq, target, pad_id=0):
"""
Compute the Cross Entropy Loss of ground truth (target) sentence given the model
S: <START>, E: <END>, W: word token, 1: padding token, P(*): logProb
Teacher forced logProbs (seq):
[P(W1) P(W2) P(E) - - -]
Required gtSeq (target):
[ W1 W2 E 1 1 1]
Mask (non-zero tokens in target):
[ 1 1 1 0 0 0]
"""
# Generator a mask of non-padding (non-zero) tokens
mask = target.data.ne(pad_id)
loss = 0
assert isinstance(target, Variable)
if isinstance(target, Variable):
mask = Variable(mask, volatile=target.volatile)
gtLogProbs = torch.gather(seq, 2, target.unsqueeze(2)).squeeze(2)
maskedNLL = torch.masked_select(gtLogProbs, mask)
nll_loss = -torch.sum(maskedNLL) / seq.size(1)
return nll_loss | 4,035 | 32.081967 | 114 | py |
WWW2018_Camel | WWW2018_Camel-master/code/utility.py | import six.moves.cPickle as pickle
#import pandas as pd
import numpy as np
import string
import re
import random
from keras.preprocessing import sequence
from itertools import *
class input_data:
def __init__(self, args):
self.args = args
# direct paper-author relation
p_a_dir_list_train = [[] for k in range(self.args.paper_num)]
p_a_dir_list_test = [[] for k in range(self.args.paper_num)]
author_train = [0] * self.args.author_num
dir_relation_f = ["/paper-author-list-train.txt", "/paper-author-list-test.txt"]
#p_a_list_train_f = open(self.args.data_path + "/paper_author_list_train.txt", "r")
for f_index in range(len(dir_relation_f)):
f_name = dir_relation_f[f_index]
neigh_f = open(self.args.data_path + f_name, "r")
for line in neigh_f:
line = line.strip()
                p_index = int(re.split(':',line)[0])
a_list = re.split(',',re.split(':',line)[1])
if f_name == "/paper-author-list-train.txt":
for i in range(len(a_list)):
p_a_dir_list_train[p_index].append('a'+str(a_list[i]))
author_train[int(a_list[i])] = 1
elif f_name == "/paper-author-list-test.txt":
for j in range(len(a_list)):
p_a_dir_list_test[p_index].append('a'+str(a_list[j]))
neigh_f.close()
self.p_a_dir_list_train = p_a_dir_list_train
self.p_a_dir_list_test = p_a_dir_list_test
#print p_a_dir_list_test[13113]
self.dir_len = sum(len(x) for x in self.p_a_dir_list_train)
self.author_train = author_train
test_p_id_list = []
for i in range(self.args.paper_num):
if len(p_a_dir_list_test[i]):
test_p_id_list.append(i)
self.test_p_id_list = test_p_id_list
# indirect paper-author relation from heterogeneous walk
p_a_indir_list_train = [[] for k in range(self.args.paper_num)]
def p_a_indir_set(path):
indir_relation_f = ["/APA_walk.txt", "/APPA_walk.txt", "/APVPA_walk.txt"]
#het_walk_f = open(self.args.data_path + "/het_random_walk.txt", "r")
for f_index in range(len(indir_relation_f)):
f_name = indir_relation_f[f_index]
neigh_f = open(self.args.data_path + f_name, "r")
for line in neigh_f:
line=line.strip()
path = re.split(' ',line)
for k in range(len(path)):
curr_node = path[k]
if curr_node[0] == 'p':
for w in range(k - self.args.window, k + self.args.window +1):
if w >= 0 and w < len(path) and w != k:
neigh_node = path[w]
if neigh_node[0] == 'a' and neigh_node not in self.p_a_dir_list_train[int(curr_node[1:])]:
p_a_indir_list_train[int(curr_node[1:])].append(neigh_node)
neigh_f.close()
return p_a_indir_list_train
self.p_a_indir_list_train = p_a_indir_set(self.args.data_path)
self.indir_len = sum(len(x) for x in self.p_a_indir_list_train)
def load_p_content(path, word_n = 100000):
f = open(path, 'rb')
p_content_set = pickle.load(f)
f.close()
def remove_unk(x):
return [[1 if w >= word_n else w for w in sen] for sen in x]
p_content, p_content_id = p_content_set
p_content = remove_unk(p_content)
p_content_set = (p_content, p_content_id)
return p_content_set
def load_word_embed(path, word_n = 54559, word_dim = 128):
word_embed = np.zeros((word_n + 2, word_dim))
f = open(path,'r')
for line in islice(f, 1, None):
index = int(line.split()[0])
embed = np.array(line.split()[1:])
word_embed[index] = embed
return word_embed
# text content (e.g., abstract) of paper and pretrain word embedding
self.p_content, self.p_content_id = load_p_content(path = self.args.data_path + '/content.pkl')
self.p_content = sequence.pad_sequences(self.p_content, maxlen = self.args.c_len, value = 0., padding = 'post', truncating = 'post')
self.word_embed = load_word_embed(path = self.args.data_path + '/word_embedding.txt')
def p_a_a_dir_next_batch(self):
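        """Build (paper, positive author, sampled negative author) triples from the direct
        paper-author links in the training set."""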
p_a_a_dir_list_batch = []
for i in range(self.args.paper_num):
for j in range(len(self.p_a_dir_list_train[i])):
a_neg = random.randint(0, self.args.author_num - 1)
while (('a'+str(a_neg)) in self.p_a_dir_list_train[i]):
a_neg = random.randint(0, self.args.author_num - 1)
a_pos = int(self.p_a_dir_list_train[i][j][1:])
triple=[i, a_pos, a_neg]
p_a_a_dir_list_batch.append(triple)
return p_a_a_dir_list_batch
def p_a_a_indir_next_batch(self):
p_a_a_indir_list_batch = []
p_threshold = float(self.dir_len)/self.indir_len + 3e-3
#print p_threshold
for i in range(self.args.paper_num):
for j in range(len(self.p_a_indir_list_train[i])):
if random.random() < p_threshold:
a_neg = random.randint(0, self.args.author_num - 1)
while (('a'+str(a_neg)) in self.p_a_dir_list_train[i]):
a_neg = random.randint(0, self.args.author_num - 1)
a_pos = int(self.p_a_indir_list_train[i][j][1:])
triple=[i, a_pos, a_neg]
p_a_a_indir_list_batch.append(triple)
return p_a_a_indir_list_batch
def gen_content_mini_batch(self, triple_batch):
p_c_data = []
for i in range(len(triple_batch)):
c_temp = (self.p_content[triple_batch[i][0]]).reshape(self.args.c_len)
p_c_data.append(c_temp)
return p_c_data
def gen_evaluate_neg_ids(self):
#neg_num = 100
author_n_ave = 0
paper_n = 0
p_a_neg_ids_f = open(self.args.data_path + "/paper_author_neg_ids.txt", "w")
for i in range(self.args.paper_num):
if len(self.p_a_dir_list_test[i]):
p_a_neg_ids_f.write(str(i) + ":")
neg_num = 100 - len(self.p_a_dir_list_test[i])
for j in range(neg_num):
neg_id = random.randint(0, self.args.author_num - 1)
neg_id_str = 'a' + str(neg_id)
while (neg_id_str in self.p_a_dir_list_test[i]):
neg_id = random.randint(0, self.args.author_num - 1)
neg_id_str = 'a' + str(neg_id)
p_a_neg_ids_f.write(str(neg_id) + ",")
p_a_neg_ids_f.write("\n")
author_n_ave += len(self.p_a_dir_list_test[i])
paper_n += 1
p_a_neg_ids_f.close()
print ("author_n_ave_test: " + str(float(author_n_ave)/paper_n))
def Camel_evaluate(self, p_text_deep_f, a_latent_f, top_K):
p_id_map = [0] * self.args.paper_num
new_id_temp = 0
for k in range(len(self.test_p_id_list)):
p_id_temp = self.test_p_id_list[k]
p_id_map[p_id_temp] = new_id_temp
new_id_temp += 1
p_a_neg_list_test = [[] for k in range(self.args.paper_num)]
p_a_neg_ids_f = open(self.args.data_path + "/paper_author_neg_ids.txt", "r")
for line in p_a_neg_ids_f:
line = line.strip()
p_id = int(re.split(':', line)[0])
a_list = re.split(':', line)[1]
a_list_ids = re.split(',', a_list)
for i in range(len(a_list_ids) - 1):
p_a_neg_list_test[p_id].append(int(a_list_ids[i]))
p_a_neg_ids_f.close()
# only evaluate test paper which has author in training data
test_p_has_train_a = [0] * self.args.paper_num
for i in range(self.args.paper_num):
for j in range(len(self.p_a_dir_list_test[i])):
a_id_temp = int(self.p_a_dir_list_test[i][j][1:])
if self.author_train[a_id_temp]:
test_p_has_train_a[i] += 1
# Recall/Precision Scores
recall_ave = 0
pre_ave = 0
evaluate_p_num = 0
ave_a_num = 0.0
for i in range(self.args.paper_num):
if len(self.p_a_dir_list_test[i]) and len(p_a_neg_list_test[i]) and test_p_has_train_a[i]:
evaluate_p_num += 1
correct_num = 0
score_list = []
for j in range(len(self.p_a_dir_list_test[i])):
a_id_temp = int(self.p_a_dir_list_test[i][j][1:])
score_temp = np.dot(p_text_deep_f[p_id_map[i]], a_latent_f[a_id_temp])
score_list.append(score_temp)
for k in range(len(p_a_neg_list_test[i])):
a_id_temp = p_a_neg_list_test[i][k]
score_temp = np.dot(p_text_deep_f[p_id_map[i]], a_latent_f[a_id_temp])
score_list.append(score_temp)
score_list.sort()
score_threshold = score_list[ - top_K - 1]
for jj in range(len(self.p_a_dir_list_test[i])):
a_id_temp = int(self.p_a_dir_list_test[i][jj][1:])
if self.author_train[a_id_temp]:
score_temp = np.dot(p_text_deep_f[p_id_map[i]], a_latent_f[a_id_temp])
if score_temp > score_threshold:
correct_num += 1
recall_ave += float(correct_num) / test_p_has_train_a[i]
pre_ave += float(correct_num) / top_K
ave_a_num += test_p_has_train_a[i]
print ("total evaluate paper number: " + str(evaluate_p_num))
print ("average evaluate author number: " + str(ave_a_num / evaluate_p_num))
recall_ave = recall_ave / evaluate_p_num
pre_ave = pre_ave / evaluate_p_num
F_1= (2 * recall_ave * pre_ave) /(recall_ave + pre_ave)
print ("recall_ave@top_K: " + str(recall_ave))
print ("pre_ave@top_K: " + str(pre_ave))
# AUC Score
AUC_ave = 0
for i in range(self.args.paper_num):
if len(self.p_a_dir_list_test[i]) and len(p_a_neg_list_test[i]) and test_p_has_train_a[i]:
neg_score_list = []
correct_num = 0
pair_num = 0
for k in range(len(p_a_neg_list_test[i])):
a_id_temp = p_a_neg_list_test[i][k]
score_temp = np.dot(p_text_deep_f[p_id_map[i]], a_latent_f[a_id_temp])
neg_score_list.append(score_temp)
for j in range(len(self.p_a_dir_list_test[i])):
a_id_temp = int(self.p_a_dir_list_test[i][j][1:])
if self.author_train[a_id_temp]:
pos_score = np.dot(p_text_deep_f[p_id_map[i]], a_latent_f[a_id_temp])
for jj in range(len(neg_score_list)):
pair_num += 1
if pos_score > neg_score_list[jj]:
correct_num += 1
AUC_ave += float(correct_num) / pair_num
AUC_ave = AUC_ave / evaluate_p_num
print ("AUC_ave: " + str(AUC_ave))
| 9,449 | 34.130112 | 135 | py |
WWW2018_Camel | WWW2018_Camel-master/code/Camel.py | import tensorflow as tf
import keras
from keras.preprocessing import sequence
import cPickle as pkl
import utility as U
from itertools import *
import argparse
import os
import random
import numpy as np
# input arguments
parser = argparse.ArgumentParser(description='demo code of Camel')
parser.add_argument('--author_num', type = int, default = 28649,
help = 'max id of author')
parser.add_argument('--paper_num', type = int, default = 21046,
help = 'max id of paper')
parser.add_argument('--embed_dim', type = int, default = 128,
help = 'embed dimension of author and paper')
# parser.add_argument('--hidden_n', type = int, default = 128,
# help = 'hidden dimension of GRU encoder')
parser.add_argument('--model_path', type=str, default='../Camel',
help='path to save model')
parser.add_argument('--window', type = int, default = 6,
help = 'window size for indirect relation')
parser.add_argument('--c_len', type = int, default = 100,
help = 'max len of paper content')
parser.add_argument('--batch_size', type = int, default = 500,
help = 'batch size of training')
parser.add_argument('--learn_rate', type = float, default = 0.001,
help = 'learning rate')
parser.add_argument('--train_iter_max', type = int, default = 1000,
                   help = 'max number of training iterations')
parser.add_argument('--save_model_freq', type = int, default = 5,
                   help = 'number of iterations to save model')
parser.add_argument('--c_reg', type = float, default = 0.001,
help = 'coefficient of regularization')
parser.add_argument('--margin_d', type = float, default = 0.1,
help = 'margin distance of augmented component')
parser.add_argument('--c_tradeoff', type = float, default = 0.1,
help = 'tradeoff coefficient of augmented component')
parser.add_argument('--data_path', type=str, default='../data/AMiner-T-2013',
help='path to data')
parser.add_argument('--train_test_label', type= int, default = 0,
help='train/test label: 0 - train, 1 - test, 2 - tf graph test/generate negative ids for evaluation')
parser.add_argument('--top_K', type= int, default = 10,
help='length of return list per paper in evaluation')
parser.add_argument('--seed', type= int, default = 100,
help='random seed')
args = parser.parse_args()
print(args)
# parameters setting
author_n = args.author_num
paper_n = args.paper_num
top_K = args.top_K
embed_d = args.embed_dim
hidden_n = args.embed_dim
c_len = args.c_len
c_reg = args.c_reg
margin_d = args.margin_d
c_tradeoff = args.c_tradeoff
batch_s = args.batch_size
lr = args.learn_rate
iter_max = args.train_iter_max
save_freq = args.save_model_freq
data_path = args.data_path
model_path = args.model_path
train_test_label = args.train_test_label
random_seed = args.seed
# data preparation
input_data = U.input_data(args = args)
word_embed = input_data.word_embed
# fix seed
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# generate negative author ids in evaluation
if train_test_label == 2:
print "test"
#p_text_all = input_data.p_content[input_data.test_p_id_list]
#print len(p_text_all)
#input_data.gen_evaluate_neg_ids()
# Camel (objective function formulation) begin #
if train_test_label == 0 or train_test_label == 1: # build the graph for both training and testing
# tensor preparation
# direct and indirect triple relations
p_a_a_dir = tf.placeholder(tf.int32, [None, 3])
p_a_a_indir = tf.placeholder(tf.int32, [None, 3])
# paper content in direct and indirect relations
p_c_dir_input = tf.placeholder(tf.int32, [None, c_len])
p_c_indir_input = tf.placeholder(tf.int32, [None, c_len])
# define latent features/parameters of author
author_embed = tf.Variable(tf.random_normal([author_n, embed_d], mean = 0, stddev = 0.01), name = "a_latent_pars")
# pretrain word embedding of paper content
p_c_dir_word_e = tf.cast(tf.nn.embedding_lookup(word_embed, p_c_dir_input), tf.float32)
p_c_indir_word_e = tf.cast(tf.nn.embedding_lookup(word_embed, p_c_indir_input), tf.float32)
# GRU encoder
cell = tf.contrib.rnn.GRUCell(hidden_n)
p_c_dir_deep_e, dir_state = tf.nn.dynamic_rnn(cell, p_c_dir_word_e, dtype = tf.float32)
with tf.variable_scope('', reuse=True):
p_c_indir_deep_e, indir_state = tf.nn.dynamic_rnn(cell, p_c_indir_word_e, dtype = tf.float32)
p_c_dir_e = tf.reduce_mean(p_c_dir_deep_e, 1)
p_c_indir_e = tf.reduce_mean(p_c_indir_deep_e, 1)
# accumuate loss
# loss of direct relation: distance metric learning
Loss_1 = []
for i in range(batch_s):
p_e = tf.gather(p_c_dir_e, i)
a_e_pos = tf.gather(author_embed, p_a_a_dir[i][1])
a_e_pos = tf.reshape(a_e_pos, [1, embed_d])
a_e_neg = tf.gather(author_embed, p_a_a_dir[i][2])
a_e_neg = tf.reshape(a_e_neg, [1, embed_d])
#margin loss
Loss_1.append(tf.maximum(margin_d + tf.reduce_sum(tf.square(tf.subtract(p_e, a_e_pos))) - tf.reduce_sum(tf.square(tf.subtract(p_e, a_e_neg))), tf.zeros([1, 1])))
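    # i.e. per triple: max(0, margin_d + ||p - a_pos||^2 - ||p - a_neg||^2)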
# loss of indirect relation: heterogeneous Skip-gram
bias = tf.Variable(0.1, trainable = True)
Loss_2 = []
for i in range(batch_s):
p_e = tf.gather(p_c_indir_e, i)
a_e_pos = tf.gather(author_embed, p_a_a_indir[i][1])
a_e_pos = tf.reshape(a_e_pos, [1, embed_d])
a_e_neg = tf.gather(author_embed, p_a_a_indir[i][2])
a_e_neg = tf.reshape(a_e_neg, [1, embed_d])
#cross entropy loss for graph smoothness constraint
# negative sampling degrades to cross entropy when negative size = 1
sum1 = tf.log(tf.sigmoid(tf.reduce_sum(tf.multiply(p_e, a_e_pos)) + bias))
sum2 = tf.log(tf.sigmoid(- tf.reduce_sum(tf.multiply(p_e, a_e_neg)) - bias))
Loss_2.append(- (sum1 + sum2))
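    # i.e. per triple: -log sigmoid(p . a_pos + bias) - log sigmoid(-(p . a_neg + bias))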
# joint loss
t_v = tf.trainable_variables()
reg_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in t_v])
# objective without graph smoothness constraint
#joint_loss = tf.reduce_sum(Loss_1) + c_reg * reg_loss
# objective of Camel
joint_loss = tf.reduce_sum(Loss_1) + c_tradeoff * tf.reduce_sum(Loss_2) + c_reg * reg_loss
# optimizer graph smoothness constraint
optimizer = tf.train.AdamOptimizer(learning_rate = lr).minimize(joint_loss)
# Camel (objective function formulation) end #
# train/test
if train_test_label == 0:# train model
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep = 2)
with tf.Session(config = tf.ConfigProto(inter_op_parallelism_threads = 2,
intra_op_parallelism_threads = 2)) as sess:
sess.run(init)
for epoch in range(1, iter_max):
print("epoch: "+str(epoch))
p_a_a_dir_batch = input_data.p_a_a_dir_next_batch()
p_a_a_indir_batch = input_data.p_a_a_indir_next_batch()
mini_batch_n = int(len(p_a_a_dir_batch)/batch_s)
# divide each iteration into some mini batches
for i in range(mini_batch_n):
p_a_a_dir_mini_batch = p_a_a_dir_batch[i*batch_s:(i+1)*batch_s]
p_c_dir_mini_batch = input_data.gen_content_mini_batch(p_a_a_dir_mini_batch)
p_a_a_indir_mini_batch = p_a_a_indir_batch[i*batch_s:(i+1)*batch_s]
p_c_indir_mini_batch = input_data.gen_content_mini_batch(p_a_a_indir_mini_batch)
feed_dict = {p_a_a_dir: p_a_a_dir_mini_batch, p_c_dir_input: p_c_dir_mini_batch, \
p_a_a_indir: p_a_a_indir_mini_batch, p_c_indir_input: p_c_indir_mini_batch}
_, loss_v = sess.run([optimizer, joint_loss], feed_dict)
if i == 0:
print("loss_value: "+str(loss_v))
# last mini batch
p_a_a_dir_mini_batch = p_a_a_dir_batch[len(p_a_a_dir_batch) - batch_s:len(p_a_a_dir_batch)]
p_c_dir_mini_batch = input_data.gen_content_mini_batch(p_a_a_dir_mini_batch)
p_a_a_indir_mini_batch = p_a_a_indir_batch[len(p_a_a_indir_batch) - batch_s:len(p_a_a_indir_batch)]
p_c_indir_mini_batch = input_data.gen_content_mini_batch(p_a_a_indir_mini_batch)
feed_dict = {p_a_a_dir: p_a_a_dir_mini_batch, p_c_dir_input: p_c_dir_mini_batch, \
p_a_a_indir: p_a_a_indir_mini_batch, p_c_indir_input: p_c_indir_mini_batch}
_, loss_v = sess.run([optimizer, joint_loss], feed_dict)
# save model for evaluation
if epoch % save_freq == 0:
if not os.path.exists(model_path):
os.makedirs(model_path)
saver.save(sess, model_path + "/Camel" + str(epoch) + ".ckpt")
# evaluation tracking during training
# better to batch generate paper embedding for large data
p_text_all = input_data.p_content[input_data.test_p_id_list]
p_text_deep_f = sess.run([p_c_dir_e], {p_c_dir_input: p_text_all})
p_text_deep_f = p_text_deep_f[0]
a_latent_f = tf.get_default_graph().get_tensor_by_name("a_latent_pars:0")
a_latent_f = a_latent_f.eval()
input_data.Camel_evaluate(p_text_deep_f, a_latent_f, top_K)
elif train_test_label == 1:# test model
with tf.Session(config = tf.ConfigProto(inter_op_parallelism_threads = 2,
intra_op_parallelism_threads = 2)) as sess:
restore_idx = 20 # set restore model idx
saver.restore(sess, model_path + "Camel" + str(restore_idx) + ".ckpt")
# load paper semantic deep embedding by learned rnn encoder
p_text_all = input_data.p_content
p_text_deep_f = sess.run([p_c_dir_e], {p_c_dir_input: p_text_all})
p_text_deep_f = p_text_deep_f[0]
# load learned author latent features/parameters
a_latent_f = tf.get_default_graph().get_tensor_by_name("a_latent_pars:0")
a_latent_f = a_latent_f.eval()
# model evaluation
# better to batch generate paper embedding for large data
input_data.Camel_evaluate(p_text_deep_f, a_latent_f, top_K)
else:
    print("tf graph test finish.")
| 9,396 | 35.996063 | 163 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/src/{{ cookiecutter.package_name }}/run.py | import logging
from typing import List, Optional
import hydra
import omegaconf
import pytorch_lightning as pl
from omegaconf import DictConfig, ListConfig
from pytorch_lightning import Callback
from nn_core.callbacks import NNTemplateCore
from nn_core.common import PROJECT_ROOT
from nn_core.common.utils import enforce_tags, seed_index_everything
from nn_core.model_logging import NNLogger
from nn_core.serialization import NNCheckpointIO
# Force the execution of __init__.py if this file is executed directly.
import {{ cookiecutter.package_name }} # noqa
from {{ cookiecutter.package_name }}.data.datamodule import MetaData
pylogger = logging.getLogger(__name__)
def build_callbacks(cfg: ListConfig, *args: Callback) -> List[Callback]:
"""Instantiate the callbacks given their configuration.
Args:
cfg: a list of callbacks instantiable configuration
*args: a list of extra callbacks already instantiated
Returns:
the complete list of callbacks to use
"""
callbacks: List[Callback] = list(args)
for callback in cfg:
pylogger.info(f"Adding callback <{callback['_target_'].split('.')[-1]}>")
callbacks.append(hydra.utils.instantiate(callback, _recursive_=False))
return callbacks
def run(cfg: DictConfig) -> str:
"""Generic train loop.
Args:
cfg: run configuration, defined by Hydra in /conf
Returns:
the run directory inside the storage_dir used by the current experiment
"""
seed_index_everything(cfg.train)
fast_dev_run: bool = cfg.train.trainer.fast_dev_run
if fast_dev_run:
pylogger.info(f"Debug mode <{cfg.train.trainer.fast_dev_run=}>. Forcing debugger friendly configuration!")
# Debuggers don't like GPUs nor multiprocessing
cfg.train.trainer.gpus = 0
cfg.nn.data.num_workers.train = 0
cfg.nn.data.num_workers.val = 0
cfg.nn.data.num_workers.test = 0
cfg.core.tags = enforce_tags(cfg.core.get("tags", None))
# Instantiate datamodule
pylogger.info(f"Instantiating <{cfg.nn.data['_target_']}>")
datamodule: pl.LightningDataModule = hydra.utils.instantiate(cfg.nn.data, _recursive_=False)
metadata: Optional[MetaData] = getattr(datamodule, "metadata", None)
if metadata is None:
pylogger.warning(f"No 'metadata' attribute found in datamodule <{datamodule.__class__.__name__}>")
# Instantiate model
pylogger.info(f"Instantiating <{cfg.nn.module['_target_']}>")
model: pl.LightningModule = hydra.utils.instantiate(cfg.nn.module, _recursive_=False, metadata=metadata)
# Instantiate the callbacks
template_core: NNTemplateCore = NNTemplateCore(
restore_cfg=cfg.train.get("restore", None),
)
callbacks: List[Callback] = build_callbacks(cfg.train.callbacks, template_core)
storage_dir: str = cfg.core.storage_dir
logger: NNLogger = NNLogger(logging_cfg=cfg.train.logging, cfg=cfg, resume_id=template_core.resume_id)
pylogger.info("Instantiating the <Trainer>")
trainer = pl.Trainer(
default_root_dir=storage_dir,
plugins=[NNCheckpointIO(jailing_dir=logger.run_dir)],
logger=logger,
callbacks=callbacks,
**cfg.train.trainer,
)
pylogger.info("Starting training!")
trainer.fit(model=model, datamodule=datamodule, ckpt_path=template_core.trainer_ckpt_path)
if fast_dev_run:
pylogger.info("Skipping testing in 'fast_dev_run' mode!")
else:
if "test" in cfg.nn.data.datasets and trainer.checkpoint_callback.best_model_path is not None:
pylogger.info("Starting testing!")
trainer.test(datamodule=datamodule)
# Logger closing to release resources/avoid multi-run conflicts
if logger is not None:
logger.experiment.finish()
return logger.run_dir
@hydra.main(config_path=str(PROJECT_ROOT / "conf"), config_name="default")
def main(cfg: omegaconf.DictConfig):
run(cfg)
if __name__ == "__main__":
main()
| 3,995 | 32.579832 | 114 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/src/{{ cookiecutter.package_name }}/__init__.py | import logging
from nn_core.console_logging import NNRichHandler
# Required workaround because PyTorch Lightning configures the logging on import,
# thus the logging configuration defined in the __init__.py must be called before
# the lightning import otherwise it has no effect.
# See https://github.com/PyTorchLightning/pytorch-lightning/issues/1503
lightning_logger = logging.getLogger("pytorch_lightning")
# Remove all handlers associated with the lightning logger.
for handler in lightning_logger.handlers[:]:
lightning_logger.removeHandler(handler)
lightning_logger.propagate = True
FORMAT = "%(message)s"
logging.basicConfig(
format=FORMAT,
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
handlers=[
NNRichHandler(
rich_tracebacks=True,
show_level=True,
show_path=True,
show_time=True,
omit_repeated_times=True,
)
],
)
try:
from ._version import __version__ as __version__
except ImportError:
import sys
print(
"Project not installed in the current env, activate the correct env or install it with:\n\tpip install -e .",
file=sys.stderr,
)
__version__ = "unknown"
| 1,213 | 28.609756 | 117 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/src/{{ cookiecutter.package_name }}/modules/module.py | from torch import nn
# https://medium.com/@nutanbhogendrasharma/pytorch-convolutional-neural-network-with-mnist-dataset-4e8a4265e118
class CNN(nn.Module):
def __init__(self, num_classes: int):
super(CNN, self).__init__()
self.model = nn.Sequential(
nn.Conv2d(
in_channels=1,
out_channels=16,
kernel_size=5,
stride=1,
padding=2,
),
nn.SiLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(16, 32, 5, 1, 2),
nn.SiLU(),
nn.MaxPool2d(2),
)
self.conv2 = nn.Sequential()
self.out = nn.Linear(32 * 7 * 7, num_classes)
def forward(self, x):
x = self.model(x)
# [batch_size, 32 * 7 * 7]
x = x.view(x.size(0), -1)
output = self.out(x)
return output
| 887 | 27.645161 | 111 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/src/{{ cookiecutter.package_name }}/pl_modules/pl_module.py | import logging
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import hydra
import omegaconf
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torchmetrics
from torch.optim import Optimizer
from nn_core.common import PROJECT_ROOT
from nn_core.model_logging import NNLogger
from {{ cookiecutter.package_name }}.data.datamodule import MetaData
from {{ cookiecutter.package_name }}.modules.module import CNN
pylogger = logging.getLogger(__name__)
class MyLightningModule(pl.LightningModule):
logger: NNLogger
def __init__(self, metadata: Optional[MetaData] = None, *args, **kwargs) -> None:
super().__init__()
# Populate self.hparams with args and kwargs automagically!
# We want to skip metadata since it is saved separately by the NNCheckpointIO object.
# Be careful when modifying this instruction. If in doubt, don't do it :]
self.save_hyperparameters(logger=False, ignore=("metadata",))
self.metadata = metadata
# example
metric = torchmetrics.Accuracy()
self.train_accuracy = metric.clone()
self.val_accuracy = metric.clone()
self.test_accuracy = metric.clone()
self.model = CNN(num_classes=len(metadata.class_vocab))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Method for the forward pass.
'training_step', 'validation_step' and 'test_step' should call
this method in order to compute the output predictions and the loss.
Returns:
output_dict: forward output containing the predictions (output logits ecc...) and the loss if any.
"""
# example
return self.model(x)
def step(self, x, y) -> Mapping[str, Any]:
# example
logits = self(x)
loss = F.cross_entropy(logits, y)
return {"logits": logits.detach(), "loss": loss}
def training_step(self, batch: Any, batch_idx: int) -> Mapping[str, Any]:
# example
x, y = batch
step_out = self.step(x, y)
self.log_dict(
{"loss/train": step_out["loss"].cpu().detach()},
on_step=True,
on_epoch=True,
prog_bar=True,
)
self.train_accuracy(torch.softmax(step_out["logits"], dim=-1), y)
self.log_dict(
{
"acc/train": self.train_accuracy,
},
on_epoch=True,
)
return step_out
def validation_step(self, batch: Any, batch_idx: int) -> Mapping[str, Any]:
# example
x, y = batch
step_out = self.step(x, y)
self.log_dict(
{"loss/val": step_out["loss"].cpu().detach()},
on_step=False,
on_epoch=True,
prog_bar=True,
)
self.val_accuracy(torch.softmax(step_out["logits"], dim=-1), y)
self.log_dict(
{
"acc/val": self.val_accuracy,
},
on_epoch=True,
)
return step_out
def test_step(self, batch: Any, batch_idx: int) -> Mapping[str, Any]:
# example
x, y = batch
step_out = self.step(x, y)
self.log_dict(
{"loss/test": step_out["loss"].cpu().detach()},
)
self.test_accuracy(torch.softmax(step_out["logits"], dim=-1), y)
self.log_dict(
{
"acc/test": self.test_accuracy,
},
on_epoch=True,
)
return step_out
def configure_optimizers(
self,
) -> Union[Optimizer, Tuple[Sequence[Optimizer], Sequence[Any]]]:
"""Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- Single optimizer.
- List or Tuple - List of optimizers.
- Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
- Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'
key whose value is a single LR scheduler or lr_dict.
- Tuple of dictionaries as described, with an optional 'frequency' key.
- None - Fit will run without any optimizer.
"""
opt = hydra.utils.instantiate(self.hparams.optimizer, params=self.parameters(), _convert_="partial")
if "lr_scheduler" not in self.hparams:
return [opt]
scheduler = hydra.utils.instantiate(self.hparams.lr_scheduler, optimizer=opt)
return [opt], [scheduler]
@hydra.main(config_path=str(PROJECT_ROOT / "conf"), config_name="default")
def main(cfg: omegaconf.DictConfig) -> None:
"""Debug main to quickly develop the Lightning Module.
Args:
cfg: the hydra configuration
"""
_: pl.LightningModule = hydra.utils.instantiate(
cfg.model,
optim=cfg.optim,
_recursive_=False,
)
if __name__ == "__main__":
main()
| 5,083 | 30.190184 | 114 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/src/{{ cookiecutter.package_name }}/data/dataset.py | import hydra
import omegaconf
from torch.utils.data import Dataset
from torchvision.datasets import FashionMNIST
from nn_core.common import PROJECT_ROOT
from nn_core.nn_types import Split
class MyDataset(Dataset):
def __init__(self, split: Split, **kwargs):
super().__init__()
self.split: Split = split
# example
self.mnist = FashionMNIST(
kwargs["path"],
train=split == "train",
download=True,
transform=kwargs["transform"],
)
@property
def class_vocab(self):
return self.mnist.class_to_idx
def __len__(self) -> int:
# example
return len(self.mnist)
def __getitem__(self, index: int):
# example
return self.mnist[index]
def __repr__(self) -> str:
return f"MyDataset({self.split=}, n_instances={len(self)})"
@hydra.main(config_path=str(PROJECT_ROOT / "conf"), config_name="default")
def main(cfg: omegaconf.DictConfig) -> None:
"""Debug main to quickly develop the Dataset.
Args:
cfg: the hydra configuration
"""
_: Dataset = hydra.utils.instantiate(cfg.nn.data.datasets.train, split="train", _recursive_=False)
if __name__ == "__main__":
main()
| 1,247 | 23.470588 | 102 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/src/{{ cookiecutter.package_name }}/data/datamodule.py | import logging
from functools import cached_property, partial
from pathlib import Path
from typing import List, Mapping, Optional, Sequence, Union
import hydra
import omegaconf
import pytorch_lightning as pl
from omegaconf import DictConfig
from torch.utils.data import DataLoader, Dataset, random_split
from torch.utils.data.dataloader import default_collate
from torchvision import transforms
from nn_core.common import PROJECT_ROOT
from nn_core.nn_types import Split
pylogger = logging.getLogger(__name__)
class MetaData:
def __init__(self, class_vocab: Mapping[str, int]):
"""The data information the Lightning Module will be provided with.
This is a "bridge" between the Lightning DataModule and the Lightning Module.
There is no constraint on the class name nor in the stored information, as long as it exposes the
`save` and `load` methods.
The Lightning Module will receive an instance of MetaData when instantiated,
both in the train loop or when restored from a checkpoint.
This decoupling allows the architecture to be parametric (e.g. in the number of classes) and
DataModule/Trainer independent (useful in prediction scenarios).
MetaData should contain all the information needed at test time, derived from its train dataset.
Examples are the class names in a classification task or the vocabulary in NLP tasks.
MetaData exposes `save` and `load`. Those are two user-defined methods that specify
how to serialize and de-serialize the information contained in its attributes.
This is needed for the checkpointing restore to work properly.
Args:
class_vocab: association between class names and their indices
"""
# example
self.class_vocab: Mapping[str, int] = class_vocab
def save(self, dst_path: Path) -> None:
"""Serialize the MetaData attributes into the zipped checkpoint in dst_path.
Args:
dst_path: the root folder of the metadata inside the zipped checkpoint
"""
pylogger.debug(f"Saving MetaData to '{dst_path}'")
# example
(dst_path / "class_vocab.tsv").write_text(
"\n".join(f"{key}\t{value}" for key, value in self.class_vocab.items())
)
@staticmethod
def load(src_path: Path) -> "MetaData":
"""Deserialize the MetaData from the information contained inside the zipped checkpoint in src_path.
Args:
src_path: the root folder of the metadata inside the zipped checkpoint
Returns:
an instance of MetaData containing the information in the checkpoint
"""
pylogger.debug(f"Loading MetaData from '{src_path}'")
# example
lines = (src_path / "class_vocab.tsv").read_text(encoding="utf-8").splitlines()
class_vocab = {}
for line in lines:
key, value = line.strip().split("\t")
class_vocab[key] = value
return MetaData(
class_vocab=class_vocab,
)
def collate_fn(samples: List, split: Split, metadata: MetaData):
"""Custom collate function for dataloaders with access to split and metadata.
Args:
samples: A list of samples coming from the Dataset to be merged into a batch
split: The data split (e.g. train/val/test)
metadata: The MetaData instance coming from the DataModule or the restored checkpoint
Returns:
A batch generated from the given samples
"""
return default_collate(samples)
class MyDataModule(pl.LightningDataModule):
def __init__(
self,
datasets: DictConfig,
num_workers: DictConfig,
batch_size: DictConfig,
gpus: Optional[Union[List[int], str, int]],
# example
val_percentage: float,
):
super().__init__()
self.datasets = datasets
self.num_workers = num_workers
self.batch_size = batch_size
# https://pytorch-lightning.readthedocs.io/en/stable/common/trainer.html#gpus
self.pin_memory: bool = gpus is not None and str(gpus) != "0"
self.train_dataset: Optional[Dataset] = None
self.val_datasets: Optional[Sequence[Dataset]] = None
self.test_datasets: Optional[Sequence[Dataset]] = None
# example
self.val_percentage: float = val_percentage
@cached_property
def metadata(self) -> MetaData:
"""Data information to be fed to the Lightning Module as parameter.
Examples are vocabularies, number of classes...
Returns:
metadata: everything the model should know about the data, wrapped in a MetaData object.
"""
# Since MetaData depends on the training data, we need to ensure the setup method has been called.
if self.train_dataset is None:
self.setup(stage="fit")
return MetaData(class_vocab=self.train_dataset.dataset.class_vocab)
def prepare_data(self) -> None:
# download only
pass
def setup(self, stage: Optional[str] = None):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# Here you should instantiate your datasets, you may also split the train into train and validation if needed.
if (stage is None or stage == "fit") and (self.train_dataset is None and self.val_datasets is None):
# example
mnist_train = hydra.utils.instantiate(
self.datasets.train,
split="train",
transform=transform,
path=PROJECT_ROOT / "data",
)
train_length = int(len(mnist_train) * (1 - self.val_percentage))
val_length = len(mnist_train) - train_length
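            # split the full training set into disjoint train/val subsets according to val_percentage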
self.train_dataset, val_dataset = random_split(mnist_train, [train_length, val_length])
self.val_datasets = [val_dataset]
if stage is None or stage == "test":
self.test_datasets = [
hydra.utils.instantiate(
dataset_cfg,
split="test",
path=PROJECT_ROOT / "data",
transform=transform,
)
for dataset_cfg in self.datasets.test
]
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_dataset,
shuffle=True,
batch_size=self.batch_size.train,
num_workers=self.num_workers.train,
pin_memory=self.pin_memory,
collate_fn=partial(collate_fn, split="train", metadata=self.metadata),
)
def val_dataloader(self) -> Sequence[DataLoader]:
return [
DataLoader(
dataset,
shuffle=False,
batch_size=self.batch_size.val,
num_workers=self.num_workers.val,
pin_memory=self.pin_memory,
collate_fn=partial(collate_fn, split="val", metadata=self.metadata),
)
for dataset in self.val_datasets
]
def test_dataloader(self) -> Sequence[DataLoader]:
return [
DataLoader(
dataset,
shuffle=False,
batch_size=self.batch_size.test,
num_workers=self.num_workers.test,
pin_memory=self.pin_memory,
collate_fn=partial(collate_fn, split="test", metadata=self.metadata),
)
for dataset in self.test_datasets
]
def __repr__(self) -> str:
return f"{self.__class__.__name__}(" f"{self.datasets=}, " f"{self.num_workers=}, " f"{self.batch_size=})"
@hydra.main(config_path=str(PROJECT_ROOT / "conf"), config_name="default")
def main(cfg: omegaconf.DictConfig) -> None:
"""Debug main to quickly develop the DataModule.
Args:
cfg: the hydra configuration
"""
_: pl.LightningDataModule = hydra.utils.instantiate(cfg.data.datamodule, _recursive_=False)
if __name__ == "__main__":
main()
| 8,091 | 35.45045 | 118 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/tests/test_checkpoint.py | from importlib import import_module
from pathlib import Path
from typing import Any, Dict
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import LightningModule
from pytorch_lightning.core.saving import _load_state
from nn_core.serialization import NNCheckpointIO
from tests.conftest import load_checkpoint
from {{ cookiecutter.package_name }}.pl_modules.pl_module import MyLightningModule
from {{ cookiecutter.package_name }}.run import run
def test_load_checkpoint(run_trainings_not_dry: str, cfg_all_not_dry: DictConfig) -> None:
ckpts_path = Path(run_trainings_not_dry) / "checkpoints"
checkpoint_path = next(ckpts_path.glob("*"))
assert checkpoint_path
reference: str = cfg_all_not_dry.nn.module._target_
module_ref, class_ref = reference.rsplit(".", maxsplit=1)
module_class: LightningModule = getattr(import_module(module_ref), class_ref)
assert module_class is not None
checkpoint = NNCheckpointIO.load(path=checkpoint_path)
module = _load_state(cls=module_class, checkpoint=checkpoint, metadata=checkpoint["metadata"])
assert module is not None
assert sum(p.numel() for p in module.parameters())
def _check_cfg_in_checkpoint(checkpoint: Dict, _cfg: DictConfig) -> Dict:
assert "cfg" in checkpoint
assert checkpoint["cfg"] == _cfg
def _check_run_path_in_checkpoint(checkpoint: Dict) -> Dict:
assert "run_path" in checkpoint
assert checkpoint["run_path"]
checkpoint["run_path"]: str
assert checkpoint["run_path"].startswith("//")
def test_cfg_in_checkpoint(run_trainings_not_dry: str, cfg_all_not_dry: DictConfig) -> None:
checkpoint = load_checkpoint(run_trainings_not_dry)
_check_cfg_in_checkpoint(checkpoint, cfg_all_not_dry)
_check_run_path_in_checkpoint(checkpoint)
class ModuleWithCustomCheckpoint(MyLightningModule):
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
checkpoint["test_key"] = "test_value"
def test_on_save_checkpoint_hook(cfg_all_not_dry: DictConfig) -> None:
cfg = OmegaConf.create(cfg_all_not_dry)
cfg.nn.module._target_ = "tests.test_checkpoint.ModuleWithCustomCheckpoint"
output_path = Path(run(cfg))
checkpoint = load_checkpoint(output_path)
_check_cfg_in_checkpoint(checkpoint, cfg)
_check_run_path_in_checkpoint(checkpoint)
assert "test_key" in checkpoint
assert checkpoint["test_key"] == "test_value"
| 2,420 | 34.086957 | 98 | py |
nn-template | nn-template-main/{{ cookiecutter.repository_name }}/tests/conftest.py | import logging
import os
import shutil
from pathlib import Path
from typing import Dict, Union
import pytest
from hydra import compose, initialize
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig, OmegaConf, open_dict
from pytest import FixtureRequest, TempPathFactory
from pytorch_lightning import seed_everything
from nn_core.serialization import NNCheckpointIO
from {{ cookiecutter.package_name }}.run import run
logging.basicConfig(force=True, level=logging.DEBUG)
seed_everything(42)
TRAIN_MAX_NSTEPS = 1
#
# Base configurations
#
@pytest.fixture(scope="package")
def cfg(tmp_path_factory: TempPathFactory) -> DictConfig:
test_cfg_tmpdir = tmp_path_factory.mktemp("test_train_tmpdir")
with initialize(config_path="../conf"):
cfg = compose(config_name="default", return_hydra_config=True)
HydraConfig().set_config(cfg)
# Force the wandb dir to be in the temp folder
os.environ["WANDB_DIR"] = str(test_cfg_tmpdir)
# Force the storage dir to be in the temp folder
cfg.core.storage_dir = str(test_cfg_tmpdir)
yield cfg
shutil.rmtree(test_cfg_tmpdir)
#
# Training configurations
#
@pytest.fixture(scope="package")
def cfg_simple_train(cfg: DictConfig) -> DictConfig:
cfg = OmegaConf.create(cfg)
# Add test tag
cfg.core.tags = ["testing"]
# Disable gpus
cfg.train.trainer.gpus = 0
# Disable logger
cfg.train.logging.logger.mode = "disabled"
# Disable files upload because wandb in offline modes uses always /tmp
# as run.dir, which causes conflicts between multiple trainings
cfg.train.logging.upload.run_files = False
# Disable multiple workers in test training
cfg.nn.data.num_workers.train = 0
cfg.nn.data.num_workers.val = 0
cfg.nn.data.num_workers.test = 0
# Minimize the amount of work in test training
cfg.train.trainer.max_steps = TRAIN_MAX_NSTEPS
cfg.train.trainer.val_check_interval = TRAIN_MAX_NSTEPS
# Ensure the resuming is disabled
with open_dict(config=cfg):
cfg.train.restore = {}
cfg.train.restore.ckpt_or_run_path = None
cfg.train.restore.mode = None
return cfg
@pytest.fixture(scope="package")
def cfg_fast_dev_run(cfg_simple_train: DictConfig) -> DictConfig:
cfg_simple_train = OmegaConf.create(cfg_simple_train)
# Enable the fast_dev_run flag
cfg_simple_train.train.trainer.fast_dev_run = True
return cfg_simple_train
#
# Training configurations aggregations
#
@pytest.fixture(
scope="package",
params=[
"cfg_simple_train",
],
)
def cfg_all_not_dry(request: FixtureRequest):
return request.getfixturevalue(request.param)
@pytest.fixture(
scope="package",
params=[
"cfg_simple_train",
"cfg_fast_dev_run",
],
)
def cfg_all(request: FixtureRequest):
return request.getfixturevalue(request.param)
#
# Training fixtures
#
@pytest.fixture(
scope="package",
)
def run_trainings_not_dry(cfg_all_not_dry: DictConfig) -> str:
yield run(cfg=cfg_all_not_dry)
@pytest.fixture(
scope="package",
)
def run_trainings(cfg_all: DictConfig) -> str:
yield run(cfg=cfg_all)
#
# Utility functions
#
def get_checkpoint_path(storagedir: Union[str, Path]) -> Path:
ckpts_path = Path(storagedir) / "checkpoints"
checkpoint_path = next(ckpts_path.glob("*"))
assert checkpoint_path
return checkpoint_path
def load_checkpoint(storagedir: Union[str, Path]) -> Dict:
checkpoint = NNCheckpointIO.load(path=get_checkpoint_path(storagedir))
assert checkpoint
return checkpoint
| 3,621 | 23.308725 | 74 | py |
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/translate.py | ''' Translate input text with trained model. '''
import torch
import argparse
import dill as pickle
from tqdm import tqdm
import transformer.Constants as Constants
from torchtext.data import Dataset
from transformer.Models import Transformer
from transformer.Translator import Translator
def load_model(opt, device):
checkpoint = torch.load(opt.model, map_location=device)
model_opt = checkpoint['settings']
model = Transformer(
model_opt.src_vocab_size,
model_opt.trg_vocab_size,
model_opt.src_pad_idx,
model_opt.trg_pad_idx,
trg_emb_prj_weight_sharing=model_opt.proj_share_weight,
emb_src_trg_weight_sharing=model_opt.embs_share_weight,
d_k=model_opt.d_k,
d_v=model_opt.d_v,
d_model=model_opt.d_model,
d_word_vec=model_opt.d_word_vec,
d_inner=model_opt.d_inner_hid,
n_layers=model_opt.n_layers,
n_head=model_opt.n_head,
dropout=model_opt.dropout).to(device)
model.load_state_dict(checkpoint['model'])
print('[Info] Trained model state loaded.')
return model
def main():
'''Main Function'''
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model weight file')
parser.add_argument('-data_pkl', required=True,
help='Pickle file with both instances and vocabulary.')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                        be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5)
parser.add_argument('-max_seq_len', type=int, default=100)
parser.add_argument('-no_cuda', action='store_true')
# TODO: Translate bpe encoded files
#parser.add_argument('-src', required=True,
# help='Source sequence to decode (one line per sequence)')
#parser.add_argument('-vocab', required=True,
# help='Source sequence to decode (one line per sequence)')
# TODO: Batch translation
#parser.add_argument('-batch_size', type=int, default=30,
# help='Batch size')
#parser.add_argument('-n_best', type=int, default=1,
# help="""If verbose is set, will output the n_best
# decoded sentences""")
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
data = pickle.load(open(opt.data_pkl, 'rb'))
SRC, TRG = data['vocab']['src'], data['vocab']['trg']
opt.src_pad_idx = SRC.vocab.stoi[Constants.PAD_WORD]
opt.trg_pad_idx = TRG.vocab.stoi[Constants.PAD_WORD]
opt.trg_bos_idx = TRG.vocab.stoi[Constants.BOS_WORD]
opt.trg_eos_idx = TRG.vocab.stoi[Constants.EOS_WORD]
test_loader = Dataset(examples=data['test'], fields={'src': SRC, 'trg': TRG})
device = torch.device('cuda' if opt.cuda else 'cpu')
translator = Translator(
model=load_model(opt, device),
beam_size=opt.beam_size,
max_seq_len=opt.max_seq_len,
src_pad_idx=opt.src_pad_idx,
trg_pad_idx=opt.trg_pad_idx,
trg_bos_idx=opt.trg_bos_idx,
trg_eos_idx=opt.trg_eos_idx).to(device)
unk_idx = SRC.vocab.stoi[SRC.unk_token]
with open(opt.output, 'w') as f:
for example in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
#print(' '.join(example.src))
src_seq = [SRC.vocab.stoi.get(word, unk_idx) for word in example.src]
pred_seq = translator.translate_sentence(torch.LongTensor([src_seq]).to(device))
pred_line = ' '.join(TRG.vocab.itos[idx] for idx in pred_seq)
pred_line = pred_line.replace(Constants.BOS_WORD, '').replace(Constants.EOS_WORD, '')
#print(pred_line)
f.write(pred_line.strip() + '\n')
print('[Info] Finished.')
if __name__ == "__main__":
'''
Usage: python translate.py -model trained.chkpt -data multi30k.pt -no_cuda
'''
main()
| 4,077 | 36.072727 | 97 | py |
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/train.py | '''
This script handles the training process.
'''
import argparse
import math
import time
import dill as pickle
from tqdm import tqdm
import numpy as np
import random
import os
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchtext.data import Field, Dataset, BucketIterator
from torchtext.datasets import TranslationDataset
import transformer.Constants as Constants
from transformer.Models import Transformer
from transformer.Optim import ScheduledOptim
__author__ = "Yu-Hsiang Huang"
def cal_performance(pred, gold, trg_pad_idx, smoothing=False):
''' Apply label smoothing if needed '''
loss = cal_loss(pred, gold, trg_pad_idx, smoothing=smoothing)
pred = pred.max(1)[1]
gold = gold.contiguous().view(-1)
non_pad_mask = gold.ne(trg_pad_idx)
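    # count correct predictions and total words over non-padding target positions only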
n_correct = pred.eq(gold).masked_select(non_pad_mask).sum().item()
n_word = non_pad_mask.sum().item()
return loss, n_correct, n_word
def cal_loss(pred, gold, trg_pad_idx, smoothing=False):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.1
n_class = pred.size(1)
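        # build the smoothed target distribution: (1 - eps) on the gold class and
        # eps / (n_class - 1) spread uniformly over the remaining classes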
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
non_pad_mask = gold.ne(trg_pad_idx)
loss = -(one_hot * log_prb).sum(dim=1)
loss = loss.masked_select(non_pad_mask).sum() # average later
else:
loss = F.cross_entropy(pred, gold, ignore_index=trg_pad_idx, reduction='sum')
return loss
def patch_src(src, pad_idx):
src = src.transpose(0, 1)
return src
def patch_trg(trg, pad_idx):
trg = trg.transpose(0, 1)
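    # teacher forcing: the decoder input drops the last token and the gold labels drop the
    # first one, so the model at position t is trained to predict token t+1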
trg, gold = trg[:, :-1], trg[:, 1:].contiguous().view(-1)
return trg, gold
def train_epoch(model, training_data, optimizer, opt, device, smoothing):
''' Epoch operation in training phase'''
model.train()
total_loss, n_word_total, n_word_correct = 0, 0, 0
desc = ' - (Training) '
for batch in tqdm(training_data, mininterval=2, desc=desc, leave=False):
# prepare data
src_seq = patch_src(batch.src, opt.src_pad_idx).to(device)
trg_seq, gold = map(lambda x: x.to(device), patch_trg(batch.trg, opt.trg_pad_idx))
# forward
optimizer.zero_grad()
pred = model(src_seq, trg_seq)
# backward and update parameters
loss, n_correct, n_word = cal_performance(
pred, gold, opt.trg_pad_idx, smoothing=smoothing)
loss.backward()
optimizer.step_and_update_lr()
# note keeping
n_word_total += n_word
n_word_correct += n_correct
total_loss += loss.item()
loss_per_word = total_loss/n_word_total
accuracy = n_word_correct/n_word_total
return loss_per_word, accuracy
def eval_epoch(model, validation_data, device, opt):
''' Epoch operation in evaluation phase '''
model.eval()
total_loss, n_word_total, n_word_correct = 0, 0, 0
desc = ' - (Validation) '
with torch.no_grad():
for batch in tqdm(validation_data, mininterval=2, desc=desc, leave=False):
# prepare data
src_seq = patch_src(batch.src, opt.src_pad_idx).to(device)
trg_seq, gold = map(lambda x: x.to(device), patch_trg(batch.trg, opt.trg_pad_idx))
# forward
pred = model(src_seq, trg_seq)
loss, n_correct, n_word = cal_performance(
pred, gold, opt.trg_pad_idx, smoothing=False)
# note keeping
n_word_total += n_word
n_word_correct += n_correct
total_loss += loss.item()
loss_per_word = total_loss/n_word_total
accuracy = n_word_correct/n_word_total
return loss_per_word, accuracy
def train(model, training_data, validation_data, optimizer, device, opt):
''' Start training '''
# Use tensorboard to plot curves, e.g. perplexity, accuracy, learning rate
if opt.use_tb:
print("[Info] Use Tensorboard")
from torch.utils.tensorboard import SummaryWriter
tb_writer = SummaryWriter(log_dir=os.path.join(opt.output_dir, 'tensorboard'))
log_train_file = os.path.join(opt.output_dir, 'train.log')
log_valid_file = os.path.join(opt.output_dir, 'valid.log')
print('[Info] Training performance will be written to file: {} and {}'.format(
log_train_file, log_valid_file))
with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
log_tf.write('epoch,loss,ppl,accuracy\n')
log_vf.write('epoch,loss,ppl,accuracy\n')
def print_performances(header, ppl, accu, start_time, lr):
print(' - {header:12} ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, lr: {lr:8.5f}, '\
'elapse: {elapse:3.3f} min'.format(
header=f"({header})", ppl=ppl,
accu=100*accu, elapse=(time.time()-start_time)/60, lr=lr))
#valid_accus = []
valid_losses = []
for epoch_i in range(opt.epoch):
print('[ Epoch', epoch_i, ']')
start = time.time()
train_loss, train_accu = train_epoch(
model, training_data, optimizer, opt, device, smoothing=opt.label_smoothing)
train_ppl = math.exp(min(train_loss, 100))
# Current learning rate
lr = optimizer._optimizer.param_groups[0]['lr']
print_performances('Training', train_ppl, train_accu, start, lr)
start = time.time()
valid_loss, valid_accu = eval_epoch(model, validation_data, device, opt)
valid_ppl = math.exp(min(valid_loss, 100))
print_performances('Validation', valid_ppl, valid_accu, start, lr)
valid_losses += [valid_loss]
checkpoint = {'epoch': epoch_i, 'settings': opt, 'model': model.state_dict()}
if opt.save_mode == 'all':
model_name = 'model_accu_{accu:3.3f}.chkpt'.format(accu=100*valid_accu)
torch.save(checkpoint, model_name)
elif opt.save_mode == 'best':
model_name = 'model.chkpt'
if valid_loss <= min(valid_losses):
torch.save(checkpoint, os.path.join(opt.output_dir, model_name))
print(' - [Info] The checkpoint file has been updated.')
with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
log_tf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
epoch=epoch_i, loss=train_loss,
ppl=train_ppl, accu=100*train_accu))
log_vf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
epoch=epoch_i, loss=valid_loss,
ppl=valid_ppl, accu=100*valid_accu))
if opt.use_tb:
tb_writer.add_scalars('ppl', {'train': train_ppl, 'val': valid_ppl}, epoch_i)
tb_writer.add_scalars('accuracy', {'train': train_accu*100, 'val': valid_accu*100}, epoch_i)
tb_writer.add_scalar('learning_rate', lr, epoch_i)
def main():
'''
Usage:
python train.py -data_pkl m30k_deen_shr.pkl -log m30k_deen_shr -embs_share_weight -proj_share_weight -label_smoothing -output_dir output -b 256 -warmup 128000
'''
parser = argparse.ArgumentParser()
parser.add_argument('-data_pkl', default=None) # all-in-1 data pickle or bpe field
parser.add_argument('-train_path', default=None) # bpe encoded data
parser.add_argument('-val_path', default=None) # bpe encoded data
parser.add_argument('-epoch', type=int, default=10)
parser.add_argument('-b', '--batch_size', type=int, default=2048)
parser.add_argument('-d_model', type=int, default=512)
parser.add_argument('-d_inner_hid', type=int, default=2048)
parser.add_argument('-d_k', type=int, default=64)
parser.add_argument('-d_v', type=int, default=64)
parser.add_argument('-n_head', type=int, default=8)
parser.add_argument('-n_layers', type=int, default=6)
parser.add_argument('-warmup','--n_warmup_steps', type=int, default=4000)
parser.add_argument('-lr_mul', type=float, default=2.0)
parser.add_argument('-seed', type=int, default=None)
parser.add_argument('-dropout', type=float, default=0.1)
parser.add_argument('-embs_share_weight', action='store_true')
parser.add_argument('-proj_share_weight', action='store_true')
parser.add_argument('-scale_emb_or_prj', type=str, default='prj')
parser.add_argument('-output_dir', type=str, default=None)
parser.add_argument('-use_tb', action='store_true')
parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best')
parser.add_argument('-no_cuda', action='store_true')
parser.add_argument('-label_smoothing', action='store_true')
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
opt.d_word_vec = opt.d_model
# https://pytorch.org/docs/stable/notes/randomness.html
# For reproducibility
if opt.seed is not None:
torch.manual_seed(opt.seed)
torch.backends.cudnn.benchmark = False
# torch.set_deterministic(True)
np.random.seed(opt.seed)
random.seed(opt.seed)
if not opt.output_dir:
print('No experiment result will be saved.')
        raise ValueError('No experiment output directory specified.')
if not os.path.exists(opt.output_dir):
os.makedirs(opt.output_dir)
if opt.batch_size < 2048 and opt.n_warmup_steps <= 4000:
print('[Warning] The warmup steps may be not enough.\n'\
'(sz_b, warmup) = (2048, 4000) is the official setting.\n'\
'Using smaller batch w/o longer warmup may cause '\
'the warmup stage ends with only little data trained.')
device = torch.device('cuda' if opt.cuda else 'cpu')
#========= Loading Dataset =========#
if all((opt.train_path, opt.val_path)):
training_data, validation_data = prepare_dataloaders_from_bpe_files(opt, device)
elif opt.data_pkl:
training_data, validation_data = prepare_dataloaders(opt, device)
else:
        raise ValueError('No data given: provide -data_pkl or both -train_path and -val_path.')
print(opt)
transformer = Transformer(
opt.src_vocab_size,
opt.trg_vocab_size,
src_pad_idx=opt.src_pad_idx,
trg_pad_idx=opt.trg_pad_idx,
trg_emb_prj_weight_sharing=opt.proj_share_weight,
emb_src_trg_weight_sharing=opt.embs_share_weight,
d_k=opt.d_k,
d_v=opt.d_v,
d_model=opt.d_model,
d_word_vec=opt.d_word_vec,
d_inner=opt.d_inner_hid,
n_layers=opt.n_layers,
n_head=opt.n_head,
dropout=opt.dropout,
scale_emb_or_prj=opt.scale_emb_or_prj).to(device)
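    # ScheduledOptim applies the inverse-square-root warmup schedule of the original paper, i.e.
    # lr = lr_mul * d_model^(-0.5) * min(step^(-0.5), step * n_warmup_steps^(-1.5))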
optimizer = ScheduledOptim(
optim.Adam(transformer.parameters(), betas=(0.9, 0.98), eps=1e-09),
opt.lr_mul, opt.d_model, opt.n_warmup_steps)
train(transformer, training_data, validation_data, optimizer, device, opt)
def prepare_dataloaders_from_bpe_files(opt, device):
batch_size = opt.batch_size
MIN_FREQ = 2
if not opt.embs_share_weight:
        raise ValueError('Loading BPE-encoded files assumes a shared vocabulary; set -embs_share_weight.')
data = pickle.load(open(opt.data_pkl, 'rb'))
MAX_LEN = data['settings'].max_len
field = data['vocab']
fields = (field, field)
def filter_examples_with_length(x):
return len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN
train = TranslationDataset(
fields=fields,
path=opt.train_path,
exts=('.src', '.trg'),
filter_pred=filter_examples_with_length)
val = TranslationDataset(
fields=fields,
path=opt.val_path,
exts=('.src', '.trg'),
filter_pred=filter_examples_with_length)
opt.max_token_seq_len = MAX_LEN + 2
opt.src_pad_idx = opt.trg_pad_idx = field.vocab.stoi[Constants.PAD_WORD]
opt.src_vocab_size = opt.trg_vocab_size = len(field.vocab)
train_iterator = BucketIterator(train, batch_size=batch_size, device=device, train=True)
val_iterator = BucketIterator(val, batch_size=batch_size, device=device)
return train_iterator, val_iterator
def prepare_dataloaders(opt, device):
batch_size = opt.batch_size
data = pickle.load(open(opt.data_pkl, 'rb'))
opt.max_token_seq_len = data['settings'].max_len
opt.src_pad_idx = data['vocab']['src'].vocab.stoi[Constants.PAD_WORD]
opt.trg_pad_idx = data['vocab']['trg'].vocab.stoi[Constants.PAD_WORD]
opt.src_vocab_size = len(data['vocab']['src'].vocab)
opt.trg_vocab_size = len(data['vocab']['trg'].vocab)
#========= Preparing Model =========#
if opt.embs_share_weight:
assert data['vocab']['src'].vocab.stoi == data['vocab']['trg'].vocab.stoi, \
'To sharing word embedding the src/trg word2idx table shall be the same.'
fields = {'src': data['vocab']['src'], 'trg':data['vocab']['trg']}
train = Dataset(examples=data['train'], fields=fields)
val = Dataset(examples=data['valid'], fields=fields)
train_iterator = BucketIterator(train, batch_size=batch_size, device=device, train=True)
val_iterator = BucketIterator(val, batch_size=batch_size, device=device)
return train_iterator, val_iterator
if __name__ == '__main__':
main()
| 13,173 | 34.798913 | 162 | py |
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/preprocess.py | ''' Handling the data io '''
import os
import argparse
import logging
import dill as pickle
import urllib
from tqdm import tqdm
import sys
import codecs
import spacy
import torch
import tarfile
import torchtext.data
import torchtext.datasets
from torchtext.datasets import TranslationDataset
import transformer.Constants as Constants
from learn_bpe import learn_bpe
from apply_bpe import BPE
__author__ = "Yu-Hsiang Huang"
_TRAIN_DATA_SOURCES = [
{"url": "http://data.statmt.org/wmt17/translation-task/" \
"training-parallel-nc-v12.tgz",
"trg": "news-commentary-v12.de-en.en",
"src": "news-commentary-v12.de-en.de"},
#{"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
# "trg": "commoncrawl.de-en.en",
# "src": "commoncrawl.de-en.de"},
#{"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
# "trg": "europarl-v7.de-en.en",
# "src": "europarl-v7.de-en.de"}
]
_VAL_DATA_SOURCES = [
{"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"trg": "newstest2013.en",
"src": "newstest2013.de"}]
_TEST_DATA_SOURCES = [
{"url": "https://storage.googleapis.com/tf-perf-public/" \
"official_transformer/test_data/newstest2014.tgz",
"trg": "newstest2014.en",
"src": "newstest2014.de"}]
class TqdmUpTo(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def file_exist(dir_name, file_name):
for sub_dir, _, files in os.walk(dir_name):
if file_name in files:
return os.path.join(sub_dir, file_name)
return None
def download_and_extract(download_dir, url, src_filename, trg_filename):
src_path = file_exist(download_dir, src_filename)
trg_path = file_exist(download_dir, trg_filename)
if src_path and trg_path:
sys.stderr.write(f"Already downloaded and extracted {url}.\n")
return src_path, trg_path
compressed_file = _download_file(download_dir, url)
sys.stderr.write(f"Extracting {compressed_file}.\n")
with tarfile.open(compressed_file, "r:gz") as corpus_tar:
corpus_tar.extractall(download_dir)
src_path = file_exist(download_dir, src_filename)
trg_path = file_exist(download_dir, trg_filename)
if src_path and trg_path:
return src_path, trg_path
raise OSError(f"Download/extraction failed for url {url} to path {download_dir}")
def _download_file(download_dir, url):
filename = url.split("/")[-1]
if file_exist(download_dir, filename):
sys.stderr.write(f"Already downloaded: {url} (at {filename}).\n")
else:
sys.stderr.write(f"Downloading from {url} to {filename}.\n")
with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
urllib.request.urlretrieve(url, filename=filename, reporthook=t.update_to)
return filename
def get_raw_files(raw_dir, sources):
raw_files = { "src": [], "trg": [], }
for d in sources:
src_file, trg_file = download_and_extract(raw_dir, d["url"], d["src"], d["trg"])
raw_files["src"].append(src_file)
raw_files["trg"].append(trg_file)
return raw_files
def mkdir_if_needed(dir_name):
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
def compile_files(raw_dir, raw_files, prefix):
src_fpath = os.path.join(raw_dir, f"raw-{prefix}.src")
trg_fpath = os.path.join(raw_dir, f"raw-{prefix}.trg")
if os.path.isfile(src_fpath) and os.path.isfile(trg_fpath):
sys.stderr.write(f"Merged files found, skip the merging process.\n")
return src_fpath, trg_fpath
sys.stderr.write(f"Merge files into two files: {src_fpath} and {trg_fpath}.\n")
with open(src_fpath, 'w') as src_outf, open(trg_fpath, 'w') as trg_outf:
for src_inf, trg_inf in zip(raw_files['src'], raw_files['trg']):
sys.stderr.write(f' Input files: \n'\
f' - SRC: {src_inf}, and\n' \
f' - TRG: {trg_inf}.\n')
with open(src_inf, newline='\n') as src_inf, open(trg_inf, newline='\n') as trg_inf:
cntr = 0
for i, line in enumerate(src_inf):
cntr += 1
src_outf.write(line.replace('\r', ' ').strip() + '\n')
for j, line in enumerate(trg_inf):
cntr -= 1
trg_outf.write(line.replace('\r', ' ').strip() + '\n')
assert cntr == 0, 'Number of lines in two files are inconsistent.'
return src_fpath, trg_fpath
def encode_file(bpe, in_file, out_file):
sys.stderr.write(f"Read raw content from {in_file} and \n"\
f"Write encoded content to {out_file}\n")
with codecs.open(in_file, encoding='utf-8') as in_f:
with codecs.open(out_file, 'w', encoding='utf-8') as out_f:
for line in in_f:
out_f.write(bpe.process_line(line))
def encode_files(bpe, src_in_file, trg_in_file, data_dir, prefix):
src_out_file = os.path.join(data_dir, f"{prefix}.src")
trg_out_file = os.path.join(data_dir, f"{prefix}.trg")
    if os.path.isfile(src_out_file) and os.path.isfile(trg_out_file):
        sys.stderr.write(f"Encoded files found, skip the encoding process ...\n")
        return src_out_file, trg_out_file
encode_file(bpe, src_in_file, src_out_file)
encode_file(bpe, trg_in_file, trg_out_file)
return src_out_file, trg_out_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-raw_dir', required=True)
parser.add_argument('-data_dir', required=True)
parser.add_argument('-codes', required=True)
parser.add_argument('-save_data', required=True)
parser.add_argument('-prefix', required=True)
parser.add_argument('-max_len', type=int, default=100)
parser.add_argument('--symbols', '-s', type=int, default=32000, help="Vocabulary size")
parser.add_argument(
'--min-frequency', type=int, default=6, metavar='FREQ',
help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s))')
parser.add_argument('--dict-input', action="store_true",
help="If set, input file is interpreted as a dictionary where each line contains a word-count pair")
parser.add_argument(
'--separator', type=str, default='@@', metavar='STR',
help="Separator between non-final subword units (default: '%(default)s'))")
parser.add_argument('--total-symbols', '-t', action="store_true")
opt = parser.parse_args()
# Create folder if needed.
mkdir_if_needed(opt.raw_dir)
mkdir_if_needed(opt.data_dir)
# Download and extract raw data.
raw_train = get_raw_files(opt.raw_dir, _TRAIN_DATA_SOURCES)
raw_val = get_raw_files(opt.raw_dir, _VAL_DATA_SOURCES)
raw_test = get_raw_files(opt.raw_dir, _TEST_DATA_SOURCES)
# Merge files into one.
train_src, train_trg = compile_files(opt.raw_dir, raw_train, opt.prefix + '-train')
val_src, val_trg = compile_files(opt.raw_dir, raw_val, opt.prefix + '-val')
test_src, test_trg = compile_files(opt.raw_dir, raw_test, opt.prefix + '-test')
# Build up the code from training files if not exist
opt.codes = os.path.join(opt.data_dir, opt.codes)
if not os.path.isfile(opt.codes):
sys.stderr.write(f"Collect codes from training data and save to {opt.codes}.\n")
learn_bpe(raw_train['src'] + raw_train['trg'], opt.codes, opt.symbols, opt.min_frequency, True)
sys.stderr.write(f"BPE codes prepared.\n")
sys.stderr.write(f"Build up the tokenizer.\n")
with codecs.open(opt.codes, encoding='utf-8') as codes:
bpe = BPE(codes, separator=opt.separator)
sys.stderr.write(f"Encoding ...\n")
encode_files(bpe, train_src, train_trg, opt.data_dir, opt.prefix + '-train')
encode_files(bpe, val_src, val_trg, opt.data_dir, opt.prefix + '-val')
encode_files(bpe, test_src, test_trg, opt.data_dir, opt.prefix + '-test')
sys.stderr.write(f"Done.\n")
field = torchtext.data.Field(
tokenize=str.split,
lower=True,
pad_token=Constants.PAD_WORD,
init_token=Constants.BOS_WORD,
eos_token=Constants.EOS_WORD)
fields = (field, field)
MAX_LEN = opt.max_len
def filter_examples_with_length(x):
return len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN
enc_train_files_prefix = opt.prefix + '-train'
train = TranslationDataset(
fields=fields,
path=os.path.join(opt.data_dir, enc_train_files_prefix),
exts=('.src', '.trg'),
filter_pred=filter_examples_with_length)
from itertools import chain
field.build_vocab(chain(train.src, train.trg), min_freq=2)
data = { 'settings': opt, 'vocab': field, }
opt.save_data = os.path.join(opt.data_dir, opt.save_data)
print('[Info] Dumping the processed data to pickle file', opt.save_data)
pickle.dump(data, open(opt.save_data, 'wb'))
def main_wo_bpe():
'''
Usage: python preprocess.py -lang_src de -lang_trg en -save_data multi30k_de_en.pkl -share_vocab
'''
spacy_support_langs = ['de', 'el', 'en', 'es', 'fr', 'it', 'lt', 'nb', 'nl', 'pt']
parser = argparse.ArgumentParser()
parser.add_argument('-lang_src', required=True, choices=spacy_support_langs)
parser.add_argument('-lang_trg', required=True, choices=spacy_support_langs)
parser.add_argument('-save_data', required=True)
parser.add_argument('-data_src', type=str, default=None)
parser.add_argument('-data_trg', type=str, default=None)
parser.add_argument('-max_len', type=int, default=100)
parser.add_argument('-min_word_count', type=int, default=3)
parser.add_argument('-keep_case', action='store_true')
parser.add_argument('-share_vocab', action='store_true')
#parser.add_argument('-ratio', '--train_valid_test_ratio', type=int, nargs=3, metavar=(8,1,1))
#parser.add_argument('-vocab', default=None)
opt = parser.parse_args()
    assert not any([opt.data_src, opt.data_trg]), 'Custom data input is not supported yet.'
assert not any([opt.data_src, opt.data_trg]) or all([opt.data_src, opt.data_trg])
print(opt)
src_lang_model = spacy.load(opt.lang_src)
trg_lang_model = spacy.load(opt.lang_trg)
def tokenize_src(text):
return [tok.text for tok in src_lang_model.tokenizer(text)]
def tokenize_trg(text):
return [tok.text for tok in trg_lang_model.tokenizer(text)]
SRC = torchtext.data.Field(
tokenize=tokenize_src, lower=not opt.keep_case,
pad_token=Constants.PAD_WORD, init_token=Constants.BOS_WORD, eos_token=Constants.EOS_WORD)
TRG = torchtext.data.Field(
tokenize=tokenize_trg, lower=not opt.keep_case,
pad_token=Constants.PAD_WORD, init_token=Constants.BOS_WORD, eos_token=Constants.EOS_WORD)
MAX_LEN = opt.max_len
MIN_FREQ = opt.min_word_count
if not all([opt.data_src, opt.data_trg]):
assert {opt.lang_src, opt.lang_trg} == {'de', 'en'}
else:
# Pack custom txt file into example datasets
raise NotImplementedError
def filter_examples_with_length(x):
return len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN
train, val, test = torchtext.datasets.Multi30k.splits(
exts = ('.' + opt.lang_src, '.' + opt.lang_trg),
fields = (SRC, TRG),
filter_pred=filter_examples_with_length)
SRC.build_vocab(train.src, min_freq=MIN_FREQ)
print('[Info] Get source language vocabulary size:', len(SRC.vocab))
TRG.build_vocab(train.trg, min_freq=MIN_FREQ)
print('[Info] Get target language vocabulary size:', len(TRG.vocab))
if opt.share_vocab:
        print('[Info] Merging the two vocabularies ...')
for w, _ in SRC.vocab.stoi.items():
# TODO: Also update the `freq`, although it is not likely to be used.
if w not in TRG.vocab.stoi:
TRG.vocab.stoi[w] = len(TRG.vocab.stoi)
TRG.vocab.itos = [None] * len(TRG.vocab.stoi)
for w, i in TRG.vocab.stoi.items():
TRG.vocab.itos[i] = w
SRC.vocab.stoi = TRG.vocab.stoi
SRC.vocab.itos = TRG.vocab.itos
print('[Info] Get merged vocabulary size:', len(TRG.vocab))
data = {
'settings': opt,
'vocab': {'src': SRC, 'trg': TRG},
'train': train.examples,
'valid': val.examples,
'test': test.examples}
print('[Info] Dumping the processed data to pickle file', opt.save_data)
pickle.dump(data, open(opt.save_data, 'wb'))
if __name__ == '__main__':
main_wo_bpe()
#main()
| 12,646 | 36.52819 | 108 | py |
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/transformer/Layers.py | ''' Define the Layers '''
import torch.nn as nn
import torch
from transformer.SubLayers import MultiHeadAttention, PositionwiseFeedForward
__author__ = "Yu-Hsiang Huang"
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
class DecoderLayer(nn.Module):
''' Compose with three layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(
self, dec_input, enc_output,
slf_attn_mask=None, dec_enc_attn_mask=None):
dec_output, dec_slf_attn = self.slf_attn(
dec_input, dec_input, dec_input, mask=slf_attn_mask)
dec_output, dec_enc_attn = self.enc_attn(
dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)
dec_output = self.pos_ffn(dec_output)
return dec_output, dec_slf_attn, dec_enc_attn
| 1,684 | 38.186047 | 86 | py |
attention-is-all-you-need-pytorch | attention-is-all-you-need-pytorch-master/transformer/Translator.py | ''' This module will handle the text generation with beam search. '''
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformer.Models import Transformer, get_pad_mask, get_subsequent_mask
class Translator(nn.Module):
''' Load a trained model and translate in beam search fashion. '''
def __init__(
self, model, beam_size, max_seq_len,
src_pad_idx, trg_pad_idx, trg_bos_idx, trg_eos_idx):
super(Translator, self).__init__()
self.alpha = 0.7
self.beam_size = beam_size
self.max_seq_len = max_seq_len
self.src_pad_idx = src_pad_idx
self.trg_bos_idx = trg_bos_idx
self.trg_eos_idx = trg_eos_idx
self.model = model
self.model.eval()
self.register_buffer('init_seq', torch.LongTensor([[trg_bos_idx]]))
self.register_buffer(
'blank_seqs',
torch.full((beam_size, max_seq_len), trg_pad_idx, dtype=torch.long))
self.blank_seqs[:, 0] = self.trg_bos_idx
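        # len_map enumerates positions 1..max_seq_len; it is used later to locate the first <eos>
        # in each beam, i.e. the effective length of each candidate translation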
self.register_buffer(
'len_map',
torch.arange(1, max_seq_len + 1, dtype=torch.long).unsqueeze(0))
def _model_decode(self, trg_seq, enc_output, src_mask):
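        # build a causal (subsequent-position) mask, run the decoder over the partial target
        # sequence, and project to a probability distribution over the target vocabulary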
trg_mask = get_subsequent_mask(trg_seq)
dec_output, *_ = self.model.decoder(trg_seq, trg_mask, enc_output, src_mask)
return F.softmax(self.model.trg_word_prj(dec_output), dim=-1)
def _get_init_state(self, src_seq, src_mask):
beam_size = self.beam_size
enc_output, *_ = self.model.encoder(src_seq, src_mask)
dec_output = self._model_decode(self.init_seq, enc_output, src_mask)
best_k_probs, best_k_idx = dec_output[:, -1, :].topk(beam_size)
scores = torch.log(best_k_probs).view(beam_size)
gen_seq = self.blank_seqs.clone().detach()
gen_seq[:, 1] = best_k_idx[0]
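        # replicate the encoder output beam_size times so all beams decode as one batch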
enc_output = enc_output.repeat(beam_size, 1, 1)
return enc_output, gen_seq, scores
def _get_the_best_score_and_idx(self, gen_seq, dec_output, scores, step):
assert len(scores.size()) == 1
beam_size = self.beam_size
# Get k candidates for each beam, k^2 candidates in total.
best_k2_probs, best_k2_idx = dec_output[:, -1, :].topk(beam_size)
# Include the previous scores.
scores = torch.log(best_k2_probs).view(beam_size, -1) + scores.view(beam_size, 1)
# Get the best k candidates from k^2 candidates.
scores, best_k_idx_in_k2 = scores.view(-1).topk(beam_size)
# Get the corresponding positions of the best k candidiates.
best_k_r_idxs, best_k_c_idxs = best_k_idx_in_k2 // beam_size, best_k_idx_in_k2 % beam_size
best_k_idx = best_k2_idx[best_k_r_idxs, best_k_c_idxs]
# Copy the corresponding previous tokens.
gen_seq[:, :step] = gen_seq[best_k_r_idxs, :step]
# Set the best tokens in this beam search step
gen_seq[:, step] = best_k_idx
return gen_seq, scores
def translate_sentence(self, src_seq):
# Only accept batch size equals to 1 in this function.
# TODO: expand to batch operation.
assert src_seq.size(0) == 1
src_pad_idx, trg_eos_idx = self.src_pad_idx, self.trg_eos_idx
max_seq_len, beam_size, alpha = self.max_seq_len, self.beam_size, self.alpha
with torch.no_grad():
src_mask = get_pad_mask(src_seq, src_pad_idx)
enc_output, gen_seq, scores = self._get_init_state(src_seq, src_mask)
ans_idx = 0 # default
for step in range(2, max_seq_len): # decode up to max length
dec_output = self._model_decode(gen_seq[:, :step], enc_output, src_mask)
gen_seq, scores = self._get_the_best_score_and_idx(gen_seq, dec_output, scores, step)
# Check if all path finished
# -- locate the eos in the generated sequences
eos_locs = gen_seq == trg_eos_idx
# -- replace the eos with its position for the length penalty use
seq_lens, _ = self.len_map.masked_fill(~eos_locs, max_seq_len).min(1)
# -- check if all beams contain eos
if (eos_locs.sum(1) > 0).sum(0).item() == beam_size:
# TODO: Try different terminate conditions.
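                    # length-normalized selection: divide each beam's accumulated log-probability
                    # by (sequence length)^alpha so longer hypotheses are not unfairly penalized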
_, ans_idx = scores.div(seq_lens.float() ** alpha).max(0)
ans_idx = ans_idx.item()
break
return gen_seq[ans_idx][:seq_lens[ans_idx]].tolist()
| 4,562 | 38.678261 | 101 | py |