repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
GXN | GXN-main/util.py | from __future__ import print_function
import random
import os
import numpy as np
import networkx as nx
import argparse
import torch
from sklearn.model_selection import StratifiedKFold
# Command-line configuration shared by the whole project. Parsed at import
# time, so any module can simply `from util import cmd_args`.
cmd_opt = argparse.ArgumentParser(description='Argparser for graph_classification')
cmd_opt.add_argument('-mode', default='cpu', help='cpu/gpu')
cmd_opt.add_argument('-data_root', default='any', help='The root dir of dataset')
cmd_opt.add_argument('-data', default=None, help='data folder name')
cmd_opt.add_argument('-batch_size', type=int, default=50, help='minibatch size')
cmd_opt.add_argument('-seed', type=int, default=1, help='seed')
cmd_opt.add_argument('-feat_dim', type=int, default=0, help='dimension of discrete node feature (maximum node tag)')
cmd_opt.add_argument('-num_class', type=int, default=0, help='#classes')
cmd_opt.add_argument('-fold', type=int, default=1, help='fold (1..10)')
cmd_opt.add_argument('-test_number', type=int, default=0, help='if specified, will overwrite -fold and use the last -test_number graphs as testing data')
cmd_opt.add_argument('-num_epochs', type=int, default=1000, help='number of epochs')
cmd_opt.add_argument('-latent_dim', type=str, default='64', help='dimension(s) of latent layers')
cmd_opt.add_argument('-k1', type=float, default=0.9, help='The scale proportion of scale 1')
cmd_opt.add_argument('-k2', type=float, default=0.7, help='The scale proportion of scale 2')
cmd_opt.add_argument('-sortpooling_k', type=float, default=30, help='number of nodes kept after SortPooling')
cmd_opt.add_argument('-out_dim', type=int, default=1024, help='s2v output size')
cmd_opt.add_argument('-hidden', type=int, default=100, help='dimension of regression')
cmd_opt.add_argument('-max_lv', type=int, default=4, help='max rounds of message passing')
cmd_opt.add_argument('-learning_rate', type=float, default=0.0001, help='init learning_rate')
# NOTE(review): argparse `type=bool` is a footgun -- any non-empty string
# (including "False") parses as True; confirm these flags are only toggled
# by omitting/including them with no value.
cmd_opt.add_argument('-dropout', type=bool, default=False, help='whether add dropout after dense layer')
cmd_opt.add_argument('-extract_features', type=bool, default=False, help='whether to extract final graph features')
cmd_opt.add_argument('-cross_weight', type=float, default=1.0, help='weights for hidden layer cross')
cmd_opt.add_argument('-fuse_weight', type=float, default=1.0, help='weights for final fuse')
cmd_opt.add_argument('-Rhop', type=int, default=1, help='neighborhood hop')
cmd_opt.add_argument('-weight', type=str, default=None, help='saved model parameters')
# parse_known_args so unrelated flags from a host script are ignored.
cmd_args, _ = cmd_opt.parse_known_args()
# '64-32' style strings become [64, 32]; a single value collapses to an int.
cmd_args.latent_dim = [int(x) for x in cmd_args.latent_dim.split('-')]
if len(cmd_args.latent_dim) == 1:
    cmd_args.latent_dim = cmd_args.latent_dim[0]
class S2VGraph(object):
    """Container for one graph sample: topology, label, tags and features.

    Parameters
    ----------
    g : networkx.Graph (or any object exposing degree(), edges(), __len__)
    label : graph-level label; remapped to a class id later by load_data.
    node_tags : list of per-node integer tags, or None.
    node_features : numpy array (node_num * feature_dim), or None.
    """
    def __init__(self, g, label, node_tags=None, node_features=None):
        self.g = g
        # The original unconditionally did len(node_tags), which crashed on
        # the documented default node_tags=None; fall back to the graph's own
        # node count in that case (backward compatible: previously a crash).
        self.num_nodes = len(node_tags) if node_tags is not None else len(g)
        self.node_tags = node_tags
        self.label = label
        self.node_features = node_features  # numpy array (node_num * feature_dim)
        self.degs = list(dict(g.degree()).values())
        edges = list(g.edges())
        if edges:
            x, y = zip(*edges)
            self.num_edges = len(x)
            # Flattened [src0, dst0, src1, dst1, ...] int32 buffer; this exact
            # layout is what the ctypes code in lib/gnn_lib.py reads.
            self.edge_pairs = np.ndarray(shape=(self.num_edges, 2), dtype=np.int32)
            self.edge_pairs[:, 0] = x
            self.edge_pairs[:, 1] = y
            self.edge_pairs = self.edge_pairs.flatten()
        else:
            self.num_edges = 0
            self.edge_pairs = np.array([])
def load_data(root_dir, degree_as_tag):
    """Parse <data>/<data>.txt into a list of S2VGraph.

    File layout: first line is the number of graphs; each graph starts with
    "<num_nodes> <label>", followed by one line per node:
    "<tag> <num_neighbors> <neighbor ids...> [float attrs...]".
    Side effects: sets cmd_args.num_class, cmd_args.feat_dim, cmd_args.attr_dim.
    """
    print('loading data')
    g_list = []
    label_dict = {}  # raw graph label -> contiguous class id
    feat_dict = {}   # raw node tag -> contiguous tag id
    data_file = os.path.join(root_dir, '%s/%s.txt' % (cmd_args.data, cmd_args.data))
    with open(data_file, 'r') as f:
        n_g = int(f.readline().strip())
        row_list = []
        for i in range(n_g):
            row = f.readline().strip().split()
            n, l = [int(w) for w in row]
            row_list.append(int(n))
            if not l in label_dict:
                mapped = len(label_dict)
                label_dict[l] = mapped
            g = nx.Graph()
            node_tags = []
            node_features = []
            n_edges = 0
            for j in range(n):
                g.add_node(j)
                row = f.readline().strip().split()
                # tmp = index just past the neighbor list; tokens after it
                # (if any) are a continuous attribute vector.
                tmp = int(row[1]) + 2
                if tmp == len(row):
                    row = [int(w) for w in row]
                    attr = None
                else:
                    row, attr = [int(w) for w in row[:tmp]], np.array([float(w) for w in row[tmp:]])
                if not row[0] in feat_dict:
                    mapped = len(feat_dict)
                    feat_dict[row[0]] = mapped
                node_tags.append(feat_dict[row[0]])
                # NOTE(review): after the truncation above len(row) == tmp, so
                # this condition is never true and `attr` is never collected;
                # looks like it should be `attr is not None` -- confirm intent.
                if tmp > len(row):
                    node_features.append(attr)
                n_edges += row[1]
                for k in range(2, len(row)):
                    g.add_edge(j, row[k])
                g.add_edge(j, j)  # self-loop on every node
            if node_features != []:
                node_features = np.stack(node_features)
                node_feature_flag = True
            else:
                node_features = None
                node_feature_flag = False
            assert len(g) == n
            g_list.append(S2VGraph(g, l, node_tags, node_features))
    print('max node num: ', np.max(row_list), 'min node num: ', np.min(row_list), 'mean node num: ', np.mean(row_list))
    for g in g_list:
        # Build per-node adjacency lists and a 2 x (2*|E|) edge index tensor
        # (both directions of every undirected edge).
        g.neighbors = [[] for i in range(len(g.g))]
        for i, j in g.g.edges():
            g.neighbors[i].append(j)
            g.neighbors[j].append(i)
        degree_list = []
        for i in range(len(g.g)):
            g.neighbors[i] = g.neighbors[i]
            degree_list.append(len(g.neighbors[i]))
        g.max_neighbor = max(degree_list)
        g.label = label_dict[g.label]  # remap raw label to contiguous class id
        edges = [list(pair) for pair in g.g.edges()]
        edges.extend([[i, j] for j, i in edges])  # add reversed direction
        deg_list = list(dict(g.g.degree(range(len(g.g)))).values())
        g.edge_mat = torch.LongTensor(edges).transpose(0,1)
    if degree_as_tag:
        for g in g_list:
            g.node_tags_ = list(dict(g.g.degree).values())
    # NOTE(review): node_tags_ is only assigned when degree_as_tag is True but
    # is read unconditionally below; degree_as_tag=False would raise
    # AttributeError here -- confirm callers always pass True.
    tagset = set([])
    for g in g_list:
        tagset = tagset.union(set(g.node_tags_))
    tagset = list(tagset)
    tag2index = {tagset[i]:i for i in range(len(tagset))}
    for g in g_list:
        # One-hot encode each node's (degree-)tag as its feature vector.
        g.node_features = torch.zeros(len(g.node_tags_), len(tagset))
        g.node_features[range(len(g.node_tags_)), [tag2index[tag] for tag in g.node_tags_]] = 1
    node_feature_flag = True
    cmd_args.num_class = len(label_dict)
    cmd_args.feat_dim = len(feat_dict) # maximum node label (tag)
    if node_feature_flag == True:
        cmd_args.attr_dim = len(tagset) # dim of node features (attributes)
    else:
        cmd_args.attr_dim = 0
    print('# classes: %d' % cmd_args.num_class)
    print("# data: %d" % len(g_list))
    return g_list
def sep_data(root_dir, graph_list, fold_idx, seed=0):
    # Load the precomputed 10-fold split for this dataset and return
    # (train_graphs, test_graphs). `seed` is accepted for API compatibility
    # but unused: the split files are fixed on disk.
    train_idx = np.loadtxt(os.path.join(root_dir, '%s/10fold_idx/train_idx-%d.txt' % (cmd_args.data, fold_idx)), dtype=np.int32).tolist()
    test_idx = np.loadtxt(os.path.join(root_dir, '%s/10fold_idx/test_idx-%d.txt' % (cmd_args.data, fold_idx)), dtype=np.int32).tolist()
    return [graph_list[i] for i in train_idx], [graph_list[i] for i in test_idx] | 7,339 | 41.183908 | 153 | py |
GXN | GXN-main/ops.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import scipy.sparse as sp
def spec_normalize_adj(adj, high_order=False):
    """Symmetric (spectral) normalization: returns D^{-1/2} A^T D^{-1/2}.

    Rows with zero degree map to all-zero rows (their inverse-sqrt degree is
    forced to 0). `high_order` is unused and kept for API compatibility.
    """
    dense = sp.coo_matrix(adj.to_dense().cpu().numpy())
    degree = np.array(dense.sum(1)).flatten()
    inv_sqrt = np.power(degree, -0.5)
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_hat = sp.diags(inv_sqrt)
    normalized = dense.dot(d_hat).transpose().dot(d_hat).tocoo()
    return torch.FloatTensor(normalized.todense())
def spac_normalize_adj(adj, high_order=False):
    """Random-walk (spatial) normalization: returns (A D^{-1})^T = D^{-1} A^T.

    Zero-degree rows get a zero inverse degree. `high_order` is unused and
    kept for API compatibility.
    """
    dense = sp.coo_matrix(adj.to_dense().cpu().numpy())
    degree = np.array(dense.sum(1)).flatten()
    inv_deg = np.power(degree, -1.)
    inv_deg[np.isinf(inv_deg)] = 0.
    d_hat = sp.diags(inv_deg)
    normalized = dense.dot(d_hat).transpose().tocoo()
    return torch.FloatTensor(normalized.todense())
def normalize_adj_torch(mx):
    """Symmetric normalization computed entirely in torch: D^{-1/2} M^T D^{-1/2}."""
    dense = mx.to_dense()
    deg = dense.sum(1)
    inv_sqrt = torch.pow(deg, -0.5).flatten()
    inv_sqrt[torch.isinf(inv_sqrt)] = 0.  # zero-degree rows contribute nothing
    d_hat = torch.diag(inv_sqrt)
    out = torch.matmul(dense, d_hat)
    out = torch.matmul(out.transpose(0, 1), d_hat)
    return out
class MLP(nn.Module):
    """One fully-connected layer followed by an activation (PReLU by default).

    Note: when bias=True this keeps *two* learnable offsets -- the nn.Linear's
    own bias plus an extra `self.bias` added in forward -- mirroring the
    original implementation exactly. Both start at zero.
    """
    def __init__(self, in_ft, out_ft, act='prelu', bias=True):
        super().__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=bias)
        # A non-'prelu' value for `act` is assumed to already be a callable.
        self.act = nn.PReLU() if act == 'prelu' else act
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        # Xavier-uniform for linear weights, zero for their biases.
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, x):
        out = self.fc(x)
        if self.bias is not None:
            out = out + self.bias
        return self.act(out)
class GCN_MI(nn.Module):
    """Graph convolution used by the mutual-information branch:
    out = act(A @ (X W) + b), with optional sparse A."""
    def __init__(self, in_ft, out_ft, act='prelu', bias=True):
        super().__init__()
        self.fc = nn.Linear(in_ft, out_ft, bias=False)
        self.act = nn.PReLU() if act == 'prelu' else act
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = nn.Parameter(torch.FloatTensor(out_ft))
            self.bias.data.fill_(0.0)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        # Xavier-uniform weights; biases (if any) start at zero.
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, A, x, sparse=False):
        fts = self.fc(x)
        if sparse:
            # spmm needs 2-D operands; drop and restore the batch dim.
            out = torch.spmm(A, torch.squeeze(fts, 0)).unsqueeze(0)
        else:
            out = torch.bmm(A.unsqueeze(0), fts.unsqueeze(0))
        if self.bias is not None:
            out = out + self.bias
        return self.act(out).squeeze(0)
class GCN(nn.Module):
    """Dense graph convolution: X' = act(proj(A @ dropout(X))).

    Dropout (p=0.3) is active in training mode only; `act` defaults to the
    identity when None.
    """
    def __init__(self, in_dim, out_dim):
        super(GCN, self).__init__()
        self.proj = nn.Linear(in_dim, out_dim)
        self.drop = nn.Dropout(p=0.3)

    def forward(self, A, X, act=None):
        h = self.proj(torch.matmul(A, self.drop(X)))
        return h if act is None else act(h)
class Discriminator(nn.Module):
    """Bilinear scorer between a summary `c` and positive/negative embeddings.

    Returns (all logits concatenated positive-then-negative, the positive
    half only).
    """
    def __init__(self, n_h):
        super().__init__()
        self.f_k = nn.Bilinear(n_h, n_h, 1)
        for module in self.modules():
            self.weights_init(module)

    def weights_init(self, m):
        # Xavier-uniform bilinear weight; zero bias.
        if isinstance(m, nn.Bilinear):
            torch.nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0.0)

    def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None):
        pos = torch.squeeze(self.f_k(h_pl, c), -2)
        neg = torch.squeeze(self.f_k(h_mi, c), -2)
        if s_bias1 is not None:
            pos = pos + s_bias1
        if s_bias2 is not None:
            neg = neg + s_bias2
        logits = torch.cat((pos, neg), 0).squeeze(-1)
        half = logits.shape[0] // 2
        return logits, logits[:half]
class GraphCrossnet(nn.Module):
    """Graph Cross Network backbone: three scales (s1 finest .. s3 coarsest)
    connected by learned infomax pooling, with cross-scale feature exchange.

    ks: (k1, k2) keep-ratios for pooling scale1->scale2 and scale2->scale3.
    forward returns (scale-1 node embeddings, MI logits for scale 1, for scale 2).
    """
    def __init__(self, ks, in_dim, out_dim, dim=48, cross_weight=1.0, fuse_weight=1.0, R=1, cross_layer=2):
        super(GraphCrossnet, self).__init__()
        self.ks = ks
        self.cs_w = cross_weight  # weight of cross-scale feature exchange
        self.fs_w = fuse_weight   # weight of the final multi-scale fusion
        self.cs_l = cross_layer   # how many layers perform feature crossing
        self.start_gcn_s1 = GCN(in_dim, dim)
        self.start_gcn_s2 = GCN(dim, dim)
        self.end_gcn = GCN(2*dim, out_dim)
        # Infomax node selection: decides which nodes survive to the coarser scale.
        self.index_select_s1 = IndexSelect(ks[0], dim, act='prelu', R=R)
        self.index_select_s2 = IndexSelect(ks[1], dim, act='prelu', R=R)
        self.pool_s12_start = GraphPool(dim)
        self.pool_s23_start = GraphPool(dim)
        self.unpool_s21_end = GraphUnpool(dim)
        self.unpool_s32_end = GraphUnpool(dim)
        # Three GCN layers per scale.
        self.s1_l1 = GCN(dim, dim)
        self.s1_l2 = GCN(dim, dim)
        self.s1_l3 = GCN(dim, dim)
        self.s2_l1 = GCN(dim, dim)
        self.s2_l2 = GCN(dim, dim)
        self.s2_l3 = GCN(dim, dim)
        self.s3_l1 = GCN(dim, dim)
        self.s3_l2 = GCN(dim, dim)
        self.s3_l3 = GCN(dim, dim)
        # Pool/unpool pairs used by each feature-crossing layer.
        if self.cs_l>=1:
            self.pool_s12_1 = GraphPool(dim, g=True)
            self.unpool_s21_1 = GraphUnpool(dim)
            self.pool_s23_1 = GraphPool(dim, g=True)
            self.unpool_s32_1 = GraphUnpool(dim)
        if self.cs_l>=2:
            self.pool_s12_2 = GraphPool(dim, g=True)
            self.unpool_s21_2 = GraphUnpool(dim)
            self.pool_s23_2 = GraphPool(dim, g=True)
            self.unpool_s32_2 = GraphUnpool(dim)
    def forward(self, A, x):
        A_s1 = A
        x_s1 = self.start_gcn_s1(A_s1, x)
        x_org = x_s1
        # Row-shuffled copies serve as negative samples for the MI objective.
        x_s1_ = torch.zeros_like(x_s1)
        x_s1_ = x_s1[torch.randperm(x_s1.shape[0]),:]
        ret_s1, value_s1, idx_s1, idx_s1_, Xdown_s1 = self.index_select_s1(x_s1, x_s1_, A_s1)
        x_s2, A_s2 = self.pool_s12_start(A_s1, x_s1, idx_s1, idx_s1_, value_s1, initlayer=True)
        x_s2 = self.start_gcn_s2(A_s2, x_s2)
        x_s2_ = torch.zeros_like(x_s2)
        x_s2_ = x_s2[torch.randperm(x_s2.shape[0]),:]
        ret_s2, value_s2, idx_s2, idx_s2_, Xdown_s2 = self.index_select_s2(x_s2, x_s2_, A_s2)
        x_s3, A_s3 = self.pool_s23_start(A_s2, x_s2, idx_s2, idx_s2_, value_s2, initlayer=True)
        res_s1_0, res_s2_0, res_s3_0 = x_s1, x_s2, x_s3
        x_s1 = self.s1_l1(A_s1, x_s1, F.relu)
        x_s2 = self.s2_l1(A_s2, x_s2, F.relu)
        x_s3 = self.s3_l1(A_s3, x_s3, F.relu)
        res_s1_1, res_s2_1, res_s3_1 = x_s1, x_s2, x_s3
        if self.cs_l >= 1:
            # First crossing: exchange pooled/unpooled features between
            # adjacent scales, plus residual from before layer 1.
            x_s12_fu = self.pool_s12_1(A_s1, x_s1, idx_s1, idx_s1_, value_s1)
            x_s21_fu = self.unpool_s21_1(A_s1, x_s2, idx_s1)
            x_s23_fu = self.pool_s23_1(A_s2, x_s2, idx_s2, idx_s2_, value_s2)
            x_s32_fu = self.unpool_s32_1(A_s2, x_s3, idx_s2)
            x_s1 = x_s1 + self.cs_w * x_s21_fu + res_s1_0
            x_s2 = x_s2 + self.cs_w * (x_s12_fu + x_s32_fu)/2 + res_s2_0
            x_s3 = x_s3 + self.cs_w * x_s23_fu + res_s3_0
        x_s1 = self.s1_l2(A_s1, x_s1, F.relu)
        x_s2 = self.s2_l2(A_s2, x_s2, F.relu)
        x_s3 = self.s3_l2(A_s3, x_s3, F.relu)
        if self.cs_l >= 2:
            # Second crossing, damped by a fixed 0.05 factor.
            x_s12_fu = self.pool_s12_2(A_s1, x_s1, idx_s1, idx_s1_, value_s1)
            x_s21_fu = self.unpool_s21_2(A_s1, x_s2, idx_s1)
            x_s23_fu = self.pool_s23_2(A_s2, x_s2, idx_s2, idx_s2_, value_s2)
            x_s32_fu = self.unpool_s32_2(A_s2, x_s3, idx_s2)
            x_s1 = x_s1 + self.cs_w * 0.05 * x_s21_fu
            x_s2 = x_s2 + self.cs_w * 0.05 * (x_s12_fu + x_s32_fu)/2
            x_s3 = x_s3 + self.cs_w * 0.05 * x_s23_fu
        x_s1 = self.s1_l3(A_s1, x_s1, F.relu)
        x_s2 = self.s2_l3(A_s2, x_s2, F.relu)
        x_s3 = self.s3_l3(A_s3, x_s3, F.relu)
        # Unpool coarse scales back to scale 1 and fuse with the skip features.
        x_s3_out = self.unpool_s32_end(A_s2, x_s3, idx_s2) + Xdown_s2
        x_s2_out = self.unpool_s21_end(A_s1, x_s2 + x_s3_out, idx_s1)
        x_agg = x_s1 + x_s2_out * self.fs_w + Xdown_s1 * self.fs_w
        x_agg = torch.cat([x_agg, x_org], 1)
        x_agg = self.end_gcn(A_s1, x_agg)
        return x_agg, ret_s1, ret_s2
class IndexSelect(nn.Module):
    """Scores nodes via an infomax discriminator and splits them into the
    kept set (top k fraction) and the dropped set.

    forward returns (all MI logits, kept scores, kept indices, dropped
    indices, propagated features).
    """
    def __init__(self, k, n_h, act, R=1):
        super().__init__()
        self.k = k
        self.R = R
        self.sigm = nn.Sigmoid()
        self.fc = MLP(n_h, n_h, act)
        self.disc = Discriminator(n_h)
        self.gcn1 = GCN(n_h, n_h)

    def forward(self, seq1, seq2, A, samp_bias1=None, samp_bias2=None):
        h_pos = self.fc(seq1)   # positive (real) embeddings
        h_neg = self.fc(seq2)   # negative (shuffled) embeddings
        h_prop = self.gcn1(A, h_pos)
        summary = self.sigm(h_prop)
        ret, ret_true = self.disc(summary, h_pos, h_neg, samp_bias1, samp_bias2)
        # Per-node keep-scores come from the positive logits only.
        scores = self.sigm(ret_true).squeeze()
        n = A.shape[0]
        vals, order = torch.topk(scores, int(n))
        cut = int(self.k * n)
        return ret, vals[:cut], order[:cut], order[cut:], h_prop
class GraphPool(nn.Module):
    """Keeps the rows selected by idx and rescales them by their scores.

    With g=True the features are first smoothed by a GCN; with
    initlayer=True the adjacency is also shrunk to the kept nodes and
    returned alongside the pooled features.
    """
    def __init__(self, in_dim, g=False):
        super(GraphPool, self).__init__()
        self.g = g
        if self.g:
            self.down_gcn = GCN(in_dim, in_dim)

    def forward(self, A, X, idx, idx_=None, value=None, initlayer=False):
        feats = self.down_gcn(A, X) if self.g else X
        # Gate each surviving node by its selection score.
        pooled = feats[idx, :] * value.unsqueeze(-1)
        if not initlayer:
            return pooled
        return pooled, self.removeedge(A, idx)

    def removeedge(self, A, idx):
        # Keep only the rows and columns of the selected nodes.
        return A[idx, :][:, idx]
class GraphUnpool(nn.Module):
    # Scatters coarse-scale features back to their fine-scale positions
    # (zeros elsewhere), then smooths with a GCN over the fine adjacency.
    def __init__(self, in_dim):
        super(GraphUnpool, self).__init__()
        self.up_gcn = GCN(in_dim, in_dim)
    def forward(self, A, X, idx):
        # idx maps each coarse node to its index in the fine graph.
        new_X = torch.zeros([A.shape[0], X.shape[1]]).to(X.device)
        new_X[idx] = X
        new_X = self.up_gcn(A, new_X)
        return new_X | 10,385 | 31.867089 | 107 | py |
GXN | GXN-main/mlp_dropout.py | from __future__ import print_function
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class MLPRegression(nn.Module):
    """Two-layer MLP regressor with a scalar output.

    forward(x) returns predictions; forward(x, y) additionally returns
    (pred, MAE, MSE) against the targets.
    """
    def __init__(self, input_size, hidden_size):
        super(MLPRegression, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, 1)

    def forward(self, x, y=None):
        hidden = F.relu(self.h1_weights(x))
        pred = self.h2_weights(hidden)
        if y is None:
            return pred
        y = Variable(y)  # legacy wrapper, kept for parity with old callers
        return pred, F.l1_loss(pred, y), F.mse_loss(pred, y)
class MLPClassifier(nn.Module):
    """Two-layer MLP classifier returning log-probabilities.

    forward(x) returns log-softmax logits; forward(x, y) additionally
    returns (logits, NLL loss, accuracy). Dropout after the hidden layer is
    optional and only active in training mode.
    """
    def __init__(self, input_size, hidden_size, num_class, with_dropout=False):
        super(MLPClassifier, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, num_class)
        self.with_dropout = with_dropout

    def forward(self, x, y=None):
        hidden = F.relu(self.h1_weights(x))
        if self.with_dropout:
            hidden = F.dropout(hidden, training=self.training)
        logits = F.log_softmax(self.h2_weights(hidden), dim=1)
        if y is None:
            return logits
        loss = F.nll_loss(logits, y)
        pred = logits.data.max(1, keepdim=True)[1]
        acc = pred.eq(y.data.view_as(pred)).cpu().sum().item() / float(y.size()[0])
        return logits, loss, acc
| 1,575 | 28.735849 | 87 | py |
GXN | GXN-main/lib/pytorch_util.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from gnn_lib import GNNLIB
def glorot_uniform(t):
    """In-place Glorot/Xavier uniform initialization for a 1- to 3-D tensor."""
    shape = t.size()
    if len(shape) == 2:
        fan_in, fan_out = shape
    elif len(shape) == 3:
        # Conv1d-style (out_ch, in_ch, kernel): fold the kernel width in.
        fan_in = shape[1] * shape[2]
        fan_out = shape[0] * shape[2]
    else:
        fan_in = fan_out = np.prod(shape)
    bound = np.sqrt(6.0 / (fan_in + fan_out))
    t.uniform_(-bound, bound)
def _param_init(m):
    """Initialize a single Parameter or nn.Linear module.

    Bare Parameters get Glorot-uniform weights; Linear modules get a zeroed
    bias plus a Glorot-uniform weight. Guarded against Linear layers built
    with bias=False (m.bias is None) -- the original dereferenced
    m.bias.data unconditionally and crashed on them.
    """
    if isinstance(m, Parameter):
        glorot_uniform(m.data)
    elif isinstance(m, nn.Linear):
        if m.bias is not None:
            m.bias.data.zero_()
        glorot_uniform(m.weight.data)
def weights_init(m):
    """Apply _param_init to every parameter-bearing piece of module m."""
    for sub in m.modules():
        if isinstance(sub, nn.ParameterList):
            for item in sub:
                _param_init(item)
        else:
            _param_init(sub)
    # Parameters registered directly on m (no '.' in their name) are not
    # reached through modules(); initialize them explicitly.
    for name, param in m.named_parameters():
        if '.' not in name:  # top-level parameters
            _param_init(param)
class MySpMM(torch.autograd.Function):
    """Autograd-aware sparse @ dense matrix multiply.

    Only the dense operand receives a gradient; the sparse matrix is treated
    as a constant (asserted in backward).
    """
    @staticmethod
    def forward(ctx, sp_mat, dense_mat):
        ctx.save_for_backward(sp_mat, dense_mat)
        return torch.mm(sp_mat, dense_mat)

    @staticmethod
    def backward(ctx, grad_output):
        # ctx.saved_variables was deprecated in torch 0.4 and later removed;
        # saved_tensors is the supported accessor. The legacy Variable
        # wrapper around the gradient is likewise unnecessary now.
        sp_mat, dense_mat = ctx.saved_tensors
        grad_matrix1 = grad_matrix2 = None
        assert not ctx.needs_input_grad[0]
        if ctx.needs_input_grad[1]:
            # d(S @ D)/dD = S^T @ grad_output
            grad_matrix2 = torch.mm(sp_mat.t(), grad_output)
        return grad_matrix1, grad_matrix2
def gnn_spmm(sp_mat, dense_mat):
    # Convenience wrapper so callers never invoke MySpMM.apply directly.
    return MySpMM.apply(sp_mat, dense_mat)
| 1,850 | 25.070423 | 80 | py |
GXN | GXN-main/lib/gnn_lib.py | import ctypes
import numpy as np
import os
import sys
import torch
import pdb
class _gnn_lib(object):
    """ctypes bridge to the compiled libgnn.so batch-graph library."""
    def __init__(self, args):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.lib = ctypes.CDLL('%s/build/dll/libgnn.so' % dir_path)
        # Declare C return types so ctypes does not truncate 64-bit pointers.
        self.lib.GetGraphStruct.restype = ctypes.c_void_p
        self.lib.PrepareBatchGraph.restype = ctypes.c_int
        self.lib.PrepareSparseMatrices.restype = ctypes.c_int
        self.lib.NumEdgePairs.restype = ctypes.c_int
        if sys.version_info[0] > 2:
            args = [arg.encode() for arg in args]  # str -> bytes for each element in args
        arr = (ctypes.c_char_p * len(args))()
        arr[:] = args
        self.lib.Init(len(args), arr)
        # Opaque handle to the C-side graph struct, reused for every batch.
        self.batch_graph_handle = ctypes.c_void_p(self.lib.GetGraphStruct())
    def _prepare_graph(self, graph_list, is_directed=0):
        # Hand the C library per-graph node/edge counts plus raw pointers to
        # each graph's flattened int32 edge_pairs buffer (see S2VGraph).
        edgepair_list = (ctypes.c_void_p * len(graph_list))()
        list_num_nodes = np.zeros((len(graph_list), ), dtype=np.int32)
        list_num_edges = np.zeros((len(graph_list), ), dtype=np.int32)
        for i in range(len(graph_list)):
            if type(graph_list[i].edge_pairs) is ctypes.c_void_p:
                edgepair_list[i] = graph_list[i].edge_pairs
            elif type(graph_list[i].edge_pairs) is np.ndarray:
                edgepair_list[i] = ctypes.c_void_p(graph_list[i].edge_pairs.ctypes.data)
            else:
                raise NotImplementedError
            list_num_nodes[i] = graph_list[i].num_nodes
            list_num_edges[i] = graph_list[i].num_edges
        total_num_nodes = np.sum(list_num_nodes)
        total_num_edges = np.sum(list_num_edges)
        self.lib.PrepareBatchGraph(self.batch_graph_handle,
                                   len(graph_list),
                                   ctypes.c_void_p(list_num_nodes.ctypes.data),
                                   ctypes.c_void_p(list_num_edges.ctypes.data),
                                   ctypes.cast(edgepair_list, ctypes.c_void_p),
                                   is_directed)
        return total_num_nodes, total_num_edges
    def PrepareSparseMatrices(self, graph_list, is_directed=0):
        # Build three batched sparse tensors describing the whole batch:
        # n2n (node->node adjacency), e2n (edge->node incidence) and
        # subg (graph->node membership). The C call fills the pre-allocated
        # index/value buffers in place through the raw pointers below.
        assert not is_directed
        total_num_nodes, total_num_edges = self._prepare_graph(graph_list, is_directed)
        n2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        n2n_vals = torch.FloatTensor(total_num_edges * 2)
        e2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        e2n_vals = torch.FloatTensor(total_num_edges * 2)
        subg_idxes = torch.LongTensor(2, total_num_nodes)
        subg_vals = torch.FloatTensor(total_num_nodes)
        idx_list = (ctypes.c_void_p * 3)()
        idx_list[0] = n2n_idxes.numpy().ctypes.data
        idx_list[1] = e2n_idxes.numpy().ctypes.data
        idx_list[2] = subg_idxes.numpy().ctypes.data
        val_list = (ctypes.c_void_p * 3)()
        val_list[0] = n2n_vals.numpy().ctypes.data
        val_list[1] = e2n_vals.numpy().ctypes.data
        val_list[2] = subg_vals.numpy().ctypes.data
        self.lib.PrepareSparseMatrices(self.batch_graph_handle,
                                       ctypes.cast(idx_list, ctypes.c_void_p),
                                       ctypes.cast(val_list, ctypes.c_void_p))
        n2n_sp = torch.sparse.FloatTensor(n2n_idxes, n2n_vals, torch.Size([total_num_nodes, total_num_nodes]))
        e2n_sp = torch.sparse.FloatTensor(e2n_idxes, e2n_vals, torch.Size([total_num_nodes, total_num_edges * 2]))
        subg_sp = torch.sparse.FloatTensor(subg_idxes, subg_vals, torch.Size([len(graph_list), total_num_nodes]))
        return n2n_sp, e2n_sp, subg_sp
dll_path = '%s/build/dll/libgnn.so' % os.path.dirname(os.path.realpath(__file__))
# Only instantiate the wrapper when the compiled shared library exists;
# importers must handle GNNLIB being None otherwise.
if os.path.exists(dll_path):
    GNNLIB = _gnn_lib(sys.argv)
else:
    GNNLIB = None
| 3,813 | 40.912088 | 114 | py |
GXN | GXN-main/pytorch_structure2vec-master/graph_classification/main.py | import sys
import os
import torch
import random
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append('%s/../s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from embedding import EmbedMeanField, EmbedLoopyBP
from mlp import MLPClassifier
from util import cmd_args, load_data
class Classifier(nn.Module):
    """End-to-end graph classifier: structure2vec embedding + MLP head.

    All hyperparameters come from the module-level cmd_args.
    """
    def __init__(self):
        super(Classifier, self).__init__()
        # Pick the embedding model from the -gm command-line flag.
        if cmd_args.gm == 'mean_field':
            model = EmbedMeanField
        elif cmd_args.gm == 'loopy_bp':
            model = EmbedLoopyBP
        else:
            print('unknown gm %s' % cmd_args.gm)
            sys.exit()
        self.s2v = model(latent_dim=cmd_args.latent_dim,
                        output_dim=cmd_args.out_dim,
                        num_node_feats=cmd_args.feat_dim,
                        num_edge_feats=0,
                        max_lv=cmd_args.max_lv)
        out_dim = cmd_args.out_dim
        if out_dim == 0:
            # out_dim == 0 means the embedder emits latent_dim features.
            out_dim = cmd_args.latent_dim
        self.mlp = MLPClassifier(input_size=out_dim, hidden_size=cmd_args.hidden, num_class=cmd_args.num_class)
    def PrepareFeatureLabel(self, batch_graph):
        # One-hot encode every node tag across the batch and collect labels.
        labels = torch.LongTensor(len(batch_graph))
        n_nodes = 0
        concat_feat = []
        for i in range(len(batch_graph)):
            labels[i] = batch_graph[i].label
            n_nodes += batch_graph[i].num_nodes
            concat_feat += batch_graph[i].node_tags
        concat_feat = torch.LongTensor(concat_feat).view(-1, 1)
        node_feat = torch.zeros(n_nodes, cmd_args.feat_dim)
        node_feat.scatter_(1, concat_feat, 1)  # one-hot per node
        if cmd_args.mode == 'gpu':
            node_feat = node_feat.cuda()
            labels = labels.cuda()
        return node_feat, labels
    def forward(self, batch_graph):
        node_feat, labels = self.PrepareFeatureLabel(batch_graph)
        embed = self.s2v(batch_graph, node_feat, None)
        # MLPClassifier returns (logits, loss, acc) since labels are given.
        return self.mlp(embed, labels)
def loop_dataset(g_list, classifier, sample_idxes, optimizer=None, bsize=cmd_args.batch_size):
    """Run one pass over sample_idxes in minibatches.

    Trains when an optimizer is given, otherwise evaluates. Returns
    np.array([mean_loss, mean_accuracy]) weighted by batch size.
    """
    total_loss = []
    # During evaluation, round the batch count up so a trailing partial batch
    # is still visited; during training the remainder is dropped.
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize
    pbar = tqdm(range(total_iters), unit='batch')
    n_samples = 0
    for pos in pbar:
        selected_idx = sample_idxes[pos * bsize : (pos + 1) * bsize]
        batch_graph = [g_list[idx] for idx in selected_idx]
        _, loss, acc = classifier(batch_graph)
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # `loss` is a 0-dim tensor in torch >= 0.4, where the original
        # `loss.data.cpu().numpy()[0]` raises IndexError; .item() handles it.
        loss = loss.data.cpu().item()
        pbar.set_description('loss: %0.5f acc: %0.5f' % (loss, acc))
        total_loss.append(np.array([loss, acc]) * len(selected_idx))
        n_samples += len(selected_idx)
    if optimizer is None:
        assert n_samples == len(sample_idxes)
    total_loss = np.array(total_loss)
    avg_loss = np.sum(total_loss, 0) / n_samples
    return avg_loss
if __name__ == '__main__':
    # Reproducibility: fix all RNG seeds from the command line.
    random.seed(cmd_args.seed)
    np.random.seed(cmd_args.seed)
    torch.manual_seed(cmd_args.seed)
    train_graphs, test_graphs = load_data()
    print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
    classifier = Classifier()
    if cmd_args.mode == 'gpu':
        classifier = classifier.cuda()
    optimizer = optim.Adam(classifier.parameters(), lr=cmd_args.learning_rate)
    train_idxes = list(range(len(train_graphs)))
    best_loss = None
    for epoch in range(cmd_args.num_epochs):
        # Shuffle the training order each epoch, then train and evaluate.
        random.shuffle(train_idxes)
        avg_loss = loop_dataset(train_graphs, classifier, train_idxes, optimizer=optimizer)
        print('\033[92maverage training of epoch %d: loss %.5f acc %.5f\033[0m' % (epoch, avg_loss[0], avg_loss[1]))
        test_loss = loop_dataset(test_graphs, classifier, list(range(len(test_graphs))))
        print('\033[93maverage test of epoch %d: loss %.5f acc %.5f\033[0m' % (epoch, test_loss[0], test_loss[1]))
        # if best_loss is None or test_loss[0] < best_loss:
        # best_loss = test_loss[0]
        # print('----saving to best model since this is the best valid loss so far.----')
        # torch.save(classifier.state_dict(), cmd_args.save_dir + '/epoch-best.model')
        # save_args(cmd_args.save_dir + '/epoch-best-args.pkl', cmd_args)
| 4,506 | 35.346774 | 116 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/s2v_lib.py | import ctypes
import numpy as np
import os
import sys
import torch
class _s2v_lib(object):
    """ctypes bridge to the compiled libs2v.so structure2vec helper library."""
    def __init__(self, args):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.lib = ctypes.CDLL('%s/build/dll/libs2v.so' % dir_path)
        # Declare C return types so ctypes does not truncate 64-bit pointers.
        self.lib.GetGraphStruct.restype = ctypes.c_void_p
        self.lib.PrepareBatchGraph.restype = ctypes.c_int
        self.lib.PrepareMeanField.restype = ctypes.c_int
        self.lib.PrepareLoopyBP.restype = ctypes.c_int
        self.lib.NumEdgePairs.restype = ctypes.c_int
        if sys.version_info[0] > 2:
            args = [arg.encode() for arg in args]  # str -> bytes for each element in args
        arr = (ctypes.c_char_p * len(args))()
        arr[:] = args
        self.lib.Init(len(args), arr)
        # Opaque handle to the C-side graph struct, reused for every batch.
        self.batch_graph_handle = ctypes.c_void_p(self.lib.GetGraphStruct())
    def _prepare_graph(self, graph_list, is_directed=0):
        # Hand the C library per-graph node/edge counts plus raw pointers to
        # each graph's flattened int32 edge_pairs buffer.
        edgepair_list = (ctypes.c_void_p * len(graph_list))()
        list_num_nodes = np.zeros((len(graph_list), ), dtype=np.int32)
        list_num_edges = np.zeros((len(graph_list), ), dtype=np.int32)
        for i in range(len(graph_list)):
            if type(graph_list[i].edge_pairs) is ctypes.c_void_p:
                edgepair_list[i] = graph_list[i].edge_pairs
            elif type(graph_list[i].edge_pairs) is np.ndarray:
                edgepair_list[i] = ctypes.c_void_p(graph_list[i].edge_pairs.ctypes.data)
            else:
                raise NotImplementedError
            list_num_nodes[i] = graph_list[i].num_nodes
            list_num_edges[i] = graph_list[i].num_edges
        total_num_nodes = np.sum(list_num_nodes)
        total_num_edges = np.sum(list_num_edges)
        self.lib.PrepareBatchGraph(self.batch_graph_handle,
                                len(graph_list),
                                ctypes.c_void_p(list_num_nodes.ctypes.data),
                                ctypes.c_void_p(list_num_edges.ctypes.data),
                                ctypes.cast(edgepair_list, ctypes.c_void_p),
                                is_directed)
        return total_num_nodes, total_num_edges
    def PrepareMeanField(self, graph_list, is_directed=0):
        # Build (n2n, e2n, subg) sparse tensors for mean-field message
        # passing; the C call fills the pre-allocated buffers in place.
        assert not is_directed
        total_num_nodes, total_num_edges = self._prepare_graph(graph_list, is_directed)
        n2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        n2n_vals = torch.FloatTensor(total_num_edges * 2)
        e2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        e2n_vals = torch.FloatTensor(total_num_edges * 2)
        subg_idxes = torch.LongTensor(2, total_num_nodes)
        subg_vals = torch.FloatTensor(total_num_nodes)
        idx_list = (ctypes.c_void_p * 3)()
        idx_list[0] = n2n_idxes.numpy().ctypes.data
        idx_list[1] = e2n_idxes.numpy().ctypes.data
        idx_list[2] = subg_idxes.numpy().ctypes.data
        val_list = (ctypes.c_void_p * 3)()
        val_list[0] = n2n_vals.numpy().ctypes.data
        val_list[1] = e2n_vals.numpy().ctypes.data
        val_list[2] = subg_vals.numpy().ctypes.data
        self.lib.PrepareMeanField(self.batch_graph_handle,
                                ctypes.cast(idx_list, ctypes.c_void_p),
                                ctypes.cast(val_list, ctypes.c_void_p))
        n2n_sp = torch.sparse.FloatTensor(n2n_idxes, n2n_vals, torch.Size([total_num_nodes, total_num_nodes]))
        e2n_sp = torch.sparse.FloatTensor(e2n_idxes, e2n_vals, torch.Size([total_num_nodes, total_num_edges * 2]))
        subg_sp = torch.sparse.FloatTensor(subg_idxes, subg_vals, torch.Size([len(graph_list), total_num_nodes]))
        return n2n_sp, e2n_sp, subg_sp
    def PrepareLoopyBP(self, graph_list, is_directed=0):
        # Build (n2e, e2e, e2n, subg) sparse tensors for loopy belief
        # propagation; e2e size comes from the C-side edge-pair count.
        assert not is_directed
        total_num_nodes, total_num_edges = self._prepare_graph(graph_list, is_directed)
        total_edge_pairs = self.lib.NumEdgePairs(self.batch_graph_handle)
        n2e_idxes = torch.LongTensor(2, total_num_edges * 2)
        n2e_vals = torch.FloatTensor(total_num_edges * 2)
        e2e_idxes = torch.LongTensor(2, total_edge_pairs)
        e2e_vals = torch.FloatTensor(total_edge_pairs)
        e2n_idxes = torch.LongTensor(2, total_num_edges * 2)
        e2n_vals = torch.FloatTensor(total_num_edges * 2)
        subg_idxes = torch.LongTensor(2, total_num_nodes)
        subg_vals = torch.FloatTensor(total_num_nodes)
        idx_list = (ctypes.c_void_p * 4)()
        idx_list[0] = ctypes.c_void_p(n2e_idxes.numpy().ctypes.data)
        idx_list[1] = ctypes.c_void_p(e2e_idxes.numpy().ctypes.data)
        idx_list[2] = ctypes.c_void_p(e2n_idxes.numpy().ctypes.data)
        idx_list[3] = ctypes.c_void_p(subg_idxes.numpy().ctypes.data)
        val_list = (ctypes.c_void_p * 4)()
        val_list[0] = ctypes.c_void_p(n2e_vals.numpy().ctypes.data)
        val_list[1] = ctypes.c_void_p(e2e_vals.numpy().ctypes.data)
        val_list[2] = ctypes.c_void_p(e2n_vals.numpy().ctypes.data)
        val_list[3] = ctypes.c_void_p(subg_vals.numpy().ctypes.data)
        self.lib.PrepareLoopyBP(self.batch_graph_handle,
                                ctypes.cast(idx_list, ctypes.c_void_p),
                                ctypes.cast(val_list, ctypes.c_void_p))
        n2e_sp = torch.sparse.FloatTensor(n2e_idxes, n2e_vals, torch.Size([total_num_edges * 2, total_num_nodes]))
        e2e_sp = torch.sparse.FloatTensor(e2e_idxes, e2e_vals, torch.Size([total_num_edges * 2, total_num_edges * 2]))
        e2n_sp = torch.sparse.FloatTensor(e2n_idxes, e2n_vals, torch.Size([total_num_nodes, total_num_edges * 2]))
        subg_sp = torch.sparse.FloatTensor(subg_idxes, subg_vals, torch.Size([len(graph_list), total_num_nodes]))
        return n2e_sp, e2e_sp, e2n_sp, subg_sp
dll_path = '%s/build/dll/libs2v.so' % os.path.dirname(os.path.realpath(__file__))
# Only instantiate the wrapper when the compiled shared library exists;
# importers must handle S2VLIB being None otherwise.
if os.path.exists(dll_path):
    S2VLIB = _s2v_lib(sys.argv)
else:
    S2VLIB = None
if __name__ == '__main__':
    # Smoke test: load the harvard_cep test molecules and build loopy-BP
    # sparse matrices for the first 10 graphs.
    sys.path.append('%s/../harvard_cep' % os.path.dirname(os.path.realpath(__file__)))
    from util import resampling_idxes, load_raw_data
    from mol_lib import MOLLIB, MolGraph
    raw_data_dict = load_raw_data()
    test_data = MOLLIB.LoadMolGraph('test', raw_data_dict['test'])
    batch_graph = test_data[0:10]
    S2VLIB.PrepareLoopyBP(batch_graph)
| 6,308 | 43.429577 | 118 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/embedding.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from s2v_lib import S2VLIB
from pytorch_util import weights_init, gnn_spmm
class EmbedMeanField(nn.Module):
    """structure2vec graph embedding with mean-field message passing.

    Produces one embedding per graph (output_dim wide, or latent_dim when
    output_dim == 0).
    """
    def __init__(self, latent_dim, output_dim, num_node_feats, num_edge_feats, max_lv = 3):
        super(EmbedMeanField, self).__init__()
        self.latent_dim = latent_dim
        self.output_dim = output_dim
        self.num_node_feats = num_node_feats
        self.num_edge_feats = num_edge_feats
        self.max_lv = max_lv  # number of message-passing rounds
        self.w_n2l = nn.Linear(num_node_feats, latent_dim)
        if num_edge_feats > 0:
            self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
        if output_dim > 0:
            self.out_params = nn.Linear(latent_dim, output_dim)
        self.conv_params = nn.Linear(latent_dim, latent_dim)
        weights_init(self)
    def forward(self, graph_list, node_feat, edge_feat):
        # Batched sparse operators: node->node, edge->node, graph->node.
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        if type(node_feat) is torch.cuda.FloatTensor:
            n2n_sp = n2n_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        # Variable wrappers are legacy no-ops on modern torch.
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        h = self.mean_field(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp)
        return h
    def mean_field(self, node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp):
        input_node_linear = self.w_n2l(node_feat)
        input_message = input_node_linear
        if edge_feat is not None:
            input_edge_linear = self.w_e2l(edge_feat)
            # Aggregate incident-edge messages into each node.
            e2npool_input = gnn_spmm(e2n_sp, input_edge_linear)
            input_message += e2npool_input
        input_potential = F.relu(input_message)
        lv = 0
        cur_message_layer = input_potential
        while lv < self.max_lv:
            # h_{t+1} = relu(conv(A @ h_t) + input message)
            n2npool = gnn_spmm(n2n_sp, cur_message_layer)
            node_linear = self.conv_params( n2npool )
            merged_linear = node_linear + input_message
            cur_message_layer = F.relu(merged_linear)
            lv += 1
        if self.output_dim > 0:
            out_linear = self.out_params(cur_message_layer)
            reluact_fp = F.relu(out_linear)
        else:
            reluact_fp = cur_message_layer
        # Sum node embeddings per graph to obtain graph-level embeddings.
        y_potential = gnn_spmm(subg_sp, reluact_fp)
        return F.relu(y_potential)
class EmbedLoopyBP(nn.Module):
    """structure2vec graph embedding via loopy belief propagation.

    Messages live on directed edges; after `max_lv` rounds they are pooled
    onto nodes and then sum-pooled per graph.
    """
    def __init__(self, latent_dim, output_dim, num_node_feats, num_edge_feats, max_lv = 3):
        super(EmbedLoopyBP, self).__init__()
        self.latent_dim = latent_dim
        self.max_lv = max_lv
        self.w_n2l = nn.Linear(num_node_feats, latent_dim)
        self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
        self.out_params = nn.Linear(latent_dim, output_dim)
        self.conv_params = nn.Linear(latent_dim, latent_dim)
        weights_init(self)

    def forward(self, graph_list, node_feat, edge_feat):
        """Embed a batch of graphs; returns a (num_graphs, output_dim) tensor."""
        # Sparse operators: node->edge, edge->edge, edge->node, graph membership.
        n2e_sp, e2e_sp, e2n_sp, subg_sp = S2VLIB.PrepareLoopyBP(graph_list)
        # Follow the device of the input instead of testing for the exact
        # torch.cuda.FloatTensor class (consistent with EmbedMeanField).
        if node_feat.is_cuda:
            n2e_sp = n2e_sp.cuda()
            e2e_sp = e2e_sp.cuda()
            e2n_sp = e2n_sp.cuda()
            subg_sp = subg_sp.cuda()
        node_feat = Variable(node_feat)
        edge_feat = Variable(edge_feat)
        n2e_sp = Variable(n2e_sp)
        e2e_sp = Variable(e2e_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)

        h = self.loopy_bp(node_feat, edge_feat, n2e_sp, e2e_sp, e2n_sp, subg_sp)
        return h

    def loopy_bp(self, node_feat, edge_feat, n2e_sp, e2e_sp, e2n_sp, subg_sp):
        """Run max_lv BP rounds over edges, pool to nodes, then per graph."""
        input_node_linear = self.w_n2l(node_feat)
        input_edge_linear = self.w_e2l(edge_feat)
        # Initial edge message = edge features + features of the source node.
        n2epool_input = gnn_spmm(n2e_sp, input_node_linear)
        input_message = input_edge_linear + n2epool_input
        input_potential = F.relu(input_message)

        lv = 0
        cur_message_layer = input_potential
        while lv < self.max_lv:
            # Edge-to-edge propagation with a skip connection to the inputs.
            e2epool = gnn_spmm(e2e_sp, cur_message_layer)
            edge_linear = self.conv_params(e2epool)
            merged_linear = edge_linear + input_message
            cur_message_layer = F.relu(merged_linear)
            lv += 1

        # Collapse edge messages onto nodes, apply the output head, and
        # sum within each graph.
        e2npool = gnn_spmm(e2n_sp, cur_message_layer)
        hidden_msg = F.relu(e2npool)
        out_linear = self.out_params(hidden_msg)
        reluact_fp = F.relu(out_linear)
        y_potential = gnn_spmm(subg_sp, reluact_fp)
        return F.relu(y_potential)
| 4,855 | 33.685714 | 91 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/pytorch_util.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from s2v_lib import S2VLIB
def glorot_uniform(t):
    """Fill tensor *t* in place with Glorot/Xavier uniform values."""
    shape = t.size()
    if len(shape) == 2:
        fan_in, fan_out = shape
    elif len(shape) == 3:
        # Conv1d weight layout: (out_channels, in_channels, kernel_size)
        fan_in = shape[1] * shape[2]
        fan_out = shape[0] * shape[2]
    else:
        # Fallback for other ranks: treat total size as both fans.
        fan_in = fan_out = np.prod(shape)
    limit = np.sqrt(6.0 / (fan_in + fan_out))
    t.uniform_(-limit, limit)
def _param_init(m):
    """Initialise one module or parameter: Glorot weights, zero Linear biases."""
    if isinstance(m, nn.Linear):
        if m.bias is not None:
            m.bias.data.zero_()
        glorot_uniform(m.weight.data)
    elif isinstance(m, Parameter):
        glorot_uniform(m.data)
def weights_init(m):
    """Apply Glorot initialisation to every submodule and top-level parameter of *m*."""
    for module in m.modules():
        if isinstance(module, nn.ParameterList):
            for param in module:
                _param_init(param)
        else:
            _param_init(module)
    # Parameters registered directly on the top-level module are not reached
    # via modules(), so handle them separately here.
    for name, param in m.named_parameters():
        if '.' not in name:  # top-level parameters
            _param_init(param)
class MySpMM(torch.autograd.Function):
    """Autograd-aware (sparse) matrix @ dense matrix product.

    Gradients flow only to the dense right operand; the left (typically
    sparse) matrix is treated as a constant.
    """
    @staticmethod
    def forward(ctx, sp_mat, dense_mat):
        ctx.save_for_backward(sp_mat, dense_mat)
        return torch.mm(sp_mat, dense_mat)

    @staticmethod
    def backward(ctx, grad_output):
        # `saved_variables` was removed from modern PyTorch; `saved_tensors`
        # is the supported accessor.
        sp_mat, dense_mat = ctx.saved_tensors
        grad_matrix1 = grad_matrix2 = None
        # No gradient is defined w.r.t. the (sparse) left operand.
        assert not ctx.needs_input_grad[0]
        if ctx.needs_input_grad[1]:
            # d(sp_mat @ dense)/d(dense) = sp_mat^T @ grad_output
            grad_matrix2 = Variable(torch.mm(sp_mat.data.t(), grad_output.data))
        return grad_matrix1, grad_matrix2
def gnn_spmm(sp_mat, dense_mat):
    # Sparse @ dense matmul with autograd support; gradient flows to the
    # dense operand only (see MySpMM).
    return MySpMM.apply(sp_mat, dense_mat)
| 1,885 | 25.194444 | 80 | py |
GXN | GXN-main/pytorch_structure2vec-master/s2v_lib/mlp.py | from __future__ import print_function
import os
import sys
import numpy as np
import torch
import random
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from pytorch_util import weights_init
class MLPRegression(nn.Module):
    """Two-layer MLP head producing a single scalar regression output.

    When targets are supplied, the forward pass also returns MAE and MSE.
    """
    def __init__(self, input_size, hidden_size):
        super(MLPRegression, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, 1)
        weights_init(self)

    def forward(self, x, y = None):
        # Hidden layer with ReLU, then a linear read-out to one value.
        hidden = F.relu(self.h1_weights(x))
        pred = self.h2_weights(hidden)
        if y is None:
            return pred
        y = Variable(y)
        mse = F.mse_loss(pred, y)
        mae = F.l1_loss(pred, y)
        return pred, mae, mse
class MLPClassifier(nn.Module):
    # Two-layer MLP classification head returning log-probabilities.
    def __init__(self, input_size, hidden_size, num_class):
        super(MLPClassifier, self).__init__()
        self.h1_weights = nn.Linear(input_size, hidden_size)
        self.h2_weights = nn.Linear(hidden_size, num_class)
        weights_init(self)
    def forward(self, x, y = None):
        """Return log-softmax logits; with targets, also NLL loss and accuracy."""
        h1 = self.h1_weights(x)
        h1 = F.relu(h1)
        logits = self.h2_weights(h1)
        logits = F.log_softmax(logits, dim=1)
        if y is not None:
            y = Variable(y)
            loss = F.nll_loss(logits, y)
            # argmax over classes; keepdim so eq/view_as shapes line up
            pred = logits.data.max(1, keepdim=True)[1]
            acc = pred.eq(y.data.view_as(pred)).cpu().sum() / float(y.size()[0])
            return logits, loss, acc
        else:
            return logits | 1,710 | 25.734375 | 80 | py |
GXN | GXN-main/pytorch_structure2vec-master/harvard_cep/main.py | import sys
import os
from mol_lib import MOLLIB, MolGraph
import torch
import random
import numpy as np
from tqdm import tqdm
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
sys.path.append('%s/../s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from embedding import EmbedMeanField, EmbedLoopyBP
from mlp import MLPRegression
from util import resampling_idxes, load_raw_data
import argparse
cmd_opt = argparse.ArgumentParser(description='Argparser for harvard cep')
cmd_opt.add_argument('-saved_model', default=None, help='start from existing model')
cmd_opt.add_argument('-save_dir', default='./saved', help='save_dir')
cmd_opt.add_argument('-mode', default='gpu', help='cpu/gpu')
cmd_opt.add_argument('-gm', default='mean_field', help='mean_field/loopy_bp')
cmd_opt.add_argument('-phase', default='train', help='train/test')
cmd_opt.add_argument('-batch_size', type=int, default=50, help='minibatch size')
cmd_opt.add_argument('-seed', type=int, default=1, help='seed')
cmd_opt.add_argument('-gen_depth', type=int, default=10, help='depth of generator')
cmd_opt.add_argument('-num_epochs', type=int, default=1000, help='number of epochs')
cmd_opt.add_argument('-latent_dim', type=int, default=64, help='dimension of latent layers')
cmd_opt.add_argument('-out_dim', type=int, default=1024, help='s2v output size')
cmd_opt.add_argument('-hidden', type=int, default=100, help='dimension of regression')
cmd_opt.add_argument('-max_lv', type=int, default=4, help='max rounds of message passing')
cmd_opt.add_argument('-learning_rate', type=float, default=0.0001, help='init learning_rate')
cmd_args, _ = cmd_opt.parse_known_args()
def loop_dataset(mol_list, regressor, sample_idxes, optimizer=None, start_iter=None, n_iters=None, bsize=cmd_args.batch_size):
    """Run one pass (or a slice of one pass) over the dataset.

    Trains when *optimizer* is given, otherwise evaluates.  Returns
    [mean MAE, RMSE] weighted by the number of samples actually seen.
    """
    total_loss = []
    # Ceil division in eval mode (the trailing partial batch must be scored);
    # floor division while training (the partial batch is skipped).
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize
    if start_iter is not None:
        # Only run iterations [start_iter, start_iter + n_iters), clamped.
        ed_iter = start_iter + n_iters
        if ed_iter > total_iters:
            ed_iter = total_iters
        pbar = tqdm(range(start_iter, ed_iter), unit='batch')
    else:
        pbar = tqdm(range(total_iters), unit='batch')
    n_samples = 0
    for pos in pbar:
        selected_idx = sample_idxes[pos * bsize : (pos + 1) * bsize]
        batch_graph = [mol_list[idx] for idx in selected_idx]
        _, mae, mse = regressor(batch_graph)
        if optimizer is not None:
            optimizer.zero_grad()
            mse.backward()
            optimizer.step()
        # NOTE(review): indexing [0] assumes a 1-element loss tensor; on newer
        # PyTorch a 0-dim tensor would need .item() — confirm target version.
        mae = mae.data.cpu().numpy()[0]
        mse = mse.data.cpu().numpy()[0]
        pbar.set_description('mae: %0.5f rmse: %0.5f' % (mae, np.sqrt(mse)) )
        # Weight per-batch losses by batch size so the final mean is exact.
        total_loss.append( np.array([mae, mse]) * len(selected_idx))
        n_samples += len(selected_idx)
    if optimizer is None:
        # Evaluation must have covered every sample exactly once.
        assert n_samples == len(sample_idxes)
    total_loss = np.array(total_loss)
    avg_loss = np.sum(total_loss, 0) / n_samples
    avg_loss[1] = np.sqrt(avg_loss[1])  # MSE -> RMSE
    return avg_loss
class Regressor(nn.Module):
    """End-to-end model: structure2vec graph embedding + MLP regression head."""
    def __init__(self):
        super(Regressor, self).__init__()
        # Select the embedding flavour requested on the command line.
        embed_classes = {'mean_field': EmbedMeanField, 'loopy_bp': EmbedLoopyBP}
        if cmd_args.gm not in embed_classes:
            print('unknown gm %s' % cmd_args.gm)
            sys.exit()
        model = embed_classes[cmd_args.gm]
        self.s2v = model(latent_dim=cmd_args.latent_dim,
                        output_dim=cmd_args.out_dim,
                        num_node_feats=MOLLIB.num_node_feats,
                        num_edge_feats=MOLLIB.num_edge_feats,
                        max_lv=cmd_args.max_lv)
        self.mlp = MLPRegression(input_size=cmd_args.out_dim, hidden_size=cmd_args.hidden)

    def forward(self, batch_graph):
        node_feat, edge_feat, labels = MOLLIB.PrepareFeatureLabel(batch_graph)
        if cmd_args.mode == 'gpu':
            node_feat = node_feat.cuda()
            edge_feat = edge_feat.cuda()
            labels = labels.cuda()
        graph_embed = self.s2v(batch_graph, node_feat, edge_feat)
        return self.mlp(graph_embed, labels)
if __name__ == '__main__':
    # Reproducibility: seed all RNGs from the command-line seed.
    random.seed(cmd_args.seed)
    np.random.seed(cmd_args.seed)
    torch.manual_seed(cmd_args.seed)
    raw_data_dict = load_raw_data()
    regressor = Regressor()
    if cmd_args.mode == 'gpu':
        regressor = regressor.cuda()
    # Optionally resume from a saved checkpoint (map to CPU when running on CPU).
    if cmd_args.saved_model is not None and cmd_args.saved_model != '':
        if os.path.isfile(cmd_args.saved_model):
            print('loading model from %s' % cmd_args.saved_model)
            if cmd_args.mode == 'cpu':
                regressor.load_state_dict(torch.load(cmd_args.saved_model, map_location=lambda storage, loc: storage))
            else:
                regressor.load_state_dict(torch.load(cmd_args.saved_model))
    # Test-only mode: score the test split and exit.
    if cmd_args.phase == 'test':
        test_data = MOLLIB.LoadMolGraph('test', raw_data_dict['test'])
        test_loss = loop_dataset(test_data, regressor, list(range(len(test_data))))
        print('\033[93maverage test loss: mae %.5f rmse %.5f\033[0m' % (test_loss[0], test_loss[1]))
        sys.exit()
    train_idxes = resampling_idxes(raw_data_dict)
    cooked_data_dict = {}
    for d in raw_data_dict:
        cooked_data_dict[d] = MOLLIB.LoadMolGraph(d, raw_data_dict[d])
    optimizer = optim.Adam(regressor.parameters(), lr=cmd_args.learning_rate)
    iter_train = (len(train_idxes) + (cmd_args.batch_size - 1)) // cmd_args.batch_size
    best_valid_loss = None
    for epoch in range(cmd_args.num_epochs):
        # Validate every `valid_interval` training iterations, not once per epoch.
        valid_interval = 10000
        for i in range(0, iter_train, valid_interval):
            avg_loss = loop_dataset(cooked_data_dict['train'], regressor, train_idxes, optimizer, start_iter=i, n_iters=valid_interval)
            print('\033[92maverage training of epoch %.2f: mae %.5f rmse %.5f\033[0m' % (epoch + min(float(i + valid_interval) / iter_train, 1.0), avg_loss[0], avg_loss[1]))
            valid_loss = loop_dataset(cooked_data_dict['valid'], regressor, list(range(len(cooked_data_dict['valid']))))
            print('\033[93maverage valid of epoch %.2f: mae %.5f rmse %.5f\033[0m' % (epoch + min(float(i + valid_interval) / iter_train, 1.0), valid_loss[0], valid_loss[1]))
            # Keep only the checkpoint with the best validation MAE so far.
            if best_valid_loss is None or valid_loss[0] < best_valid_loss:
                best_valid_loss = valid_loss[0]
                print('----saving to best model since this is the best valid loss so far.----')
                torch.save(regressor.state_dict(), cmd_args.save_dir + '/epoch-best.model')
        random.shuffle(train_idxes)
| 6,670 | 42.888158 | 174 | py |
GXN | GXN-main/pytorch_structure2vec-master/harvard_cep/mol_lib.py | import ctypes
import numpy as np
import os
import sys
import torch
from tqdm import tqdm
class _mol_lib(object):
    """ctypes wrapper around the compiled libmol.so molecule-graph backend."""
    def __init__(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        self.lib = ctypes.CDLL('%s/build/dll/libmol.so' % dir_path)
        # self.lib.Smiles2Graph.restype = ctypes.c_void_p
        # Declare C return types so ctypes does not default everything to int.
        self.lib.PrepareBatchFeature.restype = ctypes.c_int
        self.lib.DumpFeatures.restype = ctypes.c_int
        self.lib.LoadMolGraph.restype = ctypes.c_int
        self.lib.NodeFeatDim.restype = ctypes.c_int
        self.lib.EdgeFeatDim.restype = ctypes.c_int
        self.lib.NumNodes.restype = ctypes.c_int
        self.lib.NumEdges.restype = ctypes.c_int
        self.lib.EdgeList.restype = ctypes.c_void_p
        self.num_node_feats = self.lib.NodeFeatDim()
        self.num_edge_feats = self.lib.EdgeFeatDim()
    def PrepareFeatureLabel(self, molgraph_list):
        """Pack a batch of MolGraphs into (node_feat, edge_feat, label) tensors."""
        c_list = (ctypes.c_void_p * len(molgraph_list))()
        total_num_nodes = 0
        total_num_edges = 0
        for i in range(len(molgraph_list)):
            c_list[i] = molgraph_list[i].handle
            total_num_nodes += molgraph_list[i].num_nodes
            total_num_edges += molgraph_list[i].num_edges
        torch_node_feat = torch.zeros(total_num_nodes, self.num_node_feats)
        # Each undirected edge is materialized twice (one row per direction).
        torch_edge_feat = torch.zeros(total_num_edges * 2, self.num_edge_feats)
        torch_label = torch.zeros(len(molgraph_list), 1)
        node_feat = torch_node_feat.numpy()
        edge_feat = torch_edge_feat.numpy()
        label = torch_label.numpy()
        # The C side fills the numpy buffers in place through raw pointers.
        self.lib.PrepareBatchFeature(len(molgraph_list), ctypes.cast(c_list, ctypes.c_void_p),
                                    ctypes.c_void_p(node_feat.ctypes.data),
                                    ctypes.c_void_p(edge_feat.ctypes.data))
        for i in range(len(molgraph_list)):
            label[i] = molgraph_list[i].pce
        return torch_node_feat, torch_edge_feat, torch_label
    def DumpFeatures(self, fname):
        # NOTE(review): casting a Python str with ctypes.cast relies on Py2
        # semantics; Python 3 needs bytes here — confirm target interpreter.
        p = ctypes.cast(fname, ctypes.c_char_p)
        self.lib.DumpFeatures(p)
    def LoadMolGraph(self, phase, str_pce_tuples):
        """Load data/<phase>.txt.bin and wrap each native handle in a MolGraph."""
        fname = 'data/%s.txt.bin' % phase
        assert os.path.isfile(fname)
        fname = ctypes.cast(fname, ctypes.c_char_p)
        num_graphs = len(str_pce_tuples)
        c_list = (ctypes.c_void_p * num_graphs)()
        t = self.lib.LoadMolGraph(fname, ctypes.cast(c_list, ctypes.c_void_p))
        # The binary file must contain exactly one graph per (smiles, pce) tuple.
        assert t == num_graphs
        molgraph_list = []
        for i in tqdm(range(0, t)):
            g = MolGraph(c_list[i], str_pce_tuples[i][0], str_pce_tuples[i][1])
            molgraph_list.append(g)
        return molgraph_list
    # def __CtypeNetworkX(self, g):
    #     edges = g.edges()
    #     e_list_from = (ctypes.c_int * len(edges))()
    #     e_list_to = (ctypes.c_int * len(edges))()
    #     if len(edges):
    #         a, b = zip(*edges)
    #         e_list_from[:] = a
    #         e_list_to[:] = b
    #     return (len(g.nodes()), len(edges), ctypes.cast(e_list_from, ctypes.c_void_p), ctypes.cast(e_list_to, ctypes.c_void_p))
    # def TakeSnapshot(self):
    #     self.lib.UpdateSnapshot()
    # def ClearTrainGraphs(self):
    #     self.ngraph_train = 0
    #     self.lib.ClearTrainGraphs()
    # def InsertGraph(self, g, is_test):
    #     n_nodes, n_edges, e_froms, e_tos = self.__CtypeNetworkX(g)
    #     if is_test:
    #         t = self.ngraph_test
    #         self.ngraph_test += 1
    #     else:
    #         t = self.ngraph_train
    #         self.ngraph_train += 1
    #     self.lib.InsertGraph(is_test, t, n_nodes, n_edges, e_froms, e_tos)
    # def LoadModel(self, path_to_model):
    #     p = ctypes.cast(path_to_model, ctypes.c_char_p)
    #     self.lib.LoadModel(p)
    # def GetSol(self, gid, maxn):
    #     sol = (ctypes.c_int * (maxn + 10))()
    #     val = self.lib.GetSol(gid, sol)
    #     return val, sol
dll_path = '%s/build/dll/libmol.so' % os.path.dirname(os.path.realpath(__file__))
if os.path.exists(dll_path):
    MOLLIB = _mol_lib()
    class MolGraph(object):
        # Thin Python-side view over a native molecule-graph handle.
        def __init__(self, handle, smiles, pce):
            self.smiles = smiles
            self.handle = ctypes.c_void_p(handle)
            self.num_nodes = MOLLIB.lib.NumNodes(self.handle)
            self.num_edges = MOLLIB.lib.NumEdges(self.handle)
            # self.edge_pairs = np.ctypeslib.as_array(MOLLIB.lib.EdgeList(self.handle), shape=( self.num_edges * 2, ))
            self.edge_pairs = ctypes.c_void_p(MOLLIB.lib.EdgeList(self.handle))
            self.pce = pce
else:
    # Native library not built: importing still works, but the API is unusable.
    MOLLIB = None
    MolGraph = None
if __name__ == '__main__':
    # Pre-compute the binary feature caches for all three splits.
    MOLLIB.DumpFeatures('data/train.txt')
    MOLLIB.DumpFeatures('data/valid.txt')
    MOLLIB.DumpFeatures('data/test.txt')
SeeChart | SeeChart-main/qna.py | # !pip install transformers
# !pip install datasets
# !pip install nltk
import json
import math
import os
import sys
import nltk # Here to have a nice missing dependency error message early on
import transformers
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
)
from transformers.file_utils import is_offline_mode
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.11.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
def postprocess_text(preds):
    """Strip predictions and newline-join their sentences (rougeLSum format)."""
    cleaned = [pred.strip() for pred in preds]
    # rougeLSum expects newline after each sentence
    return ["\n".join(nltk.sent_tokenize(pred)) for pred in cleaned]
try:
    # Check the punkt sentence-tokenizer data is already available locally.
    nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
    if is_offline_mode():
        raise LookupError(
            "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
        )
    # Serialize the download across concurrent processes via a file lock.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
# NOTE(review): model_name appears unused below — loading is from checkpoint_path.
model_name = "t5-base"
# checkpoint_path = "/content/t5_best_checkpoint_plotqa" # OLD
checkpoint_path = "t5_best_checkpoint_plotqa/checkpoint-560000/"
# Config, tokenizer and seq2seq weights all come from the fine-tuned PlotQA checkpoint.
config = AutoConfig.from_pretrained(
    checkpoint_path,
    cache_dir="cache",
    revision="main",
    use_auth_token=None,
)
tokenizer = AutoTokenizer.from_pretrained(
    checkpoint_path,
    cache_dir="cache",
    use_fast=True,
    revision="main",
    use_auth_token=None,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
    checkpoint_path,
    config=config,
    cache_dir="cache",
    revision="main",
    use_auth_token=None,
)
# Resize embeddings in case the tokenizer vocabulary differs from the config.
model.resize_token_embeddings(len(tokenizer))
# input_text = "Question: What does the 2nd bar from the top in Primary schools represents ? Table: Schools | Pre-primary schools | Primary schools | Secondary schools | Tertiary schools & Egypt Gross enrolment ratio (%) | 100.05 | 99.54 | 84.65 | 23.86 & Luxembourg Gross enrolment ratio (%) | 92.75 | 88.51 | 71.8 | 2.05"
# input_text = "Question: How many bars are there ? Table: Country | Lebanon | Mali | Nepal | Peru & Female % of children under 5 | 1.3 | 11.8 | 3.7 | 0.7 & Male % of children under 5 | 1.8 | 13.9 | 4.5 | 0.8 Chart Type: hbar_categorical Title: Prevalence of severe wasting in children of different countries with age under 5 years x_axis_title: % of children under 5 y_axis_title: Country"
def predict_answer(tokenizer, model, input_text):
    """Run the seq2seq model on *input_text* and return the cleaned answer string."""
    encoded = tokenizer(input_text, return_tensors="pt")
    generated = model.generate(**encoded)
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)
    # Some simple post-processing (strip + sentence newlines)
    return postprocess_text(decoded)[0]
def askMe(chart_id, question):
    """Answer *question* about chart *chart_id*.

    Loads the chart's generated-summary JSON, linearizes the table into the
    text format expected by the T5 QA model, and returns the decoded answer
    string.  Returns None for chart layouts this function does not support.
    """
    # Context manager ensures the file handle is closed (it previously leaked).
    with open('static/generated_new_summary_baseline/' + chart_id + '.json') as f:
        target_json = json.load(f)
    title = target_json['title']
    xAxis = target_json['xAxis']
    yAxis = target_json['yAxis']
    column_type = target_json['columnType']
    graphType = target_json['graphType']
    if column_type == "two" and graphType in ['bar', 'line']:
        # Two-column chart: one row of x values, one row of y values.
        str1 = xAxis.strip()
        str2 = yAxis.strip()
        for i in target_json['data']:
            str1 += " | " + str(i[xAxis]).strip()
            str2 += " | " + str(i[yAxis]).strip()
        input_text = "Question: " + question + "? Table: " + str1 + " & " + str2 + " Title: " + title + " x_axis_title: " + xAxis + " y_axis_title: " + yAxis
        print(input_text)
        answer = predict_answer(tokenizer, model, input_text)
        print(answer)
        return answer
    elif column_type == "multi":
        # Multi-series chart: x values plus one "&"-separated row per legend label.
        str1 = xAxis.strip()
        group = []
        for i in target_json['data']:
            str1 += " | " + str(i[xAxis]).strip()
        for i in range(1, len(target_json['labels'])):
            group.append(target_json['labels'][i])
        group_str = ""
        for i in group:
            group_str += " & " + i
            for j in target_json['data']:
                # str()/strip() for consistency with the two-column branch and
                # so numeric cell values do not break string concatenation.
                group_str += " | " + str(j[i]).strip()
        input_text = "Question: " + question + "? Table: " + str1 + group_str + " Title: " + title + " x_axis_title: " + xAxis + " y_axis_title: " + yAxis
        print(input_text)
        print(question)
        answer = predict_answer(tokenizer, model, input_text)
        print(answer)
        return answer
    # Unsupported chart type/layout: make the implicit None return explicit.
    return None
# QUESTION EXAMPLE : https://arxiv.org/pdf/1909.00997.pdf
# Sample questions / chart ids for manual testing; only the last uncommented
# assignment of each variable takes effect.
question = "Does the Time in minutes increase over the years for Desktop"
# question = "Across all Years, what is the maximum value"
# question = "Across all years, what is the minimum value"
# question = "What is the difference between 2006 and 2007"
# question = "Does the graph contain any zero values"
# question = "Does the graph contain grids"
# question = "How many legend labels are there"
# question = "How many years are there" # WRONG
# question = "How many lines intersect with each other?"
# question = "How many lines are there"
# question = "What is the maximum value for desktop"
# chart_id = "1092"
# chart_id = "795"
# chart_id = "818"
chart_id = "545"
# askMe(chart_id=chart_id, question=question)
| 5,738 | 30.707182 | 393 | py |
lama-cleaner | lama-cleaner-main/scripts/tool.py | import glob
import os
from typing import Dict, List, Union
import torch
from diffusers.utils import is_safetensors_available
if is_safetensors_available():
import safetensors.torch
from huggingface_hub import snapshot_download
from diffusers import DiffusionPipeline, __version__
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import (
CONFIG_NAME,
DIFFUSERS_CACHE,
ONNX_WEIGHTS_NAME,
WEIGHTS_NAME,
)
class CheckpointMergerPipeline(DiffusionPipeline):
"""
A class that that supports merging diffusion models based on the discussion here:
https://github.com/huggingface/diffusers/issues/877
Example usage:-
pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")
merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)
merged_pipe.to('cuda')
prompt = "An astronaut riding a unicycle on Mars"
results = merged_pipe(prompt)
## For more details, see the docstring for the merge method.
"""
    def __init__(self):
        # NOTE(review): config is registered before the DiffusionPipeline base
        # init runs — order kept as-is from the community checkpoint_merger;
        # confirm before reordering.
        self.register_to_config()
        super().__init__()
def _compare_model_configs(self, dict0, dict1):
if dict0 == dict1:
return True
else:
config0, meta_keys0 = self._remove_meta_keys(dict0)
config1, meta_keys1 = self._remove_meta_keys(dict1)
if config0 == config1:
print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.")
return True
return False
def _remove_meta_keys(self, config_dict: Dict):
meta_keys = []
temp_dict = config_dict.copy()
for key in config_dict.keys():
if key.startswith("_"):
temp_dict.pop(key)
meta_keys.append(key)
return (temp_dict, meta_keys)
@torch.no_grad()
def merge(
self,
pretrained_model_name_or_path_list: List[Union[str, os.PathLike]],
**kwargs,
):
"""
Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
in the argument 'pretrained_model_name_or_path_list' as a list.
Parameters:
-----------
pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.
**kwargs:
Supports all the default DiffusionPipeline.get_config_dict kwargs viz..
cache_dir, resume_download, force_download, proxies, local_files_only, use_auth_token, revision, torch_dtype, device_map.
alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha
would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2
interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported.
force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
"""
# Default kwargs from DiffusionPipeline
cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
resume_download = kwargs.pop("resume_download", False)
force_download = kwargs.pop("force_download", False)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
torch_dtype = kwargs.pop("torch_dtype", None)
device_map = kwargs.pop("device_map", None)
alpha = kwargs.pop("alpha", 0.5)
interp = kwargs.pop("interp", None)
print("Received list", pretrained_model_name_or_path_list)
print(f"Combining with alpha={alpha}, interpolation mode={interp}")
checkpoint_count = len(pretrained_model_name_or_path_list)
# Ignore result from model_index_json comparision of the two checkpoints
force = kwargs.pop("force", False)
# If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
if checkpoint_count > 3 or checkpoint_count < 2:
raise ValueError(
"Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
" passed."
)
print("Received the right number of checkpoints")
# chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
# chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None
# Validate that the checkpoints can be merged
# Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
config_dicts = []
for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
config_dict = DiffusionPipeline.load_config(
pretrained_model_name_or_path,
cache_dir=cache_dir,
resume_download=resume_download,
force_download=force_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
)
config_dicts.append(config_dict)
comparison_result = True
for idx in range(1, len(config_dicts)):
comparison_result &= self._compare_model_configs(
config_dicts[idx - 1], config_dicts[idx]
)
if not force and comparison_result is False:
raise ValueError(
"Incompatible checkpoints. Please check model_index.json for the models."
)
print(config_dicts[0], config_dicts[1])
print("Compatible model_index.json files found")
# Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
cached_folders = []
for pretrained_model_name_or_path, config_dict in zip(
pretrained_model_name_or_path_list, config_dicts
):
folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
allow_patterns = [os.path.join(k, "*") for k in folder_names]
allow_patterns += [
WEIGHTS_NAME,
SCHEDULER_CONFIG_NAME,
CONFIG_NAME,
ONNX_WEIGHTS_NAME,
DiffusionPipeline.config_name,
]
requested_pipeline_class = config_dict.get("_class_name")
user_agent = {
"diffusers": __version__,
"pipeline_class": requested_pipeline_class,
}
cached_folder = (
pretrained_model_name_or_path
if os.path.isdir(pretrained_model_name_or_path)
else snapshot_download(
pretrained_model_name_or_path,
cache_dir=cache_dir,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
allow_patterns=allow_patterns,
user_agent=user_agent,
)
)
print("Cached Folder", cached_folder)
cached_folders.append(cached_folder)
# Step 3:-
# Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
final_pipe = DiffusionPipeline.from_pretrained(
cached_folders[0], torch_dtype=torch_dtype, device_map=device_map
)
final_pipe.to(self.device)
checkpoint_path_2 = None
if len(cached_folders) > 2:
checkpoint_path_2 = os.path.join(cached_folders[2])
if interp == "sigmoid":
theta_func = CheckpointMergerPipeline.sigmoid
elif interp == "inv_sigmoid":
theta_func = CheckpointMergerPipeline.inv_sigmoid
elif interp == "add_diff":
theta_func = CheckpointMergerPipeline.add_difference
else:
theta_func = CheckpointMergerPipeline.weighted_sum
# Find each module's state dict.
for attr in final_pipe.config.keys():
if not attr.startswith("_"):
checkpoint_path_1 = os.path.join(cached_folders[1], attr)
if os.path.exists(checkpoint_path_1):
files = list(
(
*glob.glob(
os.path.join(checkpoint_path_1, "*.safetensors")
),
*glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
)
)
checkpoint_path_1 = files[0] if len(files) > 0 else None
if len(cached_folders) < 3:
checkpoint_path_2 = None
else:
checkpoint_path_2 = os.path.join(cached_folders[2], attr)
if os.path.exists(checkpoint_path_2):
files = list(
(
*glob.glob(
os.path.join(checkpoint_path_2, "*.safetensors")
),
*glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
)
)
checkpoint_path_2 = files[0] if len(files) > 0 else None
# For an attr if both checkpoint_path_1 and 2 are None, ignore.
# If atleast one is present, deal with it according to interp method, of course only if the state_dict keys match.
if checkpoint_path_1 is None and checkpoint_path_2 is None:
print(f"Skipping {attr}: not present in 2nd or 3d model")
continue
try:
module = getattr(final_pipe, attr)
if isinstance(
module, bool
): # ignore requires_safety_checker boolean
continue
theta_0 = getattr(module, "state_dict")
theta_0 = theta_0()
update_theta_0 = getattr(module, "load_state_dict")
theta_1 = (
safetensors.torch.load_file(checkpoint_path_1)
if (
is_safetensors_available()
and checkpoint_path_1.endswith(".safetensors")
)
else torch.load(checkpoint_path_1, map_location="cpu")
)
if attr in ['vae', 'text_encoder']:
print(f"Direct use theta1 {attr}: {checkpoint_path_1}")
update_theta_0(theta_1)
del theta_1
del theta_0
continue
theta_2 = None
if checkpoint_path_2:
theta_2 = (
safetensors.torch.load_file(checkpoint_path_2)
if (
is_safetensors_available()
and checkpoint_path_2.endswith(".safetensors")
)
else torch.load(checkpoint_path_2, map_location="cpu")
)
if not theta_0.keys() == theta_1.keys():
print(f"Skipping {attr}: key mismatch")
continue
if theta_2 and not theta_1.keys() == theta_2.keys():
print(f"Skipping {attr}:y mismatch")
except Exception as e:
print(f"Skipping {attr} do to an unexpected error: {str(e)}")
continue
print(f"MERGING {attr}")
for key in theta_0.keys():
if theta_2:
theta_0[key] = theta_func(
theta_0[key], theta_1[key], theta_2[key], alpha
)
else:
theta_0[key] = theta_func(
theta_0[key], theta_1[key], None, alpha
)
del theta_1
del theta_2
update_theta_0(theta_0)
del theta_0
return final_pipe
@staticmethod
def weighted_sum(theta0, theta1, theta2, alpha):
return ((1 - alpha) * theta0) + (alpha * theta1)
# Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
@staticmethod
def sigmoid(theta0, theta1, theta2, alpha):
alpha = alpha * alpha * (3 - (2 * alpha))
return theta0 + ((theta1 - theta0) * alpha)
# Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
@staticmethod
def inv_sigmoid(theta0, theta1, theta2, alpha):
import math
alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
return theta0 + ((theta1 - theta0) * alpha)
@staticmethod
def add_difference(theta0, theta1, theta2, alpha):
# theta0 + (theta1 - theta2) * (1.0 - alpha)
diff = (theta1 - theta2) * (1.0 - alpha)
# print(f"theta0.shape: {theta0.shape}, diff shape: {diff.shape}")
# theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
if theta0.shape != diff.shape:
theta0[:, 0:4, :, :] = theta0[:, 0:4, :, :] + diff
else:
theta0 = theta0 + diff
return theta0
# Build the merged inpainting model: start from the SD-inpainting pipeline and
# apply the add_difference recipe with alpha=0, i.e. add the full delta between
# dreamlike-diffusion and vanilla SD v1.5 onto the inpainting weights.
pipe = CheckpointMergerPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
merged_pipe = pipe.merge(
    [
        "runwayml/stable-diffusion-inpainting",
        #"SG161222/Realistic_Vision_V1.4",
        "dreamlike-art/dreamlike-diffusion-1.0",
        "runwayml/stable-diffusion-v1-5",
    ],
    force=True,
    interp="add_diff",
    alpha=0,
)
# Save in half precision, serialized as safetensors.
merged_pipe = merged_pipe.to(torch.float16)
merged_pipe.save_pretrained("dreamlike-diffusion-1.0-inpainting", safe_serialization=True)
| 14,778 | 39.825967 | 171 | py |
lama-cleaner | lama-cleaner-main/scripts/convert_vae_pt_to_diffusers.py | import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Rename an LDM/CompVis VAE state dict into diffusers ``AutoencoderKL`` layout.

    Args:
        checkpoint: flat state dict with CompVis key names (``encoder.down.*``,
            ``decoder.up.*``, ``quant_conv.*``, ...).
        config: diffusers VAE config, as built by ``create_vae_diffusers_config``.

    Returns:
        A new state dict whose keys match diffusers' ``AutoencoderKL``.
    """
    vae_state_dict = checkpoint
    new_checkpoint = {}
    # Direct 1:1 copies: stem/output convs, output norms and quantization convs.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict[
        "encoder.conv_out.weight"
    ]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict[
        "encoder.norm_out.weight"
    ]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict[
        "encoder.norm_out.bias"
    ]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict[
        "decoder.conv_out.weight"
    ]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict[
        "decoder.norm_out.weight"
    ]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict[
        "decoder.norm_out.bias"
    ]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len(
        {
            ".".join(layer.split(".")[:3])
            for layer in vae_state_dict
            if "encoder.down" in layer
        }
    )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key]
        for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len(
        {
            ".".join(layer.split(".")[:3])
            for layer in vae_state_dict
            if "decoder.up" in layer
        }
    )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key]
        for layer_id in range(num_up_blocks)
    }
    # Encoder down blocks: rename resnets, move downsampler convs.
    for i in range(num_down_blocks):
        resnets = [
            key
            for key in down_blocks[i]
            if f"down.{i}" in key and f"down.{i}.downsample" not in key
        ]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            # pop() so assign_to_checkpoint does not see the downsample keys again.
            new_checkpoint[
                f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"
            ] = vae_state_dict.pop(f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[
                f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"
            ] = vae_state_dict.pop(f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    # Encoder mid block: two resnets plus one attention block.
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        paths,
        new_checkpoint,
        vae_state_dict,
        additional_replacements=[meta_path],
        config=config,
    )
    conv_attn_to_linear(new_checkpoint)
    # Decoder up blocks: note diffusers orders them opposite to CompVis,
    # hence block_id runs backwards relative to i.
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key
            for key in up_blocks[block_id]
            if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[
                f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"
            ] = vae_state_dict[f"decoder.up.{block_id}.upsample.conv.weight"]
            new_checkpoint[
                f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"
            ] = vae_state_dict[f"decoder.up.{block_id}.upsample.conv.bias"]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    # Decoder mid block: same structure as the encoder mid block.
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        paths,
        new_checkpoint,
        vae_state_dict,
        additional_replacements=[meta_path],
        config=config,
    )
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    """Convert a Stable Diffusion v1 VAE ``.pt`` checkpoint into diffusers format.

    Downloads the reference v1 inference config, renames the state dict via
    ``custom_convert_ldm_vae_checkpoint`` and saves an ``AutoencoderKL`` to
    ``output_path``.

    Raises:
        requests.HTTPError: if the reference config cannot be downloaded.
    """
    # Only support V1
    # Fix: the URL used to start with a stray leading space, which is not a
    # valid URL; also fail fast instead of parsing an HTTP error page as YAML.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    r.raise_for_status()
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    checkpoint = torch.load(checkpoint_path, map_location=device)
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(
        checkpoint["state_dict"], vae_config
    )
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    # CLI entry point: convert a VAE .pt file to diffusers format.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--vae_pt_path",
        default="/Users/cwq/code/github/lama-cleaner/scripts/anything-v4.0.vae.pt",
        type=str,
        help="Path to the VAE.pt to convert.",
    )
    parser.add_argument(
        "--dump_path",
        default="diffusion_pytorch_model.bin",
        type=str,
        # Fix: help text was copy-pasted from --vae_pt_path.
        help="Path to save the converted diffusers VAE weights to.",
    )
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 7,961 | 33.318966 | 117 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model_manager.py | import torch
import gc
from loguru import logger
from lama_cleaner.const import SD15_MODELS
from lama_cleaner.helper import switch_mps_device
from lama_cleaner.model.controlnet import ControlNet
from lama_cleaner.model.fcf import FcF
from lama_cleaner.model.lama import LaMa
from lama_cleaner.model.ldm import LDM
from lama_cleaner.model.manga import Manga
from lama_cleaner.model.mat import MAT
from lama_cleaner.model.paint_by_example import PaintByExample
from lama_cleaner.model.instruct_pix2pix import InstructPix2Pix
from lama_cleaner.model.sd import SD15, SD2, Anything4, RealisticVision14
from lama_cleaner.model.utils import torch_gc
from lama_cleaner.model.zits import ZITS
from lama_cleaner.model.opencv2 import OpenCV2
from lama_cleaner.schema import Config
# Registry mapping a user-facing model name to its implementing class.
# Each class is constructed as cls(device, **kwargs) by ModelManager.init_model.
models = {
    "lama": LaMa,
    "ldm": LDM,
    "zits": ZITS,
    "mat": MAT,
    "fcf": FcF,
    SD15.name: SD15,
    Anything4.name: Anything4,
    RealisticVision14.name: RealisticVision14,
    "cv2": OpenCV2,
    "manga": Manga,
    "sd2": SD2,
    "paint_by_example": PaintByExample,
    "instruct_pix2pix": InstructPix2Pix,
}
class ModelManager:
    """Owns the currently loaded inpainting model and supports hot-swapping
    between models and between ControlNet conditioning methods."""

    def __init__(self, name: str, device: torch.device, **kwargs):
        # name: key into `models` (or an SD 1.5 variant when ControlNet is on);
        # kwargs are forwarded verbatim to every model constructor.
        self.name = name
        self.device = device
        self.kwargs = kwargs
        self.model = self.init_model(name, device, **kwargs)

    def init_model(self, name: str, device, **kwargs):
        """Instantiate the model registered under `name` on `device`.

        Raises NotImplementedError for unknown names.
        """
        # SD 1.5 variants with ControlNet enabled are wrapped by ControlNet.
        if name in SD15_MODELS and kwargs.get("sd_controlnet", False):
            return ControlNet(device, **{**kwargs, "name": name})
        if name in models:
            model = models[name](device, **kwargs)
        else:
            raise NotImplementedError(f"Not supported model: {name}")
        return model

    def is_downloaded(self, name: str) -> bool:
        """Return whether the weights for `name` are already on disk."""
        if name in models:
            return models[name].is_downloaded()
        else:
            raise NotImplementedError(f"Not supported model: {name}")

    def __call__(self, image, mask, config: Config):
        """Run inpainting, switching the ControlNet method first if requested."""
        self.switch_controlnet_method(control_method=config.controlnet_method)
        return self.model(image, mask, config)

    def switch(self, new_name: str, **kwargs):
        """Replace the current model with `new_name`, freeing the old one first.

        No-op when `new_name` is already loaded.
        """
        if new_name == self.name:
            return
        try:
            # NOTE(review): if init_model fails below after `del self.model`,
            # the manager is left without a loaded model — confirm intended.
            if torch.cuda.memory_allocated() > 0:
                # Clear current loaded model from memory
                torch.cuda.empty_cache()
                del self.model
                gc.collect()
            self.model = self.init_model(
                new_name, switch_mps_device(new_name, self.device), **self.kwargs
            )
            self.name = new_name
        except NotImplementedError as e:
            raise e

    def switch_controlnet_method(self, control_method: str):
        """Reload the model with a different ControlNet conditioning method."""
        if not self.kwargs.get("sd_controlnet"):
            return
        if self.kwargs["sd_controlnet_method"] == control_method:
            return
        if self.model.is_local_sd_model:
            # is_native_control_inpaint means a plain (non-inpainting) SD model was loaded
            if (
                self.model.is_native_control_inpaint
                and control_method != "control_v11p_sd15_inpaint"
            ):
                raise RuntimeError(
                    f"--sd-local-model-path load a normal SD model, "
                    f"to use {control_method} you should load an inpainting SD model"
                )
            elif (
                not self.model.is_native_control_inpaint
                and control_method == "control_v11p_sd15_inpaint"
            ):
                raise RuntimeError(
                    f"--sd-local-model-path load an inpainting SD model, "
                    f"to use {control_method} you should load a norml SD model"
                )
        # Drop the old pipeline before loading the new one to limit peak memory.
        del self.model
        torch_gc()
        old_method = self.kwargs["sd_controlnet_method"]
        self.kwargs["sd_controlnet_method"] = control_method
        self.model = self.init_model(
            self.name, switch_mps_device(self.name, self.device), **self.kwargs
        )
        logger.info(f"Switch ControlNet method from {old_method} to {control_method}")
| 4,073 | 34.12069 | 86 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/benchmark.py | #!/usr/bin/env python3
import argparse
import os
import time
import numpy as np
import nvidia_smi
import psutil
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config, HDStrategy, SDSampler
# Best-effort: disable torch's JIT fusers so benchmark numbers are not skewed
# by fusion warm-up; these are private APIs that vary across torch versions,
# hence the blanket try/except.
try:
    torch._C._jit_override_can_fuse_on_cpu(False)
    torch._C._jit_override_can_fuse_on_gpu(False)
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_nvfuser_enabled(False)
except:
    pass
# Pin BLAS/OpenMP thread pools to a fixed size for reproducible timings.
NUM_THREADS = str(4)
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
# Optionally redirect the torch model cache.
if os.environ.get("CACHE_DIR"):
    os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
def run_model(model, size):
    """Run one inpainting pass on a random RGB image and mask of `size`."""
    # RGB
    height, width = size[0], size[1]
    image = np.random.randint(0, 256, (height, width, 3)).astype(np.uint8)
    mask = np.random.randint(0, 255, size).astype(np.uint8)
    options = dict(
        ldm_steps=2,
        hd_strategy=HDStrategy.ORIGINAL,
        hd_strategy_crop_margin=128,
        hd_strategy_crop_trigger_size=128,
        hd_strategy_resize_limit=128,
        prompt="a fox is sitting on a bench",
        sd_steps=5,
        sd_sampler=SDSampler.ddim,
    )
    model(image, mask, Config(**options))
def benchmark(model, times: int, empty_cache: bool):
    """Benchmark `model` on fixed sizes, reporting latency, RAM and GPU memory.

    Args:
        model: callable as model(image, mask, config) (see run_model).
        times: number of timed runs per size.
        empty_cache: when True, empty the CUDA cache before each run so
            per-run GPU memory readings are not inflated by cached allocations.
            (Fix: this flag was previously accepted but ignored.)
    """
    sizes = [(512, 512)]
    nvidia_smi.nvmlInit()
    device_id = 0
    handle = nvidia_smi.nvmlDeviceGetHandleByIndex(device_id)

    def fmt(metrics):
        # mean ± std over all runs
        return f"{np.mean(metrics):.2f} ± {np.std(metrics):.2f}"

    process = psutil.Process(os.getpid())
    # For each size report latency plus host and GPU memory usage.
    for size in sizes:
        torch.cuda.empty_cache()
        time_metrics = []
        cpu_metrics = []
        memory_metrics = []
        gpu_memory_metrics = []
        for _ in range(times):
            if empty_cache:
                torch.cuda.empty_cache()
            start = time.time()
            run_model(model, size)
            torch.cuda.synchronize()
            # cpu_metrics.append(process.cpu_percent())
            time_metrics.append((time.time() - start) * 1000)
            memory_metrics.append(process.memory_info().rss / 1024 / 1024)
            gpu_memory_metrics.append(nvidia_smi.nvmlDeviceGetMemoryInfo(handle).used / 1024 / 1024)
        print(f"size: {size}".center(80, "-"))
        # print(f"cpu: {fmt(cpu_metrics)}")
        print(f"latency: {fmt(time_metrics)}ms")
        print(f"memory: {fmt(memory_metrics)} MB")
        print(f"gpu memory: {fmt(gpu_memory_metrics)} MB")
    nvidia_smi.nvmlShutdown()
def get_args_parser():
    """Parse benchmark CLI arguments.

    Note: despite the name, this returns the parsed namespace, not the parser.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--name")
    cli.add_argument("--device", type=str, default="cuda")
    cli.add_argument("--times", type=int, default=10)
    cli.add_argument("--empty-cache", action="store_true")
    return cli.parse_args()
if __name__ == "__main__":
    # CLI entry point: load the requested model locally and benchmark it.
    args = get_args_parser()
    device = torch.device(args.device)
    model = ModelManager(
        name=args.name,
        device=device,
        sd_run_local=True,
        disable_nsfw=True,
        sd_cpu_textencoder=True,
        hf_access_token="123"  # dummy token; sd_run_local avoids remote auth
    )
    benchmark(model, args.times, args.empty_cache)
| 3,215 | 28.236364 | 100 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/server.py | #!/usr/bin/env python3
import os
import hashlib
# Set before importing torch: lets ops without MPS kernels fall back to CPU.
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import imghdr
import io
import logging
import multiprocessing
import random
import time
from pathlib import Path
import cv2
import numpy as np
import torch
from PIL import Image
from loguru import logger
from lama_cleaner.const import SD15_MODELS
from lama_cleaner.file_manager import FileManager
from lama_cleaner.model.utils import torch_gc
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.plugins import (
InteractiveSeg,
RemoveBG,
RealESRGANUpscaler,
MakeGIF,
GFPGANPlugin,
RestoreFormerPlugin,
AnimeSeg,
)
from lama_cleaner.schema import Config
# Best-effort: disable torch's JIT fusers; these private APIs differ across
# torch versions, hence the blanket try/except.
try:
    torch._C._jit_override_can_fuse_on_cpu(False)
    torch._C._jit_override_can_fuse_on_gpu(False)
    torch._C._jit_set_texpr_fuser_enabled(False)
    torch._C._jit_set_nvfuser_enabled(False)
except:
    pass
from flask import (
Flask,
request,
send_file,
cli,
make_response,
send_from_directory,
jsonify,
)
from flask_socketio import SocketIO
# Silence Flask's startup banner warning about running a development server
# in a production environment.
# https://gist.github.com/jerblack/735b9953ba1ab6234abb43174210d356
cli.show_server_banner = lambda *_: None
from flask_cors import CORS
from lama_cleaner.helper import (
load_img,
numpy_to_bytes,
resize_max_size,
pil_to_bytes,
)
# Cap BLAS/OpenMP thread pools at the machine's CPU count.
NUM_THREADS = str(multiprocessing.cpu_count())
# fix libomp problem on windows https://github.com/Sanster/lama-cleaner/issues/56
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
# Optionally redirect the torch model cache.
if os.environ.get("CACHE_DIR"):
    os.environ["TORCH_HOME"] = os.environ["CACHE_DIR"]
# Location of the built frontend assets served by Flask.
BUILD_DIR = os.environ.get("LAMA_CLEANER_BUILD_DIR", "app/build")
class NoFlaskwebgui(logging.Filter):
    """Logging filter that drops noisy werkzeug/flaskwebgui records.

    The "Running on http:" record is additionally echoed to stdout so the
    user still sees the server address.
    """

    _SUPPRESSED = (
        "flaskwebgui-keep-server-alive",
        "socket.io",
        "This is a development server.",
    )

    def filter(self, record):
        msg = record.getMessage()
        marker = "Running on http:"
        if marker in msg:
            print(msg[msg.index(marker):])
        return not any(token in msg for token in self._SUPPRESSED)
logging.getLogger("werkzeug").addFilter(NoFlaskwebgui())
app = Flask(__name__, static_folder=os.path.join(BUILD_DIR, "static"))
app.config["JSON_AS_ASCII"] = False
CORS(app, expose_headers=["Content-Disposition"])
sio_logger = logging.getLogger("sio-logger")
sio_logger.setLevel(logging.ERROR)
socketio = SocketIO(app, cors_allowed_origins="*", async_mode="threading")
# Module-level server state; populated by main() at startup.
model: ModelManager = None
thumb: FileManager = None
output_dir: str = None
device = None
input_image_path: str = None
is_disable_model_switch: bool = False
is_controlnet: bool = False
controlnet_method: str = "control_v11p_sd15_canny"
is_enable_file_manager: bool = False
is_enable_auto_saving: bool = False
is_desktop: bool = False
image_quality: int = 95
# name -> plugin instance, filled by build_plugins()
plugins = {}
def get_image_ext(img_bytes):
    """Sniff an image extension from magic bytes; defaults to "jpeg".

    Replaces ``imghdr.what`` (deprecated in Python 3.11, removed in 3.13).
    Only the formats this app actually serves are distinguished; anything
    unrecognized keeps the legacy "jpeg" fallback.
    """
    if img_bytes.startswith(b"\x89PNG\r\n\x1a\n"):
        return "png"
    if img_bytes.startswith((b"GIF87a", b"GIF89a")):
        return "gif"
    if img_bytes[:4] == b"RIFF" and img_bytes[8:12] == b"WEBP":
        return "webp"
    if img_bytes.startswith(b"BM"):
        return "bmp"
    # JPEG SOI marker, or the legacy "unknown -> jpeg" fallback.
    return "jpeg"
def diffuser_callback(i, t, latents):
    # Push the current diffusion step to the frontend over socket.io.
    # (t and latents are part of the diffusers callback signature, unused here.)
    socketio.emit("diffusion_progress", {"step": i})
@app.route("/save_image", methods=["POST"])
def save_image():
    """Save the posted image under `output_dir`, re-attaching alpha and EXIF."""
    if output_dir is None:
        return "--output-dir is None", 500
    input = request.files
    filename = request.form["filename"]
    origin_image_bytes = input["image"].read()  # RGB
    ext = get_image_ext(origin_image_bytes)
    image, alpha_channel, exif_infos = load_img(origin_image_bytes, return_exif=True)
    save_path = os.path.join(output_dir, filename)
    # Restore the alpha channel (resized if needed) before saving.
    if alpha_channel is not None:
        if alpha_channel.shape[:2] != image.shape[:2]:
            alpha_channel = cv2.resize(
                alpha_channel, dsize=(image.shape[1], image.shape[0])
            )
        image = np.concatenate((image, alpha_channel[:, :, np.newaxis]), axis=-1)
    pil_image = Image.fromarray(image)
    img_bytes = pil_to_bytes(
        pil_image,
        ext,
        quality=image_quality,
        exif_infos=exif_infos,
    )
    with open(save_path, "wb") as fw:
        fw.write(img_bytes)
    return "ok", 200
@app.route("/medias/<tab>")
def medias(tab):
    """List media file names for the given tab ("image" inputs or outputs)."""
    if tab == "image":
        response = make_response(jsonify(thumb.media_names), 200)
    else:
        response = make_response(jsonify(thumb.output_media_names), 200)
    # response.last_modified = thumb.modified_time[tab]
    # response.cache_control.no_cache = True
    # response.cache_control.max_age = 0
    # response.make_conditional(request)
    return response
@app.route("/media/<tab>/<filename>")
def media_file(tab, filename):
    """Serve a full-size media file from the input or output directory."""
    if tab == "image":
        return send_from_directory(thumb.root_directory, filename)
    return send_from_directory(thumb.output_dir, filename)
@app.route("/media_thumbnail/<tab>/<filename>")
def media_thumbnail_file(tab, filename):
    """Serve a (possibly generated) thumbnail; actual size goes in X-Width/X-Height."""
    args = request.args
    width = args.get("width")
    height = args.get("height")
    # Default to a 256px-wide thumbnail when no dimension is requested.
    if width is None and height is None:
        width = 256
    if width:
        width = int(float(width))
    if height:
        height = int(float(height))
    directory = thumb.root_directory
    if tab == "output":
        directory = thumb.output_dir
    thumb_filename, (width, height) = thumb.get_thumbnail(
        directory, filename, width, height
    )
    thumb_filepath = f"{app.config['THUMBNAIL_MEDIA_THUMBNAIL_ROOT']}{thumb_filename}"
    response = make_response(send_file(thumb_filepath))
    response.headers["X-Width"] = str(width)
    response.headers["X-Height"] = str(height)
    return response
@app.route("/inpaint", methods=["POST"])
def process():
    """Main inpainting endpoint.

    Expects multipart form data with `image`, `mask` and all model/config
    fields; runs the current model and returns the resulting image, with the
    seed used in the `X-Seed` response header.
    """
    input = request.files
    # RGB
    origin_image_bytes = input["image"].read()
    image, alpha_channel, exif_infos = load_img(origin_image_bytes, return_exif=True)
    mask, _ = load_img(input["mask"].read(), gray=True)
    # Binarize the mask so every pixel is unambiguously keep/inpaint.
    mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
    if image.shape[:2] != mask.shape[:2]:
        # NOTE(review): "queal" is a typo in this user-facing message.
        return (
            f"Mask shape{mask.shape[:2]} not queal to Image shape{image.shape[:2]}",
            400,
        )
    original_shape = image.shape
    interpolation = cv2.INTER_CUBIC
    form = request.form
    size_limit = max(image.shape)
    # Optional example image for the paint-by-example model.
    if "paintByExampleImage" in input:
        paint_by_example_example_image, _ = load_img(
            input["paintByExampleImage"].read()
        )
        paint_by_example_example_image = Image.fromarray(paint_by_example_example_image)
    else:
        paint_by_example_example_image = None
    # Assemble the per-request config from the posted form fields.
    config = Config(
        ldm_steps=form["ldmSteps"],
        ldm_sampler=form["ldmSampler"],
        hd_strategy=form["hdStrategy"],
        zits_wireframe=form["zitsWireframe"],
        hd_strategy_crop_margin=form["hdStrategyCropMargin"],
        hd_strategy_crop_trigger_size=form["hdStrategyCropTrigerSize"],
        hd_strategy_resize_limit=form["hdStrategyResizeLimit"],
        prompt=form["prompt"],
        negative_prompt=form["negativePrompt"],
        use_croper=form["useCroper"],
        croper_x=form["croperX"],
        croper_y=form["croperY"],
        croper_height=form["croperHeight"],
        croper_width=form["croperWidth"],
        sd_scale=form["sdScale"],
        sd_mask_blur=form["sdMaskBlur"],
        sd_strength=form["sdStrength"],
        sd_steps=form["sdSteps"],
        sd_guidance_scale=form["sdGuidanceScale"],
        sd_sampler=form["sdSampler"],
        sd_seed=form["sdSeed"],
        sd_match_histograms=form["sdMatchHistograms"],
        cv2_flag=form["cv2Flag"],
        cv2_radius=form["cv2Radius"],
        paint_by_example_steps=form["paintByExampleSteps"],
        paint_by_example_guidance_scale=form["paintByExampleGuidanceScale"],
        paint_by_example_mask_blur=form["paintByExampleMaskBlur"],
        paint_by_example_seed=form["paintByExampleSeed"],
        paint_by_example_match_histograms=form["paintByExampleMatchHistograms"],
        paint_by_example_example_image=paint_by_example_example_image,
        p2p_steps=form["p2pSteps"],
        p2p_image_guidance_scale=form["p2pImageGuidanceScale"],
        p2p_guidance_scale=form["p2pGuidanceScale"],
        controlnet_conditioning_scale=form["controlnet_conditioning_scale"],
        controlnet_method=form["controlnet_method"],
    )
    # -1 means "pick a random seed"; record the actual seed for the response.
    if config.sd_seed == -1:
        config.sd_seed = random.randint(1, 999999999)
    if config.paint_by_example_seed == -1:
        config.paint_by_example_seed = random.randint(1, 999999999)
    logger.info(f"Origin image shape: {original_shape}")
    image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
    mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
    start = time.time()
    try:
        res_np_img = model(image, mask, config)
    except RuntimeError as e:
        if "CUDA out of memory. " in str(e):
            # NOTE: the string may change?
            return "CUDA out of memory", 500
        else:
            logger.exception(e)
            return f"{str(e)}", 500
    finally:
        logger.info(f"process time: {(time.time() - start) * 1000}ms")
        torch_gc()
    # Model output is BGR; convert back and re-attach the alpha channel.
    res_np_img = cv2.cvtColor(res_np_img.astype(np.uint8), cv2.COLOR_BGR2RGB)
    if alpha_channel is not None:
        if alpha_channel.shape[:2] != res_np_img.shape[:2]:
            alpha_channel = cv2.resize(
                alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
            )
        res_np_img = np.concatenate(
            (res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
        )
    ext = get_image_ext(origin_image_bytes)
    bytes_io = io.BytesIO(
        pil_to_bytes(
            Image.fromarray(res_np_img),
            ext,
            quality=image_quality,
            exif_infos=exif_infos,
        )
    )
    response = make_response(
        send_file(
            # io.BytesIO(numpy_to_bytes(res_np_img, ext)),
            bytes_io,
            mimetype=f"image/{ext}",
        )
    )
    response.headers["X-Seed"] = str(config.sd_seed)
    socketio.emit("diffusion_finish")
    return response
@app.route("/run_plugin", methods=["POST"])
def run_plugin():
    """Run the named plugin (segmentation, upscaling, GIF, ...) on an image.

    The response type depends on the plugin: GIF bytes for MakeGIF, PNG for
    segmenters, otherwise an image in the original format.
    """
    form = request.form
    files = request.files
    name = form["name"]
    if name not in plugins:
        return "Plugin not found", 500
    origin_image_bytes = files["image"].read()  # RGB
    rgb_np_img, alpha_channel, exif_infos = load_img(
        origin_image_bytes, return_exif=True
    )
    start = time.time()
    try:
        form = dict(form)
        # InteractiveSeg caches per-image state keyed by the image md5.
        if name == InteractiveSeg.name:
            img_md5 = hashlib.md5(origin_image_bytes).hexdigest()
            form["img_md5"] = img_md5
        bgr_res = plugins[name](rgb_np_img, files, form)
    except RuntimeError as e:
        torch.cuda.empty_cache()
        if "CUDA out of memory. " in str(e):
            # NOTE: the string may change?
            return "CUDA out of memory", 500
        else:
            logger.exception(e)
            return "Internal Server Error", 500
    logger.info(f"{name} process time: {(time.time() - start) * 1000}ms")
    torch_gc()
    if name == MakeGIF.name:
        return send_file(
            io.BytesIO(bgr_res),
            mimetype="image/gif",
            as_attachment=True,
            download_name=form["filename"],
        )
    if name == InteractiveSeg.name:
        return make_response(
            send_file(
                io.BytesIO(numpy_to_bytes(bgr_res, "png")),
                mimetype="image/png",
            )
        )
    # Background-removal plugins already return RGBA png data.
    if name in [RemoveBG.name, AnimeSeg.name]:
        rgb_res = bgr_res
        ext = "png"
    else:
        rgb_res = cv2.cvtColor(bgr_res, cv2.COLOR_BGR2RGB)
        ext = get_image_ext(origin_image_bytes)
        if alpha_channel is not None:
            if alpha_channel.shape[:2] != rgb_res.shape[:2]:
                alpha_channel = cv2.resize(
                    alpha_channel, dsize=(rgb_res.shape[1], rgb_res.shape[0])
                )
            rgb_res = np.concatenate(
                (rgb_res, alpha_channel[:, :, np.newaxis]), axis=-1
            )
    response = make_response(
        send_file(
            io.BytesIO(
                pil_to_bytes(
                    Image.fromarray(rgb_res),
                    ext,
                    quality=image_quality,
                    exif_infos=exif_infos,
                )
            ),
            mimetype=f"image/{ext}",
        )
    )
    return response
@app.route("/server_config", methods=["GET"])
def get_server_config():
    """Expose startup flags and available plugins to the frontend."""
    return {
        "isControlNet": is_controlnet,
        "controlNetMethod": controlnet_method,
        "isDisableModelSwitchState": is_disable_model_switch,
        "isEnableAutoSaving": is_enable_auto_saving,
        "enableFileManager": is_enable_file_manager,
        "plugins": list(plugins.keys()),
    }, 200
@app.route("/model")
def current_model():
    """Return the name of the currently loaded model."""
    return model.name, 200
@app.route("/model_downloaded/<name>")
def model_downloaded(name):
    """Return "True"/"False": whether the weights for `name` are on disk."""
    return str(model.is_downloaded(name)), 200
@app.route("/is_desktop")
def get_is_desktop():
    """Return "True"/"False": whether the server runs in desktop (GUI) mode."""
    return str(is_desktop), 200
@app.route("/model", methods=["POST"])
def switch_model():
    """Switch the active model to the one named in the form data."""
    if is_disable_model_switch:
        return "Switch model is disabled", 400
    new_name = request.form.get("name")
    if new_name == model.name:
        return "Same model", 200
    try:
        model.switch(new_name)
    except NotImplementedError:
        return f"{new_name} not implemented", 403
    return f"ok, switch to {new_name}", 200
@app.route("/")
def index():
    """Serve the built frontend's entry page."""
    return send_file(os.path.join(BUILD_DIR, "index.html"))
@app.route("/inputimage")
def set_input_photo():
    """Serve the image passed via --input (if any) as the initial photo."""
    if input_image_path:
        # Read the bytes only to sniff the mimetype; send_file streams the path.
        with open(input_image_path, "rb") as f:
            image_in_bytes = f.read()
        return send_file(
            input_image_path,
            as_attachment=True,
            download_name=Path(input_image_path).name,
            mimetype=f"image/{get_image_ext(image_in_bytes)}",
        )
    else:
        return "No Input Image"
def build_plugins(args):
    """Instantiate every plugin enabled via CLI flags into the global `plugins` dict."""
    global plugins
    if args.enable_interactive_seg:
        logger.info(f"Initialize {InteractiveSeg.name} plugin")
        plugins[InteractiveSeg.name] = InteractiveSeg(
            args.interactive_seg_model, args.interactive_seg_device
        )
    if args.enable_remove_bg:
        logger.info(f"Initialize {RemoveBG.name} plugin")
        plugins[RemoveBG.name] = RemoveBG()
    if args.enable_anime_seg:
        logger.info(f"Initialize {AnimeSeg.name} plugin")
        plugins[AnimeSeg.name] = AnimeSeg()
    if args.enable_realesrgan:
        logger.info(
            f"Initialize {RealESRGANUpscaler.name} plugin: {args.realesrgan_model}, {args.realesrgan_device}"
        )
        plugins[RealESRGANUpscaler.name] = RealESRGANUpscaler(
            args.realesrgan_model,
            args.realesrgan_device,
            no_half=args.realesrgan_no_half,
        )
    # GFPGAN / RestoreFormer reuse RealESRGAN (when enabled) as the
    # background upscaler, so they must be built after it.
    if args.enable_gfpgan:
        logger.info(f"Initialize {GFPGANPlugin.name} plugin")
        if args.enable_realesrgan:
            logger.info("Use realesrgan as GFPGAN background upscaler")
        else:
            logger.info(
                f"GFPGAN no background upscaler, use --enable-realesrgan to enable it"
            )
        plugins[GFPGANPlugin.name] = GFPGANPlugin(
            args.gfpgan_device, upscaler=plugins.get(RealESRGANUpscaler.name, None)
        )
    if args.enable_restoreformer:
        logger.info(f"Initialize {RestoreFormerPlugin.name} plugin")
        plugins[RestoreFormerPlugin.name] = RestoreFormerPlugin(
            args.restoreformer_device,
            upscaler=plugins.get(RealESRGANUpscaler.name, None),
        )
    if args.enable_gif:
        logger.info(f"Initialize GIF plugin")
        plugins[MakeGIF.name] = MakeGIF()
def main(args):
    """Configure global server state from CLI args, load the model and serve.

    Runs either as a desktop app (flaskwebgui) or as a plain socketio server.
    """
    global model
    global device
    global input_image_path
    global is_disable_model_switch
    global is_enable_file_manager
    global is_desktop
    global thumb
    global output_dir
    global is_enable_auto_saving
    global is_controlnet
    global controlnet_method
    global image_quality
    build_plugins(args)
    image_quality = args.quality
    # ControlNet only applies to SD 1.5 family models.
    if args.sd_controlnet and args.model in SD15_MODELS:
        is_controlnet = True
        controlnet_method = args.sd_controlnet_method
    output_dir = args.output_dir
    if output_dir:
        is_enable_auto_saving = True
    device = torch.device(args.device)
    is_disable_model_switch = args.disable_model_switch
    is_desktop = args.gui
    if is_disable_model_switch:
        logger.info(
            f"Start with --disable-model-switch, model switch on frontend is disable"
        )
    # A directory input enables the file-manager UI; a file input becomes
    # the initial photo served by /inputimage.
    if args.input and os.path.isdir(args.input):
        logger.info(f"Initialize file manager")
        thumb = FileManager(app)
        is_enable_file_manager = True
        app.config["THUMBNAIL_MEDIA_ROOT"] = args.input
        app.config["THUMBNAIL_MEDIA_THUMBNAIL_ROOT"] = os.path.join(
            args.output_dir, "lama_cleaner_thumbnails"
        )
        thumb.output_dir = Path(args.output_dir)
        # thumb.start()
        # try:
        #     while True:
        #         time.sleep(1)
        # finally:
        #     thumb.image_dir_observer.stop()
        #     thumb.image_dir_observer.join()
        #     thumb.output_dir_observer.stop()
        #     thumb.output_dir_observer.join()
    else:
        input_image_path = args.input
    model = ModelManager(
        name=args.model,
        sd_controlnet=args.sd_controlnet,
        sd_controlnet_method=args.sd_controlnet_method,
        device=device,
        no_half=args.no_half,
        hf_access_token=args.hf_access_token,
        disable_nsfw=args.sd_disable_nsfw or args.disable_nsfw,
        sd_cpu_textencoder=args.sd_cpu_textencoder,
        sd_run_local=args.sd_run_local,
        sd_local_model_path=args.sd_local_model_path,
        local_files_only=args.local_files_only,
        cpu_offload=args.cpu_offload,
        enable_xformers=args.sd_enable_xformers or args.enable_xformers,
        callback=diffuser_callback,
    )
    if args.gui:
        # Desktop mode: wrap the flask app in a native window.
        app_width, app_height = args.gui_size
        from flaskwebgui import FlaskUI
        ui = FlaskUI(
            app,
            socketio=socketio,
            width=app_width,
            height=app_height,
            host=args.host,
            port=args.port,
            close_server_on_exit=not args.no_gui_auto_close,
        )
        ui.run()
    else:
        socketio.run(
            app,
            host=args.host,
            port=args.port,
            debug=args.debug,
            allow_unsafe_werkzeug=True,
        )
| 18,878 | 29.303371 | 109 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/helper.py | import io
import os
import sys
from typing import List, Optional
from urllib.parse import urlparse
import cv2
from PIL import Image, ImageOps, PngImagePlugin
import numpy as np
import torch
from lama_cleaner.const import MPS_SUPPORT_MODELS
from loguru import logger
from torch.hub import download_url_to_file, get_dir
import hashlib
def md5sum(filename):
    """Return the hex MD5 digest of a file, reading it in chunks."""
    digest = hashlib.md5()
    chunk_size = 128 * digest.block_size
    with open(filename, "rb") as fp:
        while True:
            chunk = fp.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def switch_mps_device(model_name, device):
    """Fall back to CPU when `device` is MPS but the model lacks MPS support."""
    wants_mps = str(device) == "mps"
    if wants_mps and model_name not in MPS_SUPPORT_MODELS:
        logger.info(f"{model_name} not support mps, switch to cpu")
        return torch.device("cpu")
    return device
def get_cache_path_by_url(url):
    """Map a download URL to its local path under the torch hub checkpoints dir.

    Creates the checkpoints directory if it does not exist yet.
    """
    filename = os.path.basename(urlparse(url).path)
    model_dir = os.path.join(get_dir(), "checkpoints")
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    return os.path.join(model_dir, filename)
def download_model(url, model_md5: str = None):
    """Download `url` into the torch hub cache (if missing) and return its path.

    When `model_md5` is given, the freshly downloaded file is verified; on a
    mismatch the bad file is deleted and the process exits.
    """
    cached_file = get_cache_path_by_url(url)
    if not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = None
        download_url_to_file(url, cached_file, hash_prefix, progress=True)
        # Only verify immediately after a fresh download; pre-existing files
        # are trusted as-is.
        if model_md5:
            _md5 = md5sum(cached_file)
            if model_md5 == _md5:
                logger.info(f"Download model success, md5: {_md5}")
            else:
                try:
                    os.remove(cached_file)
                    logger.error(
                        f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
                        f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
                    )
                except:
                    logger.error(
                        f"Model md5: {_md5}, expected md5: {model_md5}, please delete {cached_file} and restart lama-cleaner."
                    )
                exit(-1)
    return cached_file
def ceil_modulo(x, mod):
    """Round `x` up to the nearest multiple of `mod` (x itself if aligned)."""
    remainder = x % mod
    if remainder == 0:
        return x
    return (x // mod + 1) * mod
def handle_error(model_path, model_md5, e):
    """Report a model-loading failure and exit the process.

    If the on-disk file's md5 does not match the expected one the corrupt
    file is deleted so the next start re-downloads it; otherwise the original
    exception is surfaced with a bug-report hint.
    """
    _md5 = md5sum(model_path)
    if _md5 != model_md5:
        try:
            os.remove(model_path)
            logger.error(
                f"Model md5: {_md5}, expected md5: {model_md5}, wrong model deleted. Please restart lama-cleaner."
                f"If you still have errors, please try download model manually first https://lama-cleaner-docs.vercel.app/install/download_model_manually.\n"
            )
        except:
            logger.error(
                f"Model md5: {_md5}, expected md5: {model_md5}, please delete {model_path} and restart lama-cleaner."
            )
    else:
        logger.error(
            f"Failed to load model {model_path},"
            f"please submit an issue at https://github.com/Sanster/lama-cleaner/issues and include a screenshot of the error:\n{e}"
        )
    exit(-1)
def load_jit_model(url_or_path, device, model_md5: str):
    """Load a TorchScript model from a local path or URL, in eval mode.

    URLs are downloaded into the torch hub cache first; failures are handled
    by handle_error (which exits the process).
    """
    if os.path.exists(url_or_path):
        model_path = url_or_path
    else:
        model_path = download_model(url_or_path, model_md5)
    logger.info(f"Loading model from: {model_path}")
    try:
        # Load on CPU first, then move, to avoid device-mismatch on load.
        model = torch.jit.load(model_path, map_location="cpu").to(device)
    except Exception as e:
        handle_error(model_path, model_md5, e)
    model.eval()
    return model
def load_model(model: torch.nn.Module, url_or_path, device, model_md5):
    """Load a state dict into `model` from a local path or URL, in eval mode.

    URLs are downloaded into the torch hub cache first; failures are handled
    by handle_error (which exits the process).
    """
    if os.path.exists(url_or_path):
        model_path = url_or_path
    else:
        model_path = download_model(url_or_path, model_md5)
    try:
        logger.info(f"Loading model from: {model_path}")
        state_dict = torch.load(model_path, map_location="cpu")
        model.load_state_dict(state_dict, strict=True)
        model.to(device)
    except Exception as e:
        handle_error(model_path, model_md5, e)
    model.eval()
    return model
def numpy_to_bytes(image_numpy: np.ndarray, ext: str) -> bytes:
    """Encode an image array to raw bytes in the given format (e.g. "png")."""
    encode_params = [
        int(cv2.IMWRITE_JPEG_QUALITY), 100,
        int(cv2.IMWRITE_PNG_COMPRESSION), 0,
    ]
    _, buffer = cv2.imencode(f".{ext}", image_numpy, encode_params)
    return buffer.tobytes()
def pil_to_bytes(pil_img, ext: str, quality: int = 95, exif_infos=None) -> bytes:
    """Serialize a PIL image to encoded bytes.

    Args:
        pil_img: Source PIL image.
        ext: Target format name, e.g. "png" or "jpeg".
        quality: Encoder quality for lossy formats.
        exif_infos: Optional metadata dict; entries whose value is None
            are dropped. For PNG output, a "parameters" entry is also
            embedded as a tEXt chunk via PngInfo.

    Returns:
        The encoded image bytes.
    """
    # Fix: mutable default argument ({} shared across calls) replaced
    # with the None sentinel.
    if exif_infos is None:
        exif_infos = {}
    with io.BytesIO() as output:
        kwargs = {k: v for k, v in exif_infos.items() if v is not None}
        if ext == "png" and "parameters" in kwargs:
            pnginfo_data = PngImagePlugin.PngInfo()
            pnginfo_data.add_text("parameters", kwargs["parameters"])
            kwargs["pnginfo"] = pnginfo_data
        pil_img.save(
            output,
            format=ext,
            quality=quality,
            **kwargs,
        )
        image_bytes = output.getvalue()
    return image_bytes
def load_img(img_bytes, gray: bool = False, return_exif: bool = False):
    """Decode raw image bytes into a numpy array.

    Args:
        img_bytes: Encoded image bytes.
        gray: If True, convert to single-channel grayscale.
        return_exif: If True, also return exif/metadata collected before
            orientation is applied.

    Returns:
        (np_img, alpha_channel) or (np_img, alpha_channel, exif_infos).
        alpha_channel is None unless the source image was RGBA.
    """
    alpha_channel = None
    image = Image.open(io.BytesIO(img_bytes))
    if return_exif:
        info = image.info or {}
        exif_infos = {"exif": image.getexif(), "parameters": info.get("parameters")}
    # Apply exif orientation; some images carry broken exif data, in
    # which case the original orientation is kept.
    # Fix: bare `except:` narrowed so SystemExit/KeyboardInterrupt pass through.
    try:
        image = ImageOps.exif_transpose(image)
    except Exception:
        pass
    if gray:
        image = image.convert("L")
        np_img = np.array(image)
    else:
        if image.mode == "RGBA":
            np_img = np.array(image)
            alpha_channel = np_img[:, :, -1]
            np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2RGB)
        else:
            image = image.convert("RGB")
            np_img = np.array(image)
    if return_exif:
        return np_img, alpha_channel, exif_infos
    return np_img, alpha_channel
def norm_img(np_img):
    """Convert an HW or HWC uint8 image to a CHW float32 array in [0, 1]."""
    if np_img.ndim == 2:
        np_img = np_img[:, :, np.newaxis]
    chw = np_img.transpose((2, 0, 1))
    return chw.astype("float32") / 255
def resize_max_size(
    np_img, size_limit: int, interpolation=cv2.INTER_CUBIC
) -> np.ndarray:
    """Downscale so the longer side equals size_limit; never upscales.

    The shorter side is scaled proportionally and rounded to the nearest
    integer. Images already within the limit are returned unchanged.
    """
    height, width = np_img.shape[:2]
    longer = max(height, width)
    if longer <= size_limit:
        return np_img
    scale = size_limit / longer
    new_size = (int(width * scale + 0.5), int(height * scale + 0.5))
    return cv2.resize(np_img, dsize=new_size, interpolation=interpolation)
def pad_img_to_modulo(
    img: np.ndarray, mod: int, square: bool = False, min_size: Optional[int] = None
):
    """Symmetrically pad an image (bottom/right) so both sides are
    multiples of *mod*.

    Args:
        img: [H, W, C] array; a 2-D [H, W] input is expanded to one channel.
        mod: Modulo the output height/width must satisfy.
        square: If True, pad to a square whose side is the larger output side.
        min_size: Optional lower bound for both sides; must be a multiple
            of mod.

    Returns:
        The padded [H', W', C] array.
    """
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
    height, width = img.shape[:2]
    target_h = ceil_modulo(height, mod)
    target_w = ceil_modulo(width, mod)
    if min_size is not None:
        assert min_size % mod == 0
        target_h = max(min_size, target_h)
        target_w = max(min_size, target_w)
    if square:
        side = max(target_h, target_w)
        target_h = target_w = side
    pad_spec = ((0, target_h - height), (0, target_w - width), (0, 0))
    return np.pad(img, pad_spec, mode="symmetric")
def boxes_from_mask(mask: np.ndarray) -> List[np.ndarray]:
    """Compute axis-aligned bounding boxes for each blob in a mask.

    Args:
        mask: (h, w, 1) array with values in 0~255; thresholded at 127.

    Returns:
        A list of [x1, y1, x2, y2] int arrays, clipped to image bounds.
    """
    height, width = mask.shape[:2]
    _, binary = cv2.threshold(mask, 127, 255, 0)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        box = np.array([x, y, x + w, y + h]).astype(int)
        # Even indices are x coordinates, odd indices are y coordinates.
        box[::2] = np.clip(box[::2], 0, width)
        box[1::2] = np.clip(box[1::2], 0, height)
        boxes.append(box)
    return boxes
def only_keep_largest_contour(mask: np.ndarray) -> List[np.ndarray]:
    """Zero out everything except the largest contour in a mask.

    Args:
        mask: (h, w) array with values in 0~255; thresholded at 127.

    Returns:
        A new mask with only the largest contour filled (value 255), or
        the original mask unchanged when no contour has positive area.
    """
    _, binary = cv2.threshold(mask, 127, 255, 0)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    largest_area = 0
    largest_idx = -1
    for idx, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > largest_area:
            largest_area, largest_idx = area, idx
    if largest_idx == -1:
        return mask
    result = np.zeros_like(mask)
    return cv2.drawContours(result, contours, largest_idx, 255, -1)
| 8,639 | 28.488055 | 165 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/runtime.py | # https://github.com/huggingface/huggingface_hub/blob/5a12851f54bf614be39614034ed3a9031922d297/src/huggingface_hub/utils/_runtime.py
import platform
import sys
import packaging.version
from rich import print
from typing import Dict, Any
_PY_VERSION: str = sys.version.split()[0].rstrip("+")
if packaging.version.Version(_PY_VERSION) < packaging.version.Version("3.8.0"):
import importlib_metadata # type: ignore
else:
import importlib.metadata as importlib_metadata # type: ignore
_package_versions = {}
_CANDIDATES = [
"torch",
"torchvision",
"Pillow",
"diffusers",
"transformers",
"opencv-python",
"xformers",
"accelerate",
"lama-cleaner",
"rembg",
"realesrgan",
"gfpgan",
]
# Check once at runtime
for name in _CANDIDATES:
_package_versions[name] = "N/A"
try:
_package_versions[name] = importlib_metadata.version(name)
except importlib_metadata.PackageNotFoundError:
pass
def dump_environment_info() -> Dict[str, str]:
    """Print machine and package version info to help debugging, and
    return the collected mapping."""
    info: Dict[str, Any] = {
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
    }
    # Append the package versions gathered at import time.
    info.update(_package_versions)
    lines = [f"- {prop}: {val}" for prop, val in info.items()]
    print("\n".join(lines) + "\n")
    return info
| 1,374 | 25.960784 | 132 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/parse_args.py | import os
import imghdr
import argparse
from pathlib import Path
from loguru import logger
from lama_cleaner.const import *
from lama_cleaner.runtime import dump_environment_info
def parse_args():
    """Parse, validate and normalize lama-cleaner command line arguments.

    Side effects: prints environment info; may launch the installer web
    config page or install plugin packages and exit; sets U2NET_HOME /
    XDG_CACHE_HOME environment variables; creates the model cache and
    output directories when missing.

    Returns:
        argparse.Namespace: the validated arguments.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--host", default="127.0.0.1")
    parser.add_argument("--port", default=8080, type=int)
    parser.add_argument(
        "--config-installer",
        action="store_true",
        help="Open config web page, mainly for windows installer",
    )
    parser.add_argument(
        "--load-installer-config",
        action="store_true",
        help="Load all cmd args from installer config file",
    )
    parser.add_argument(
        "--installer-config", default=None, help="Config file for windows installer"
    )
    parser.add_argument("--model", default=DEFAULT_MODEL, choices=AVAILABLE_MODELS)
    parser.add_argument("--no-half", action="store_true", help=NO_HALF_HELP)
    parser.add_argument("--cpu-offload", action="store_true", help=CPU_OFFLOAD_HELP)
    parser.add_argument("--disable-nsfw", action="store_true", help=DISABLE_NSFW_HELP)
    parser.add_argument(
        "--sd-cpu-textencoder", action="store_true", help=SD_CPU_TEXTENCODER_HELP
    )
    parser.add_argument("--sd-controlnet", action="store_true", help=SD_CONTROLNET_HELP)
    parser.add_argument(
        "--sd-controlnet-method",
        default=DEFAULT_CONTROLNET_METHOD,
        choices=SD_CONTROLNET_CHOICES,
    )
    parser.add_argument("--sd-local-model-path", default=None, help=SD_LOCAL_MODEL_HELP)
    parser.add_argument(
        "--local-files-only", action="store_true", help=LOCAL_FILES_ONLY_HELP
    )
    parser.add_argument(
        "--enable-xformers", action="store_true", help=ENABLE_XFORMERS_HELP
    )
    parser.add_argument(
        "--device", default=DEFAULT_DEVICE, type=str, choices=AVAILABLE_DEVICES
    )
    parser.add_argument("--gui", action="store_true", help=GUI_HELP)
    parser.add_argument(
        "--no-gui-auto-close", action="store_true", help=NO_GUI_AUTO_CLOSE_HELP
    )
    parser.add_argument(
        "--gui-size",
        default=[1600, 1000],
        nargs=2,
        type=int,
        help="Set window size for GUI",
    )
    parser.add_argument("--input", type=str, default=None, help=INPUT_HELP)
    parser.add_argument("--output-dir", type=str, default=None, help=OUTPUT_DIR_HELP)
    parser.add_argument(
        "--model-dir", type=str, default=DEFAULT_MODEL_DIR, help=MODEL_DIR_HELP
    )
    parser.add_argument(
        "--disable-model-switch",
        action="store_true",
        help="Disable model switch in frontend",
    )
    parser.add_argument(
        "--quality",
        default=95,
        type=int,
        help=QUALITY_HELP,
    )
    # Plugins
    parser.add_argument(
        "--enable-interactive-seg",
        action="store_true",
        help=INTERACTIVE_SEG_HELP,
    )
    parser.add_argument(
        "--interactive-seg-model",
        default="vit_l",
        choices=AVAILABLE_INTERACTIVE_SEG_MODELS,
        help=INTERACTIVE_SEG_MODEL_HELP,
    )
    parser.add_argument(
        "--interactive-seg-device",
        default="cpu",
        choices=AVAILABLE_INTERACTIVE_SEG_DEVICES,
    )
    parser.add_argument(
        "--enable-remove-bg",
        action="store_true",
        help=REMOVE_BG_HELP,
    )
    parser.add_argument(
        "--enable-anime-seg",
        action="store_true",
        help=ANIMESEG_HELP,
    )
    parser.add_argument(
        "--enable-realesrgan",
        action="store_true",
        help=REALESRGAN_HELP,
    )
    parser.add_argument(
        "--realesrgan-device",
        default="cpu",
        type=str,
        choices=REALESRGAN_AVAILABLE_DEVICES,
    )
    parser.add_argument(
        "--realesrgan-model",
        default=RealESRGANModelName.realesr_general_x4v3.value,
        type=str,
        choices=RealESRGANModelNameList,
    )
    parser.add_argument(
        "--realesrgan-no-half",
        action="store_true",
        help="Disable half precision for RealESRGAN",
    )
    parser.add_argument("--enable-gfpgan", action="store_true", help=GFPGAN_HELP)
    parser.add_argument(
        "--gfpgan-device", default="cpu", type=str, choices=GFPGAN_AVAILABLE_DEVICES
    )
    parser.add_argument(
        "--enable-restoreformer", action="store_true", help=RESTOREFORMER_HELP
    )
    parser.add_argument(
        "--restoreformer-device",
        default="cpu",
        type=str,
        choices=RESTOREFORMER_AVAILABLE_DEVICES,
    )
    parser.add_argument(
        "--enable-gif",
        action="store_true",
        help=GIF_HELP,
    )
    parser.add_argument(
        "--install-plugins-package",
        action="store_true",
    )
    #########
    # useless args, kept so old command lines don't break
    parser.add_argument("--debug", action="store_true", help=argparse.SUPPRESS)
    parser.add_argument("--hf_access_token", default="", help=argparse.SUPPRESS)
    parser.add_argument(
        "--sd-disable-nsfw", action="store_true", help=argparse.SUPPRESS
    )
    parser.add_argument("--sd-run-local", action="store_true", help=argparse.SUPPRESS)
    parser.add_argument(
        "--sd-enable-xformers", action="store_true", help=argparse.SUPPRESS
    )
    args = parser.parse_args()
    # collect system info to help debug
    dump_environment_info()
    if args.install_plugins_package:
        from lama_cleaner.installer import install_plugins_package
        install_plugins_package()
        exit()
    if args.config_installer:
        if args.installer_config is None:
            parser.error(
                "args.config_installer==True, must set args.installer_config to store config file"
            )
        from lama_cleaner.web_config import main
        logger.info("Launching installer web config page")
        main(args.installer_config)
        exit()
    if args.load_installer_config:
        if args.installer_config and not os.path.exists(args.installer_config):
            parser.error(f"args.installer_config={args.installer_config} not exists")
        logger.info(f"Loading installer config from {args.installer_config}")
        _args = load_config(args.installer_config)
        # Only overwrite arguments the parser actually knows about.
        for k, v in vars(_args).items():
            if k in vars(args):
                setattr(args, k, v)
    if args.device == "cuda":
        import platform
        if platform.system() == "Darwin":
            logger.info("MacOS does not support cuda, use cpu instead")
            setattr(args, "device", "cpu")
        else:
            import torch
            # Fix: use `not ...` instead of the non-idiomatic `is False`.
            if not torch.cuda.is_available():
                parser.error(
                    "torch.cuda.is_available() is False, please use --device cpu or check your pytorch installation"
                )
    if args.sd_local_model_path and args.model == "sd1.5":
        if not os.path.exists(args.sd_local_model_path):
            parser.error(
                f"invalid --sd-local-model-path: {args.sd_local_model_path} not exists"
            )
        if not os.path.isfile(args.sd_local_model_path):
            parser.error(
                f"invalid --sd-local-model-path: {args.sd_local_model_path} is a directory"
            )
    os.environ["U2NET_HOME"] = DEFAULT_MODEL_DIR
    # Fix: `x and x is not None` is just the truthiness of x.
    if args.model_dir:
        if os.path.isfile(args.model_dir):
            parser.error(f"invalid --model-dir: {args.model_dir} is a file")
        if not os.path.exists(args.model_dir):
            logger.info(f"Create model cache directory: {args.model_dir}")
            Path(args.model_dir).mkdir(exist_ok=True, parents=True)
        os.environ["XDG_CACHE_HOME"] = args.model_dir
        os.environ["U2NET_HOME"] = args.model_dir
    if args.input:
        if not os.path.exists(args.input):
            parser.error(f"invalid --input: {args.input} not exists")
        if os.path.isfile(args.input):
            if imghdr.what(args.input) is None:
                parser.error(f"invalid --input: {args.input} is not a valid image file")
        else:
            # Directory input runs in batch mode and needs somewhere to write.
            if args.output_dir is None:
                parser.error(
                    f"invalid --input: {args.input} is a directory, --output-dir is required"
                )
    if args.output_dir is not None:
        output_dir = Path(args.output_dir)
        if not output_dir.exists():
            logger.info(f"Creating output directory: {output_dir}")
            output_dir.mkdir(parents=True)
        else:
            if not output_dir.is_dir():
                parser.error(f"invalid --output-dir: {output_dir} is not a directory")
    return args
| 8,695 | 32.836576 | 116 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/gfpganer.py | import os
import torch
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
from gfpgan import GFPGANv1Clean, GFPGANer
from torch.hub import get_dir
class MyGFPGANer(GFPGANer):
    """Helper for restoration with GFPGAN.

    It will detect and crop faces, and then resize the faces to 512x512.
    GFPGAN is used to restore the resized faces.
    The background is upsampled with the bg_upsampler.
    Finally, the faces will be pasted back to the upsampled background image.

    Args:
        model_path (str): The path to the GFPGAN model. It can be urls (will first download it automatically).
        upscale (float): The upscale of the final output. Default: 2.
        arch (str): The GFPGAN architecture. Option: clean | RestoreFormer. Default: clean.
        channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
        bg_upsampler (nn.Module): The upsampler for the background. Default: None.
        device: Torch device to run on; auto-detected (cuda if available) when None.
    """

    def __init__(
        self,
        model_path,
        upscale=2,
        arch="clean",
        channel_multiplier=2,
        bg_upsampler=None,
        device=None,
    ):
        self.upscale = upscale
        self.bg_upsampler = bg_upsampler
        # initialize model
        self.device = (
            torch.device("cuda" if torch.cuda.is_available() else "cpu")
            if device is None
            else device
        )
        # initialize the GFP-GAN
        if arch == "clean":
            self.gfpgan = GFPGANv1Clean(
                out_size=512,
                num_style_feat=512,
                channel_multiplier=channel_multiplier,
                decoder_load_path=None,
                fix_decoder=False,
                num_mlp=8,
                input_is_latent=True,
                different_w=True,
                narrow=1,
                sft_half=True,
            )
        elif arch == "RestoreFormer":
            from gfpgan.archs.restoreformer_arch import RestoreFormer
            self.gfpgan = RestoreFormer()
        # Store facexlib detection models next to the torch hub checkpoints.
        hub_dir = get_dir()
        model_dir = os.path.join(hub_dir, "checkpoints")
        # initialize face helper
        self.face_helper = FaceRestoreHelper(
            upscale,
            face_size=512,
            crop_ratio=(1, 1),
            det_model="retinaface_resnet50",
            save_ext="png",
            use_parse=True,
            device=self.device,
            model_rootpath=model_dir,
        )
        # Fix: always deserialize onto CPU so checkpoints saved on a CUDA
        # machine can still be loaded on CPU-only hosts; the module is
        # moved to self.device afterwards. (Matches load_model/load_jit_model.)
        loadnet = torch.load(model_path, map_location="cpu")
        if "params_ema" in loadnet:
            keyname = "params_ema"
        else:
            keyname = "params"
        self.gfpgan.load_state_dict(loadnet[keyname], strict=True)
        self.gfpgan.eval()
        self.gfpgan = self.gfpgan.to(self.device)
| 2,750 | 31.364706 | 110 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/anime_seg.py | import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from PIL import Image
from lama_cleaner.helper import load_model
from lama_cleaner.plugins.base_plugin import BasePlugin
class REBNCONV(nn.Module):
    """3x3 Conv2d + BatchNorm + ReLU block with configurable dilation and stride."""

    def __init__(self, in_ch=3, out_ch=3, dirate=1, stride=1):
        super(REBNCONV, self).__init__()
        # Padding equals the dilation rate so spatial size is preserved
        # (for stride 1).
        self.conv_s1 = nn.Conv2d(
            in_ch, out_ch, 3, padding=1 * dirate, dilation=1 * dirate, stride=stride
        )
        self.bn_s1 = nn.BatchNorm2d(out_ch)
        self.relu_s1 = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu_s1(self.bn_s1(self.conv_s1(x)))
## upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src, tar):
src = F.interpolate(src, size=tar.shape[2:], mode="bilinear", align_corners=False)
return src
### RSU-7 ###
class RSU7(nn.Module):
    """Residual U-block with 7 levels: a small encoder/decoder whose
    output is added to the block's input projection (``hx1d + hxin``).
    Part of the U2Net-style ISNet architecture."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3, img_size=512):
        super(RSU7, self).__init__()
        # NOTE(review): in_ch/mid_ch/out_ch are stored but never read back;
        # img_size is unused.
        self.in_ch = in_ch
        self.mid_ch = mid_ch
        self.out_ch = out_ch
        # Input projection; its output is the residual added in forward().
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)  ## 1 -> 1/2
        # Encoder: conv blocks separated by 2x max-pooling.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # Bottleneck: dilated conv instead of further pooling.
        self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # Decoder: each block takes the concatenation of the upsampled
        # deeper feature and the matching encoder feature (skip connection).
        self.rebnconv6d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        # b, c, h, w are unpacked but unused.
        b, c, h, w = x.shape
        hx = x
        hxin = self.rebnconvin(hx)
        # ----- encoder -----
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)
        hx = self.pool4(hx4)
        hx5 = self.rebnconv5(hx)
        hx = self.pool5(hx5)
        hx6 = self.rebnconv6(hx)
        hx7 = self.rebnconv7(hx6)
        # ----- decoder with skip connections -----
        hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))
        hx6dup = _upsample_like(hx6d, hx5)
        hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # Residual connection with the input projection.
        return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):
    """Residual U-block with 6 levels; same structure as RSU7 but one
    pooling stage shallower."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU6, self).__init__()
        # Input projection; its output is the residual added in forward().
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # Encoder: conv blocks separated by 2x max-pooling.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # Bottleneck: dilated conv instead of further pooling.
        self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # Decoder blocks consume [upsampled deeper feature, skip feature].
        self.rebnconv5d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)
        # ----- encoder -----
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)
        hx = self.pool4(hx4)
        hx5 = self.rebnconv5(hx)
        hx6 = self.rebnconv6(hx5)
        # ----- decoder with skip connections -----
        hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # Residual connection with the input projection.
        return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):
    """Residual U-block with 5 levels; same structure as RSU6 but one
    pooling stage shallower."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU5, self).__init__()
        # Input projection; its output is the residual added in forward().
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # Encoder: conv blocks separated by 2x max-pooling.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # Bottleneck: dilated conv instead of further pooling.
        self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # Decoder blocks consume [upsampled deeper feature, skip feature].
        self.rebnconv4d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)
        # ----- encoder -----
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx = self.pool3(hx3)
        hx4 = self.rebnconv4(hx)
        hx5 = self.rebnconv5(hx4)
        # ----- decoder with skip connections -----
        hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # Residual connection with the input projection.
        return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):
    """Residual U-block with 4 levels; same structure as RSU5 but one
    pooling stage shallower."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4, self).__init__()
        # Input projection; its output is the residual added in forward().
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # Encoder: conv blocks separated by 2x max-pooling.
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
        self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
        # Bottleneck: dilated conv instead of further pooling.
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)
        # Decoder blocks consume [upsampled deeper feature, skip feature].
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=1)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)
        # ----- encoder -----
        hx1 = self.rebnconv1(hxin)
        hx = self.pool1(hx1)
        hx2 = self.rebnconv2(hx)
        hx = self.pool2(hx2)
        hx3 = self.rebnconv3(hx)
        hx4 = self.rebnconv4(hx3)
        # ----- decoder with skip connections -----
        hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
        # Residual connection with the input projection.
        return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):
    """Dilation-only residual U-block ("F" = flat): no pooling/upsampling,
    receptive field grows through dilation rates 1/2/4/8 instead."""

    def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
        super(RSU4F, self).__init__()
        # Input projection; its output is the residual added in forward().
        self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
        # Encoder path with increasing dilation (spatial size unchanged).
        self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
        self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)
        self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)
        self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)
        # Decoder path with decreasing dilation and skip concatenation.
        self.rebnconv3d = REBNCONV(mid_ch * 2, mid_ch, dirate=4)
        self.rebnconv2d = REBNCONV(mid_ch * 2, mid_ch, dirate=2)
        self.rebnconv1d = REBNCONV(mid_ch * 2, out_ch, dirate=1)

    def forward(self, x):
        hx = x
        hxin = self.rebnconvin(hx)
        hx1 = self.rebnconv1(hxin)
        hx2 = self.rebnconv2(hx1)
        hx3 = self.rebnconv3(hx2)
        hx4 = self.rebnconv4(hx3)
        # No upsampling needed: all features share the same spatial size.
        hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
        hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1))
        hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1))
        # Residual connection with the input projection.
        return hx1d + hxin
class ISNetDIS(nn.Module):
    """ISNet segmentation network: a U2Net-style stack of RSU blocks with
    a single side output, returning a sigmoid mask upsampled to the input
    resolution."""

    def __init__(self, in_ch=3, out_ch=1):
        super(ISNetDIS, self).__init__()
        # Stem conv halves the spatial resolution (stride 2), then pooling.
        self.conv_in = nn.Conv2d(in_ch, 64, 3, stride=2, padding=1)
        self.pool_in = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Encoder stages: progressively shallower RSU blocks between pools.
        self.stage1 = RSU7(64, 32, 64)
        self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage2 = RSU6(64, 32, 128)
        self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage3 = RSU5(128, 64, 256)
        self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage4 = RSU4(256, 128, 512)
        self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage5 = RSU4F(512, 256, 512)
        self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.stage6 = RSU4F(512, 256, 512)
        # decoder
        self.stage5d = RSU4F(1024, 256, 512)
        self.stage4d = RSU4(1024, 128, 256)
        self.stage3d = RSU5(512, 64, 128)
        self.stage2d = RSU6(256, 32, 64)
        self.stage1d = RSU7(128, 16, 64)
        # Single side output head producing the mask logits.
        self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)

    def forward(self, x):
        hx = x
        # NOTE(review): pool_in(hxin) is computed but its result is
        # immediately overwritten — stage1 consumes hxin directly.
        hxin = self.conv_in(hx)
        hx = self.pool_in(hxin)
        # stage 1
        hx1 = self.stage1(hxin)
        hx = self.pool12(hx1)
        # stage 2
        hx2 = self.stage2(hx)
        hx = self.pool23(hx2)
        # stage 3
        hx3 = self.stage3(hx)
        hx = self.pool34(hx3)
        # stage 4
        hx4 = self.stage4(hx)
        hx = self.pool45(hx4)
        # stage 5
        hx5 = self.stage5(hx)
        hx = self.pool56(hx5)
        # stage 6
        hx6 = self.stage6(hx)
        hx6up = _upsample_like(hx6, hx5)
        # -------------------- decoder --------------------
        hx5d = self.stage5d(torch.cat((hx6up, hx5), 1))
        hx5dup = _upsample_like(hx5d, hx4)
        hx4d = self.stage4d(torch.cat((hx5dup, hx4), 1))
        hx4dup = _upsample_like(hx4d, hx3)
        hx3d = self.stage3d(torch.cat((hx4dup, hx3), 1))
        hx3dup = _upsample_like(hx3d, hx2)
        hx2d = self.stage2d(torch.cat((hx3dup, hx2), 1))
        hx2dup = _upsample_like(hx2d, hx1)
        hx1d = self.stage1d(torch.cat((hx2dup, hx1), 1))
        # side output
        d1 = self.side1(hx1d)
        # Upsample logits to the input resolution, then squash to [0, 1].
        d1 = _upsample_like(d1, x)
        return d1.sigmoid()
# Download location and md5 checksum for the anime-segmentation (ISNet) weights.
ANIME_SEG_MODELS = {
    "url": "https://github.com/Sanster/models/releases/download/isnetis/isnetis.pth",
    "md5": "5f25479076b73074730ab8de9e8f2051",
}
class AnimeSeg(BasePlugin):
    """Anime foreground segmentation plugin.

    Runs an ISNetDIS model (weights downloaded on first use) on CPU and
    returns the input image with the background made transparent (RGBA).
    """

    # Model from: https://github.com/SkyTNT/anime-segmentation
    name = "AnimeSeg"

    def __init__(self):
        super().__init__()
        self.model = load_model(
            ISNetDIS(),
            ANIME_SEG_MODELS["url"],
            "cpu",
            ANIME_SEG_MODELS["md5"],
        )

    def __call__(self, rgb_np_img, files, form):
        # `files`/`form` are part of the plugin interface but unused here.
        return self.forward(rgb_np_img)

    @torch.no_grad()
    def forward(self, rgb_np_img):
        # Letterbox the image into a 1024x1024 canvas: resize so the longer
        # side is 1024, center it, and zero-pad the rest.
        s = 1024
        h0, w0 = h, w = rgb_np_img.shape[0], rgb_np_img.shape[1]
        if h > w:
            h, w = s, int(s * w / h)
        else:
            h, w = int(s * h / w), s
        ph, pw = s - h, s - w
        tmpImg = np.zeros([s, s, 3], dtype=np.float32)
        # Normalize to [0, 1] while pasting into the canvas.
        tmpImg[ph // 2 : ph // 2 + h, pw // 2 : pw // 2 + w] = (
            cv2.resize(rgb_np_img, (w, h)) / 255
        )
        # HWC -> CHW, add a batch dimension.
        tmpImg = tmpImg.transpose((2, 0, 1))
        tmpImg = torch.from_numpy(tmpImg).unsqueeze(0).type(torch.FloatTensor)
        mask = self.model(tmpImg)
        # Crop the padding back out and resize the mask to the original size.
        mask = mask[0, :, ph // 2 : ph // 2 + h, pw // 2 : pw // 2 + w]
        mask = cv2.resize(mask.cpu().numpy().transpose((1, 2, 0)), (w0, h0))
        # Composite the input over a transparent canvas using the mask as alpha.
        mask = Image.fromarray((mask * 255).astype("uint8"), mode="L")
        empty = Image.new("RGBA", (w0, h0), 0)
        img = Image.fromarray(rgb_np_img)
        cutout = Image.composite(img, empty, mask)
        return np.asarray(cutout)
| 13,465 | 28.530702 | 86 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/remove_bg.py | import os
import cv2
import numpy as np
from torch.hub import get_dir
from lama_cleaner.plugins.base_plugin import BasePlugin
class RemoveBG(BasePlugin):
    """Background removal plugin backed by rembg's u2net model."""

    name = "RemoveBG"

    def __init__(self):
        super().__init__()
        from rembg import new_session
        # Keep rembg's model cache next to the torch hub checkpoints.
        model_dir = os.path.join(get_dir(), "checkpoints")
        os.environ["U2NET_HOME"] = model_dir
        self.session = new_session(model_name="u2net")

    def __call__(self, rgb_np_img, files, form):
        # `files`/`form` are part of the plugin interface but unused here.
        bgr = cv2.cvtColor(rgb_np_img, cv2.COLOR_RGB2BGR)
        return self.forward(bgr)

    def forward(self, bgr_np_img) -> np.ndarray:
        from rembg import remove
        # rembg returns a BGRA image; convert to RGBA for the caller.
        bgra = remove(bgr_np_img, session=self.session)
        return cv2.cvtColor(bgra, cv2.COLOR_BGRA2RGBA)

    def check_dep(self):
        try:
            import rembg  # noqa: F401
        except ImportError:
            return (
                "RemoveBG is not installed, please install it first. pip install rembg"
            )
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/predictor.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from .modeling import Sam
from typing import Optional, Tuple
class SamPredictor:
def __init__(
self,
sam_model: Sam,
) -> None:
"""
Uses SAM to calculate the image embedding for an image, and then
allow repeated, efficient mask prediction given prompts.
Arguments:
sam_model (Sam): The model to use for mask prediction.
"""
super().__init__()
self.model = sam_model
from .utils.transforms import ResizeLongestSide
self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
self.reset_image()
def set_image(
self,
image: np.ndarray,
image_format: str = "RGB",
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method.
Arguments:
image (np.ndarray): The image for calculating masks. Expects an
image in HWC uint8 format, with pixel values in [0, 255].
image_format (str): The color format of the image, in ['RGB', 'BGR'].
"""
assert image_format in [
"RGB",
"BGR",
], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
if image_format != self.model.image_format:
image = image[..., ::-1]
# Transform the image to the form expected by the model
input_image = self.transform.apply_image(image)
input_image_torch = torch.as_tensor(input_image, device=self.device)
input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[
None, :, :, :
]
self.set_torch_image(input_image_torch, image.shape[:2])
@torch.no_grad()
def set_torch_image(
self,
transformed_image: torch.Tensor,
original_image_size: Tuple[int, ...],
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method. Expects the input
image to be already transformed to the format expected by the model.
Arguments:
transformed_image (torch.Tensor): The input image, with shape
1x3xHxW, which has been transformed with ResizeLongestSide.
original_image_size (tuple(int, int)): The size of the image
before transformation, in (H, W) format.
"""
assert (
len(transformed_image.shape) == 4
and transformed_image.shape[1] == 3
and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
self.reset_image()
self.original_size = original_image_size
self.input_size = tuple(transformed_image.shape[-2:])
input_image = self.model.preprocess(transformed_image)
self.features = self.model.image_encoder(input_image)
self.is_image_set = True
def predict(
self,
point_coords: Optional[np.ndarray] = None,
point_labels: Optional[np.ndarray] = None,
box: Optional[np.ndarray] = None,
mask_input: Optional[np.ndarray] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks for the given input prompts, using the currently set image.
Arguments:
point_coords (np.ndarray or None): A Nx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (np.ndarray or None): A length N array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
box (np.ndarray or None): A length 4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form 1xHxW, where
for SAM, H=W=256.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(np.ndarray): The output masks in CxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(np.ndarray): An array of length C containing the model's
predictions for the quality of each mask.
(np.ndarray): An array of shape CxHxW, where C is the number
of masks and H=W=256. These low resolution logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError(
"An image must be set with .set_image(...) before mask prediction."
)
# Transform input prompts
coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
if point_coords is not None:
assert (
point_labels is not None
), "point_labels must be supplied if point_coords is supplied."
point_coords = self.transform.apply_coords(point_coords, self.original_size)
coords_torch = torch.as_tensor(
point_coords, dtype=torch.float, device=self.device
)
labels_torch = torch.as_tensor(
point_labels, dtype=torch.int, device=self.device
)
coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
if box is not None:
box = self.transform.apply_boxes(box, self.original_size)
box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
box_torch = box_torch[None, :]
if mask_input is not None:
mask_input_torch = torch.as_tensor(
mask_input, dtype=torch.float, device=self.device
)
mask_input_torch = mask_input_torch[None, :, :, :]
masks, iou_predictions, low_res_masks = self.predict_torch(
coords_torch,
labels_torch,
box_torch,
mask_input_torch,
multimask_output,
return_logits=return_logits,
)
masks = masks[0].detach().cpu().numpy()
iou_predictions = iou_predictions[0].detach().cpu().numpy()
low_res_masks = low_res_masks[0].detach().cpu().numpy()
return masks, iou_predictions, low_res_masks
@torch.no_grad()
def predict_torch(
self,
point_coords: Optional[torch.Tensor],
point_labels: Optional[torch.Tensor],
boxes: Optional[torch.Tensor] = None,
mask_input: Optional[torch.Tensor] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks for the given input prompts, using the currently set image.
Input prompts are batched torch tensors and are expected to already be
transformed to the input frame using ResizeLongestSide.
Arguments:
point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (torch.Tensor or None): A BxN array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
box (np.ndarray or None): A Bx4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form Bx1xHxW, where
for SAM, H=W=256. Masks returned by a previous iteration of the
predict method do not need further transformation.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(torch.Tensor): The output masks in BxCxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(torch.Tensor): An array of shape BxC containing the model's
predictions for the quality of each mask.
(torch.Tensor): An array of shape BxCxHxW, where C is the number
of masks and H=W=256. These low res logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError(
"An image must be set with .set_image(...) before mask prediction."
)
if point_coords is not None:
points = (point_coords, point_labels)
else:
points = None
# Embed prompts
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
points=points,
boxes=boxes,
masks=mask_input,
)
# Predict masks
low_res_masks, iou_predictions = self.model.mask_decoder(
image_embeddings=self.features,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
# Upscale the masks to the original image resolution
masks = self.model.postprocess_masks(
low_res_masks, self.input_size, self.original_size
)
if not return_logits:
masks = masks > self.model.mask_threshold
return masks, iou_predictions, low_res_masks
def get_image_embedding(self) -> torch.Tensor:
"""
Returns the image embeddings for the currently set image, with
shape 1xCxHxW, where C is the embedding dimension and (H,W) are
the embedding spatial dimension of SAM (typically C=256, H=W=64).
"""
if not self.is_image_set:
raise RuntimeError(
"An image must be set with .set_image(...) to generate an embedding."
)
assert (
self.features is not None
), "Features must exist if an image has been set."
return self.features
    @property
    def device(self) -> torch.device:
        """Device of the wrapped SAM model; prompt tensors created by this
        predictor are placed on this device before prediction."""
        return self.model.device
def reset_image(self) -> None:
"""Resets the currently set image."""
self.is_image_set = False
self.features = None
self.orig_h = None
self.orig_w = None
self.input_h = None
self.input_w = None
| 11,845 | 40.41958 | 100 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/build_sam.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
def build_sam_vit_h(checkpoint=None):
    """Build the ViT-H (huge) SAM variant, optionally loading a checkpoint."""
    vit_h_config = dict(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
    )
    return _build_sam(checkpoint=checkpoint, **vit_h_config)


# ViT-H is the default SAM model.
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
    """Build the ViT-L (large) SAM variant, optionally loading a checkpoint."""
    vit_l_config = dict(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
    )
    return _build_sam(checkpoint=checkpoint, **vit_l_config)
def build_sam_vit_b(checkpoint=None):
    """Build the ViT-B (base) SAM variant, optionally loading a checkpoint."""
    vit_b_config = dict(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
    )
    return _build_sam(checkpoint=checkpoint, **vit_b_config)
# Maps a model-type key to its builder function; "default" is ViT-H.
sam_model_registry = dict(
    default=build_sam,
    vit_h=build_sam,
    vit_l=build_sam_vit_l,
    vit_b=build_sam_vit_b,
)
def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
):
    """
    Assemble a Sam model from its image encoder, prompt encoder, and mask
    decoder, and optionally load pretrained weights.

    Arguments:
      encoder_embed_dim (int): channel width of the ViT image encoder.
      encoder_depth (int): number of transformer blocks in the image encoder.
      encoder_num_heads (int): attention heads per image-encoder block.
      encoder_global_attn_indexes (list[int]): indices of encoder blocks that
        use global (rather than windowed) attention.
      checkpoint (str or None): path to a state-dict checkpoint to load.

    Returns:
      Sam: the assembled model, in eval mode.
    """
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    # Spatial size of the encoder output grid (1024 / 16 = 64).
    image_embedding_size = image_size // vit_patch_size
    sam = Sam(
        image_encoder=ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        ),
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    sam.eval()
    if checkpoint is not None:
        with open(checkpoint, "rb") as f:
            # map_location="cpu" so checkpoints saved from a GPU run can be
            # loaded on CPU-only hosts (and without allocating GPU memory);
            # load_state_dict copies the values into the model's existing
            # CPU-resident parameters either way.
            state_dict = torch.load(f, map_location="cpu")
        sam.load_state_dict(state_dict)
    return sam
| 2,929 | 26.12963 | 89 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/utils/transforms.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.transforms.functional import resize, to_pil_image # type: ignore
from copy import deepcopy
from typing import Tuple
class ResizeLongestSide:
    """
    Resizes images to longest side 'target_length', as well as provides
    methods for resizing coordinates and boxes. Provides methods for
    transforming both numpy array and batched torch tensors.
    """

    def __init__(self, target_length: int) -> None:
        # Length the longest image side is resized to; the short side is
        # scaled proportionally.
        self.target_length = target_length

    def apply_image(self, image: np.ndarray) -> np.ndarray:
        """
        Expects a numpy array with shape HxWxC in uint8 format.
        """
        target_size = self.get_preprocess_shape(
            image.shape[0], image.shape[1], self.target_length
        )
        return np.array(resize(to_pil_image(image), target_size))

    def apply_coords(
        self, coords: np.ndarray, original_size: Tuple[int, ...]
    ) -> np.ndarray:
        """
        Expects a numpy array of length 2 in the final dimension. Requires the
        original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.get_preprocess_shape(
            original_size[0], original_size[1], self.target_length
        )
        # Copy so the caller's array is not mutated.
        coords = deepcopy(coords).astype(float)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        return coords

    def apply_boxes(
        self, boxes: np.ndarray, original_size: Tuple[int, ...]
    ) -> np.ndarray:
        """
        Expects a numpy array shape Bx4. Requires the original image size
        in (H, W) format.
        """
        # A box is just two corner points; reuse the coordinate transform.
        boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
        return boxes.reshape(-1, 4)

    def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
        """
        Expects batched images with shape BxCxHxW and float format. This
        transformation may not exactly match apply_image. apply_image is
        the transformation expected by the model.
        """
        # BUG FIX: the spatial dimensions of a BxCxHxW tensor are at
        # indices 2 and 3; the previous code read shape[0]/shape[1]
        # (batch and channels), producing a wrongly sized resize.
        target_size = self.get_preprocess_shape(
            image.shape[2], image.shape[3], self.target_length
        )
        return F.interpolate(
            image, target_size, mode="bilinear", align_corners=False, antialias=True
        )

    def apply_coords_torch(
        self, coords: torch.Tensor, original_size: Tuple[int, ...]
    ) -> torch.Tensor:
        """
        Expects a torch tensor with length 2 in the last dimension. Requires the
        original image size in (H, W) format.
        """
        old_h, old_w = original_size
        new_h, new_w = self.get_preprocess_shape(
            original_size[0], original_size[1], self.target_length
        )
        # Copy so the caller's tensor is not mutated.
        coords = deepcopy(coords).to(torch.float)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        return coords

    def apply_boxes_torch(
        self, boxes: torch.Tensor, original_size: Tuple[int, ...]
    ) -> torch.Tensor:
        """
        Expects a torch tensor with shape Bx4. Requires the original image
        size in (H, W) format.
        """
        boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
        return boxes.reshape(-1, 4)

    @staticmethod
    def get_preprocess_shape(
        oldh: int, oldw: int, long_side_length: int
    ) -> Tuple[int, int]:
        """
        Compute the output size given input size and target long side length.
        """
        scale = long_side_length * 1.0 / max(oldh, oldw)
        newh, neww = oldh * scale, oldw * scale
        # Round to the nearest integer.
        neww = int(neww + 0.5)
        newh = int(newh + 0.5)
        return (newh, neww)
| 4,054 | 34.884956 | 84 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/mask_decoder.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import List, Tuple, Type
from .common import LayerNorm2d
class MaskDecoder(nn.Module):
    """
    Decodes an image embedding and prompt embeddings into segmentation
    masks plus a per-mask quality (IoU) prediction, using a transformer.
    """
    def __init__(
        self,
        *,
        transformer_dim: int,
        transformer: nn.Module,
        num_multimask_outputs: int = 3,
        activation: Type[nn.Module] = nn.GELU,
        iou_head_depth: int = 3,
        iou_head_hidden_dim: int = 256,
    ) -> None:
        """
        Predicts masks given an image and prompt embeddings, using a
        transformer architecture.
        Arguments:
          transformer_dim (int): the channel dimension of the transformer
          transformer (nn.Module): the transformer used to predict masks
          num_multimask_outputs (int): the number of masks to predict
            when disambiguating masks
          activation (nn.Module): the type of activation to use when
            upscaling masks
          iou_head_depth (int): the depth of the MLP used to predict
            mask quality
          iou_head_hidden_dim (int): the hidden dimension of the MLP
            used to predict mask quality
        """
        super().__init__()
        self.transformer_dim = transformer_dim
        self.transformer = transformer
        self.num_multimask_outputs = num_multimask_outputs
        # Learned token whose transformer output feeds the IoU head.
        self.iou_token = nn.Embedding(1, transformer_dim)
        # One extra token beyond the multimask outputs: the single-mask
        # output selected when multimask_output=False (see forward).
        self.num_mask_tokens = num_multimask_outputs + 1
        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
        # Two stride-2 transposed convs: upscales the transformer output 4x
        # spatially while reducing channels to transformer_dim // 8.
        self.output_upscaling = nn.Sequential(
            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
            LayerNorm2d(transformer_dim // 4),
            activation(),
            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
            activation(),
        )
        # One small MLP per mask token; its output is dotted with the
        # upscaled embedding to produce that token's mask (predict_masks).
        self.output_hypernetworks_mlps = nn.ModuleList(
            [
                MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
                for i in range(self.num_mask_tokens)
            ]
        )
        # Maps the IoU token's output to one quality score per mask token.
        self.iou_prediction_head = MLP(
            transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
        )
    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.
        Arguments:
          image_embeddings (torch.Tensor): the embeddings from the image encoder
          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
          multimask_output (bool): Whether to return multiple masks or a single
            mask.
        Returns:
          torch.Tensor: batched predicted masks
          torch.Tensor: batched predictions of mask quality
        """
        masks, iou_pred = self.predict_masks(
            image_embeddings=image_embeddings,
            image_pe=image_pe,
            sparse_prompt_embeddings=sparse_prompt_embeddings,
            dense_prompt_embeddings=dense_prompt_embeddings,
        )
        # Select the correct mask or masks for output: channel 0 is the
        # single-mask output, channels 1..num_multimask_outputs are the
        # disambiguation masks.
        if multimask_output:
            mask_slice = slice(1, None)
        else:
            mask_slice = slice(0, 1)
        masks = masks[:, mask_slice, :, :]
        iou_pred = iou_pred[:, mask_slice]
        # Prepare output
        return masks, iou_pred
    def predict_masks(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predicts masks. See 'forward' for more details."""
        # Concatenate output tokens: IoU token first, then mask tokens,
        # then the sparse prompt embeddings.
        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
        # Expand per-image data in batch direction to be per-mask
        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
        src = src + dense_prompt_embeddings
        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
        b, c, h, w = src.shape
        # Run the transformer
        hs, src = self.transformer(src, pos_src, tokens)
        # Token outputs, in the same order they were concatenated above.
        iou_token_out = hs[:, 0, :]
        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)
        upscaled_embedding = self.output_upscaling(src)
        hyper_in_list: List[torch.Tensor] = []
        for i in range(self.num_mask_tokens):
            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
        hyper_in = torch.stack(hyper_in_list, dim=1)
        b, c, h, w = upscaled_embedding.shape
        # Each mask is the dot product of its hypernetwork output with the
        # upscaled per-pixel embedding.
        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
        # Generate mask quality predictions
        iou_pred = self.iou_prediction_head(iou_token_out)
        return masks, iou_pred
# Lightly adapted from
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
class MLP(nn.Module):
    """A simple multi-layer perceptron with ReLU activations between layers.

    Used by the mask decoder for the per-token hypernetworks and for the
    IoU prediction head.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        sigmoid_output: bool = False,
    ) -> None:
        """
        Arguments:
          input_dim (int): size of the input features.
          hidden_dim (int): size of every hidden layer.
          output_dim (int): size of the final output.
          num_layers (int): total number of linear layers (>= 1; with
            num_layers == 1 this is a single input->output projection).
          sigmoid_output (bool): if True, apply a sigmoid to the output.
        """
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.sigmoid_output = sigmoid_output

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            # ReLU between layers, but not after the final one.
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        if self.sigmoid_output:
            # torch.sigmoid replaces F.sigmoid, which has been deprecated
            # since PyTorch 0.4.1 and emits a warning.
            x = torch.sigmoid(x)
        return x
| 6,614 | 36.372881 | 123 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/image_encoder.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
    """ViT backbone that maps an input image to a spatial embedding grid.

    The image is split into patches, processed by a stack of transformer
    blocks (windowed attention except for the designated global blocks),
    then projected to ``out_chans`` channels by a convolutional neck.
    """

    def __init__(
        self,
        img_size: int = 1024,
        patch_size: int = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        depth: int = 12,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        out_chans: int = 256,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_abs_pos: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        global_attn_indexes: Tuple[int, ...] = (),
    ) -> None:
        """
        Args:
            img_size (int): Input image size.
            patch_size (int): Patch size.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
            depth (int): Depth of ViT.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            out_chans (int): Output channels of the convolutional neck.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_abs_pos (bool): If True, use absolute positional embeddings.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks.
            global_attn_indexes (list): Indexes for blocks using global attention.
        """
        super().__init__()
        self.img_size = img_size

        self.patch_embed = PatchEmbed(
            kernel_size=(patch_size, patch_size),
            stride=(patch_size, patch_size),
            in_chans=in_chans,
            embed_dim=embed_dim,
        )

        # Side length of the patch grid at the pretraining resolution.
        grid = img_size // patch_size

        self.pos_embed: Optional[nn.Parameter] = None
        if use_abs_pos:
            # Absolute positional embedding sized for the pretrain image size.
            self.pos_embed = nn.Parameter(
                torch.zeros(1, grid, grid, embed_dim)
            )

        self.blocks = nn.ModuleList(
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                norm_layer=norm_layer,
                act_layer=act_layer,
                use_rel_pos=use_rel_pos,
                rel_pos_zero_init=rel_pos_zero_init,
                # Global attention for the designated blocks, windowed otherwise.
                window_size=0 if i in global_attn_indexes else window_size,
                input_size=(grid, grid),
            )
            for i in range(depth)
        )

        # 1x1 projection followed by a 3x3 conv, each with channel LayerNorm.
        self.neck = nn.Sequential(
            nn.Conv2d(
                embed_dim,
                out_chans,
                kernel_size=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
            nn.Conv2d(
                out_chans,
                out_chans,
                kernel_size=3,
                padding=1,
                bias=False,
            ),
            LayerNorm2d(out_chans),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.patch_embed(x)
        if self.pos_embed is not None:
            x = x + self.pos_embed
        for block in self.blocks:
            x = block(x)
        # BHWC -> BCHW for the convolutional neck.
        return self.neck(x.permute(0, 3, 1, 2))
class Block(nn.Module):
    """Pre-norm transformer block with optional windowed attention and
    residual connections around both the attention and MLP sublayers."""

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        norm_layer: Type[nn.Module] = nn.LayerNorm,
        act_layer: Type[nn.Module] = nn.GELU,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        window_size: int = 0,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads in each ViT block.
            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            norm_layer (nn.Module): Normalization layer.
            act_layer (nn.Module): Activation layer.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks. If it equals 0, then
                use global attention.
            input_size (int or None): Input resolution for calculating the relative positional
                parameter size.
        """
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            use_rel_pos=use_rel_pos,
            rel_pos_zero_init=rel_pos_zero_init,
            # Windowed blocks attend within a window, so the relative
            # position tables are sized to the window, not the full input.
            input_size=input_size if window_size == 0 else (window_size, window_size),
        )
        self.norm2 = norm_layer(dim)
        self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
        self.window_size = window_size

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        x = self.norm1(x)

        windowed = self.window_size > 0
        if windowed:
            # Split into windows (padding if needed) before attention.
            H, W = x.shape[1], x.shape[2]
            x, pad_hw = window_partition(x, self.window_size)

        x = self.attn(x)

        if windowed:
            # Merge windows back and strip any padding.
            x = window_unpartition(x, self.window_size, pad_hw, (H, W))

        x = residual + x
        return x + self.mlp(self.norm2(x))
class Attention(nn.Module):
    """Multi-head self-attention over a 2D token grid, with optional
    decomposed relative positional embeddings."""

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = True,
        use_rel_pos: bool = False,
        rel_pos_zero_init: bool = True,
        input_size: Optional[Tuple[int, int]] = None,
    ) -> None:
        """
        Args:
            dim (int): Number of input channels.
            num_heads (int): Number of attention heads.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            input_size (int or None): Input resolution for calculating the relative positional
                parameter size.
        """
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)

        self.use_rel_pos = use_rel_pos
        if self.use_rel_pos:
            assert (
                input_size is not None
            ), "Input size must be provided if using relative positional encoding."
            # One table of learnable offsets per spatial axis, covering
            # every possible relative displacement (2*size - 1 entries).
            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        B, H, W, _ = x.shape
        tokens = H * W
        # Project to q/k/v and separate heads: (3, B, nHead, tokens, head_dim).
        qkv = (
            self.qkv(x)
            .reshape(B, tokens, 3, self.num_heads, -1)
            .permute(2, 0, 3, 1, 4)
        )
        # Flatten batch and heads together: each of q, k, v is
        # (B * nHead, tokens, head_dim).
        q, k, v = qkv.reshape(3, B * self.num_heads, tokens, -1).unbind(0)

        scores = (q * self.scale) @ k.transpose(-2, -1)
        if self.use_rel_pos:
            scores = add_decomposed_rel_pos(
                scores, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)
            )
        weights = scores.softmax(dim=-1)

        # Re-assemble heads back into the channel dimension: B x H x W x C.
        out = (
            (weights @ v)
            .view(B, self.num_heads, H, W, -1)
            .permute(0, 2, 3, 1, 4)
            .reshape(B, H, W, -1)
        )
        return self.proj(out)
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
    """
    Partition a token grid into non-overlapping square windows, zero-padding
    the bottom/right edges when H or W is not a multiple of the window size.

    Args:
        x (tensor): input tokens with [B, H, W, C].
        window_size (int): window size.

    Returns:
        windows: windows after partition with [B * num_windows, window_size, window_size, C].
        (Hp, Wp): padded height and width before partition
    """
    B, H, W, C = x.shape

    # Padding that rounds H and W up to the next multiple of window_size.
    pad_h = -H % window_size
    pad_w = -W % window_size
    if pad_h or pad_w:
        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
    Hp, Wp = H + pad_h, W + pad_w

    # Split each spatial axis into (num_blocks, window_size), then gather
    # the per-window axes together.
    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
    windows = (
        x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    )
    return windows, (Hp, Wp)
def window_unpartition(
    windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
) -> torch.Tensor:
    """
    Reassemble windows produced by window_partition into the original token
    grid, stripping any padding that was added.

    Args:
        windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
        window_size (int): window size.
        pad_hw (Tuple): padded height and width (Hp, Wp).
        hw (Tuple): original height and width (H, W) before padding.

    Returns:
        x: unpartitioned sequences with [B, H, W, C].
    """
    Hp, Wp = pad_hw
    H, W = hw
    # Recover the batch size from the total window count.
    num_windows = (Hp // window_size) * (Wp // window_size)
    B = windows.shape[0] // num_windows

    x = windows.view(
        B, Hp // window_size, Wp // window_size, window_size, window_size, -1
    )
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)

    if Hp > H or Wp > W:
        # Drop the zero padding added during partition.
        x = x[:, :H, :W, :].contiguous()
    return x
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
    """
    Look up relative positional embeddings for every (query, key) index pair
    along one spatial axis.

    Args:
        q_size (int): size of query q.
        k_size (int): size of key k.
        rel_pos (Tensor): relative position embeddings (L, C).

    Returns:
        Extracted positional embeddings according to relative positions.
    """
    max_rel_dist = int(2 * max(q_size, k_size) - 1)

    if rel_pos.shape[0] == max_rel_dist:
        rel_pos_resized = rel_pos
    else:
        # Linearly interpolate the table to the required number of offsets.
        rel_pos_resized = F.interpolate(
            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
            size=max_rel_dist,
            mode="linear",
        )
        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)

    # Scale the coordinates with the short length when q and k differ, then
    # shift so every relative offset indexes the table non-negatively.
    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)

    return rel_pos_resized[relative_coords.long()]


def add_decomposed_rel_pos(
    attn: torch.Tensor,
    q: torch.Tensor,
    rel_pos_h: torch.Tensor,
    rel_pos_w: torch.Tensor,
    q_size: Tuple[int, int],
    k_size: Tuple[int, int],
) -> torch.Tensor:
    """
    Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
    https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950

    Args:
        attn (Tensor): attention map.
        q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
        rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
        rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
        q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
        k_size (Tuple): spatial sequence size of key k with (k_h, k_w).

    Returns:
        attn (Tensor): attention map with added relative positional embeddings.
    """
    q_h, q_w = q_size
    k_h, k_w = k_size

    # Per-axis relative position tables aligned to the query/key lengths.
    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
    Rw = get_rel_pos(q_w, k_w, rel_pos_w)

    B, _, dim = q.shape
    r_q = q.reshape(B, q_h, q_w, dim)
    # Project queries onto the positional tables, one axis at a time.
    rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
    rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)

    # Broadcast-add the two axis contributions onto the 2D attention map.
    attn = attn.view(B, q_h, q_w, k_h, k_w)
    attn = attn + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
    return attn.view(B, q_h * q_w, k_h * k_w)
class PatchEmbed(nn.Module):
    """
    Image to Patch Embedding via a single strided convolution.
    """

    def __init__(
        self,
        kernel_size: Tuple[int, int] = (16, 16),
        stride: Tuple[int, int] = (16, 16),
        padding: Tuple[int, int] = (0, 0),
        in_chans: int = 3,
        embed_dim: int = 768,
    ) -> None:
        """
        Args:
            kernel_size (Tuple): kernel size of the projection layer.
            stride (Tuple): stride of the projection layer.
            padding (Tuple): padding size of the projection layer.
            in_chans (int): Number of input image channels.
            embed_dim (int): Patch embedding dimension.
        """
        super().__init__()
        # With kernel == stride this is exactly a non-overlapping patch
        # projection.
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Project, then move channels last: B C H W -> B H W C.
        return self.proj(x).permute(0, 2, 3, 1)
| 14,407 | 35.383838 | 202 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/prompt_encoder.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch import nn
from typing import Any, Optional, Tuple, Type
from .common import LayerNorm2d
class PromptEncoder(nn.Module):
    """
    Encodes point, box, and mask prompts into the sparse and dense
    embeddings consumed by SAM's mask decoder.
    """
    def __init__(
        self,
        embed_dim: int,
        image_embedding_size: Tuple[int, int],
        input_image_size: Tuple[int, int],
        mask_in_chans: int,
        activation: Type[nn.Module] = nn.GELU,
    ) -> None:
        """
        Encodes prompts for input to SAM's mask decoder.
        Arguments:
          embed_dim (int): The prompts' embedding dimension
          image_embedding_size (tuple(int, int)): The spatial size of the
            image embedding, as (H, W).
          input_image_size (int): The padded size of the image as input
            to the image encoder, as (H, W).
          mask_in_chans (int): The number of hidden channels used for
            encoding input masks.
          activation (nn.Module): The activation to use when encoding
            input masks.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.input_image_size = input_image_size
        self.image_embedding_size = image_embedding_size
        # Shared random-frequency positional encoder for points and corners.
        self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
        self.num_point_embeddings: int = 4  # pos/neg point + 2 box corners
        # [0]: background point, [1]: foreground point, [2]/[3]: the two
        # box corners (see _embed_points / _embed_boxes).
        point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
        self.point_embeddings = nn.ModuleList(point_embeddings)
        # Embedding used for padding entries with label -1.
        self.not_a_point_embed = nn.Embedding(1, embed_dim)
        # Input masks are 4x the embedding grid; the two stride-2 convs
        # below bring them back down to (embed_H, embed_W).
        self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
        self.mask_downscaling = nn.Sequential(
            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans // 4),
            activation(),
            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans),
            activation(),
            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
        )
        # Dense embedding broadcast over the grid when no mask is supplied.
        self.no_mask_embed = nn.Embedding(1, embed_dim)
    def get_dense_pe(self) -> torch.Tensor:
        """
        Returns the positional encoding used to encode point prompts,
        applied to a dense set of points the shape of the image encoding.
        Returns:
          torch.Tensor: Positional encoding with shape
            1x(embed_dim)x(embedding_h)x(embedding_w)
        """
        return self.pe_layer(self.image_embedding_size).unsqueeze(0)
    def _embed_points(
        self,
        points: torch.Tensor,
        labels: torch.Tensor,
        pad: bool,
    ) -> torch.Tensor:
        """Embeds point prompts."""
        points = points + 0.5  # Shift to center of pixel
        if pad:
            # Append a dummy zero point labeled -1 (used when no box prompt
            # accompanies the points -- see forward).
            padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
            padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
            points = torch.cat([points, padding_point], dim=1)
            labels = torch.cat([labels, padding_label], dim=1)
        point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
        # Padding points (-1) get no positional signal, only the dedicated
        # not-a-point embedding; real points get a label-specific offset.
        point_embedding[labels == -1] = 0.0
        point_embedding[labels == -1] += self.not_a_point_embed.weight
        point_embedding[labels == 0] += self.point_embeddings[0].weight
        point_embedding[labels == 1] += self.point_embeddings[1].weight
        return point_embedding
    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts."""
        boxes = boxes + 0.5  # Shift to center of pixel
        # Treat each box as its two corner points, each with its own
        # learned corner embedding.
        coords = boxes.reshape(-1, 2, 2)
        corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
        corner_embedding[:, 0, :] += self.point_embeddings[2].weight
        corner_embedding[:, 1, :] += self.point_embeddings[3].weight
        return corner_embedding
    def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
        """Embeds mask inputs."""
        mask_embedding = self.mask_downscaling(masks)
        return mask_embedding
    def _get_batch_size(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> int:
        """
        Gets the batch size of the output given the batch size of the input prompts.
        """
        # Priority: points, then boxes, then masks; default batch of 1.
        if points is not None:
            return points[0].shape[0]
        elif boxes is not None:
            return boxes.shape[0]
        elif masks is not None:
            return masks.shape[0]
        else:
            return 1
    def _get_device(self) -> torch.device:
        # Device of the learned embeddings; new tensors are created here.
        return self.point_embeddings[0].weight.device
    def forward(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense
        embeddings.
        Arguments:
          points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
            and labels to embed.
          boxes (torch.Tensor or none): boxes to embed
          masks (torch.Tensor or none): masks to embed
        Returns:
          torch.Tensor: sparse embeddings for the points and boxes, with shape
            BxNx(embed_dim), where N is determined by the number of input points
            and boxes.
          torch.Tensor: dense embeddings for the masks, in the shape
            Bx(embed_dim)x(embed_H)x(embed_W)
        """
        bs = self._get_batch_size(points, boxes, masks)
        # Sparse embeddings start empty and grow as prompt types are added.
        sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
        if points is not None:
            coords, labels = points
            # Pad points with a dummy entry only when no box prompt is given.
            point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
            sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
        if boxes is not None:
            box_embeddings = self._embed_boxes(boxes)
            sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
        if masks is not None:
            dense_embeddings = self._embed_masks(masks)
        else:
            # Without a mask prompt, broadcast the learned no-mask embedding
            # across the whole embedding grid.
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )
        return sparse_embeddings, dense_embeddings
class PositionEmbeddingRandom(nn.Module):
    """
    Positional encoding using random spatial (Fourier) frequencies.

    A fixed Gaussian matrix, registered as a buffer so it follows the module
    across devices, projects 2D coordinates into ``num_pos_feats``
    frequencies; sin and cos of the projections form the final encoding.
    """
    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
        super().__init__()
        if scale is None or scale <= 0.0:
            # Non-positive scales are treated as "use the default".
            scale = 1.0
        self.register_buffer(
            "positional_encoding_gaussian_matrix",
            scale * torch.randn((2, num_pos_feats)),
        )
    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
        """Positionally encode points whose coordinates lie in [0, 1]."""
        # Map [0, 1] -> [-1, 1], project through the Gaussian matrix, and
        # convert to radians. Input shape is d_1 x ... x d_n x 2.
        projected = (2 * coords - 1) @ self.positional_encoding_gaussian_matrix
        projected = 2 * np.pi * projected
        # Output shape is d_1 x ... x d_n x C.
        return torch.cat([torch.sin(projected), torch.cos(projected)], dim=-1)
    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
        """Generate the positional encoding for an h x w grid."""
        h, w = size
        device: Any = self.positional_encoding_gaussian_matrix.device
        ones = torch.ones((h, w), device=device, dtype=torch.float32)
        # Pixel-center coordinates (cumsum - 0.5), normalized to [0, 1].
        y_coords = (ones.cumsum(dim=0) - 0.5) / h
        x_coords = (ones.cumsum(dim=1) - 0.5) / w
        encoded = self._pe_encoding(torch.stack([x_coords, y_coords], dim=-1))
        return encoded.permute(2, 0, 1)  # C x H x W
    def forward_with_coords(
        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
    ) -> torch.Tensor:
        """Positionally encode points given in pixel space of ``image_size``."""
        coords = coords_input.clone()
        h, w = image_size
        coords[:, :, 0] = coords[:, :, 0] / w
        coords[:, :, 1] = coords[:, :, 1] / h
        return self._pe_encoding(coords.to(torch.float))  # B x N x C
| 8,594 | 38.976744 | 97 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/transformer.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import Tensor, nn
import math
from typing import Tuple, Type
from .common import MLPBlock
class TwoWayTransformer(nn.Module):
    def __init__(
        self,
        depth: int,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
    ) -> None:
        """
        A transformer decoder that attends to an input image using
        queries whose positional embedding is supplied.
        Args:
          depth (int): number of layers in the transformer
          embedding_dim (int): the channel dimension for the input embeddings
          num_heads (int): the number of heads for multihead attention. Must
            divide embedding_dim
          mlp_dim (int): the channel dimension internal to the MLP block
          activation (nn.Module): the activation to use in the MLP block
          attention_downsample_rate (int): channel downscaling factor applied
            inside the attention layers
        """
        super().__init__()
        self.depth = depth
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.layers = nn.ModuleList()
        for i in range(depth):
            self.layers.append(
                TwoWayAttentionBlock(
                    embedding_dim=embedding_dim,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    activation=activation,
                    attention_downsample_rate=attention_downsample_rate,
                    # Only the first block skips adding PE inside self-attention.
                    skip_first_layer_pe=(i == 0),
                )
            )
        # Final token->image cross-attention applied after all blocks.
        self.final_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm_final_attn = nn.LayerNorm(embedding_dim)
    def forward(
        self,
        image_embedding: Tensor,
        image_pe: Tensor,
        point_embedding: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
          image_embedding (torch.Tensor): image to attend to. Should be shape
            B x embedding_dim x h x w for any h and w.
          image_pe (torch.Tensor): the positional encoding to add to the image. Must
            have the same shape as image_embedding.
          point_embedding (torch.Tensor): the embedding to add to the query points.
            Must have shape B x N_points x embedding_dim for any N_points.
        Returns:
          torch.Tensor: the processed point_embedding
          torch.Tensor: the processed image_embedding
        """
        # BxCxHxW -> BxHWxC == B x N_image_tokens x C
        bs, c, h, w = image_embedding.shape
        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
        image_pe = image_pe.flatten(2).permute(0, 2, 1)
        # Prepare queries
        queries = point_embedding
        keys = image_embedding
        # Apply transformer blocks and final layernorm.  Note that the
        # original (un-updated) point/image positional embeddings are re-added
        # at every layer rather than accumulated.
        for layer in self.layers:
            queries, keys = layer(
                queries=queries,
                keys=keys,
                query_pe=point_embedding,
                key_pe=image_pe,
            )
        # Apply the final attention layer from the points to the image
        q = queries + point_embedding
        k = keys + image_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)
        return queries, keys
class TwoWayAttentionBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
        skip_first_layer_pe: bool = False,
    ) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse
        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
        block on sparse inputs, and (4) cross attention of dense inputs to sparse
        inputs.
        Arguments:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
          attention_downsample_rate (int): channel downscaling factor used by
            the cross-attention layers
          skip_first_layer_pe (bool): skip the PE on the first layer
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)
        self.cross_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm2 = nn.LayerNorm(embedding_dim)
        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)
        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.skip_first_layer_pe = skip_first_layer_pe
    def forward(
        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
    ) -> Tuple[Tensor, Tensor]:
        # Self attention block (post-norm residual style throughout).
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            q = queries + query_pe
            attn_out = self.self_attn(q=q, k=q, v=queries)
            queries = queries + attn_out
        queries = self.norm1(queries)
        # Cross attention block, tokens attending to image embedding
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm2(queries)
        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)
        # Cross attention block, image embedding attending to tokens.
        # Note q/k are deliberately swapped here: the image tokens (k) are the
        # queries of this attention, attending to the sparse tokens (q).
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
        keys = keys + attn_out
        keys = self.norm4(keys)
        return queries, keys
class Attention(nn.Module):
    """
    Multi-head attention that can optionally downscale the channel size of
    the embedding after projecting to queries, keys, and values.
    """
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        downsample_rate: int = 1,
    ) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."
        # Projections into the (possibly downscaled) attention space ...
        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
        # ... and back out to the embedding dimension.
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
        batch, tokens, channels = x.shape
        per_head = channels // num_heads
        # B x N_heads x N_tokens x C_per_head
        return x.reshape(batch, tokens, num_heads, per_head).transpose(1, 2)
    def _recombine_heads(self, x: Tensor) -> Tensor:
        batch, heads, tokens, per_head = x.shape
        # B x N_tokens x C
        return x.transpose(1, 2).reshape(batch, tokens, heads * per_head)
    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        # Project the inputs, then split them into heads.
        q = self._separate_heads(self.q_proj(q), self.num_heads)
        k = self._separate_heads(self.k_proj(k), self.num_heads)
        v = self._separate_heads(self.v_proj(v), self.num_heads)
        # Scaled dot-product attention over the token dimension.
        scale = math.sqrt(q.shape[-1])
        weights = torch.softmax(q @ k.permute(0, 1, 3, 2) / scale, dim=-1)
        # Recombine the heads and project back to the embedding dimension.
        return self.out_proj(self._recombine_heads(weights @ v))
| 8,396 | 33.842324 | 89 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/common.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from typing import Type
class MLPBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
mlp_dim: int,
act: Type[nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
    """Layer normalization over the channel dimension of an NCHW tensor."""
    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        # Learnable per-channel affine parameters, initialized to identity.
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Normalize each spatial position across its channels.
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normalized = (x - mean) / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * normalized + self.bias[:, None, None]
| 1,479 | 32.636364 | 136 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/plugins/segment_anything/modeling/sam.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
class Sam(nn.Module):
    # Logits above this threshold are treated as foreground when binarizing.
    mask_threshold: float = 0.0
    image_format: str = "RGB"
    def __init__(
        self,
        image_encoder: ImageEncoderViT,
        prompt_encoder: PromptEncoder,
        mask_decoder: MaskDecoder,
        pixel_mean: List[float] = [123.675, 116.28, 103.53],
        pixel_std: List[float] = [58.395, 57.12, 57.375],
    ) -> None:
        """
        SAM predicts object masks from an image and input prompts.
        Arguments:
          image_encoder (ImageEncoderViT): The backbone used to encode the
            image into image embeddings that allow for efficient mask prediction.
          prompt_encoder (PromptEncoder): Encodes various types of input prompts.
          mask_decoder (MaskDecoder): Predicts masks from the image embeddings
            and encoded prompts.
          pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
          pixel_std (list(float)): Std values for normalizing pixels in the input image.

        NOTE(review): the mutable list defaults are shared across calls but are
        only read here, so they are harmless.
        """
        super().__init__()
        self.image_encoder = image_encoder
        self.prompt_encoder = prompt_encoder
        self.mask_decoder = mask_decoder
        # Registered as non-persistent buffers (persistent=False) so they
        # follow the module across devices without entering the state dict.
        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
    @property
    def device(self) -> Any:
        return self.pixel_mean.device
    @torch.no_grad()
    def forward(
        self,
        batched_input: List[Dict[str, Any]],
        multimask_output: bool,
    ) -> List[Dict[str, torch.Tensor]]:
        """
        Predicts masks end-to-end from provided images and prompts.
        If prompts are not known in advance, using SamPredictor is
        recommended over calling the model directly.
        Arguments:
          batched_input (list(dict)): A list over input images, each a
            dictionary with the following keys. A prompt key can be
            excluded if it is not present.
              'image': The image as a torch tensor in 3xHxW format,
                already transformed for input to the model.
              'original_size': (tuple(int, int)) The original size of
                the image before transformation, as (H, W).
              'point_coords': (torch.Tensor) Batched point prompts for
                this image, with shape BxNx2. Already transformed to the
                input frame of the model.
              'point_labels': (torch.Tensor) Batched labels for point prompts,
                with shape BxN.
              'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
                Already transformed to the input frame of the model.
              'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
                in the form Bx1xHxW.
          multimask_output (bool): Whether the model should predict multiple
            disambiguating masks, or return a single mask.
        Returns:
          (list(dict)): A list over input images, where each element is
            a dictionary with the following keys.
              'masks': (torch.Tensor) Batched binary mask predictions,
                with shape BxCxHxW, where B is the number of input prompts,
                C is determined by multimask_output, and (H, W) is the
                original size of the image.
              'iou_predictions': (torch.Tensor) The model's predictions
                of mask quality, in shape BxC.
              'low_res_logits': (torch.Tensor) Low resolution logits with
                shape BxCxHxW, where H=W=256. Can be passed as mask input
                to subsequent iterations of prediction.
        """
        # Encode all images in one batched backbone pass.
        input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
        image_embeddings = self.image_encoder(input_images)
        outputs = []
        # Prompts are encoded and decoded per image.
        for image_record, curr_embedding in zip(batched_input, image_embeddings):
            if "point_coords" in image_record:
                points = (image_record["point_coords"], image_record["point_labels"])
            else:
                points = None
            sparse_embeddings, dense_embeddings = self.prompt_encoder(
                points=points,
                boxes=image_record.get("boxes", None),
                masks=image_record.get("mask_inputs", None),
            )
            low_res_masks, iou_predictions = self.mask_decoder(
                image_embeddings=curr_embedding.unsqueeze(0),
                image_pe=self.prompt_encoder.get_dense_pe(),
                sparse_prompt_embeddings=sparse_embeddings,
                dense_prompt_embeddings=dense_embeddings,
                multimask_output=multimask_output,
            )
            masks = self.postprocess_masks(
                low_res_masks,
                input_size=image_record["image"].shape[-2:],
                original_size=image_record["original_size"],
            )
            # Binarize the upscaled logits.
            masks = masks > self.mask_threshold
            outputs.append(
                {
                    "masks": masks,
                    "iou_predictions": iou_predictions,
                    "low_res_logits": low_res_masks,
                }
            )
        return outputs
    def postprocess_masks(
        self,
        masks: torch.Tensor,
        input_size: Tuple[int, ...],
        original_size: Tuple[int, ...],
    ) -> torch.Tensor:
        """
        Remove padding and upscale masks to the original image size.
        Arguments:
          masks (torch.Tensor): Batched masks from the mask_decoder,
            in BxCxHxW format.
          input_size (tuple(int, int)): The size of the image input to the
            model, in (H, W) format. Used to remove padding.
          original_size (tuple(int, int)): The original size of the image
            before resizing for input to the model, in (H, W) format.
        Returns:
          (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
            is given by original_size.
        """
        # Upscale to the padded model input size, crop away the padding,
        # then resize to the original image size.
        masks = F.interpolate(
            masks,
            (self.image_encoder.img_size, self.image_encoder.img_size),
            mode="bilinear",
            align_corners=False,
        )
        masks = masks[..., : input_size[0], : input_size[1]]
        masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
        return masks
    def preprocess(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize pixel values and pad to a square input."""
        # Normalize colors
        x = (x - self.pixel_mean) / self.pixel_std
        # Pad on the bottom/right up to the encoder's square input size.
        h, w = x.shape[-2:]
        padh = self.image_encoder.img_size - h
        padw = self.image_encoder.img_size - w
        x = F.pad(x, (0, padw, 0, padh))
        return x
| 7,225 | 40.291429 | 95 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_paint_by_example.py | from pathlib import Path
import cv2
import pytest
import torch
from PIL import Image
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy
from lama_cleaner.tests.test_model import get_config, get_data
# Shared fixtures: every test writes its output image into ./result next to
# this file, on GPU when available.
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / 'result'
save_dir.mkdir(exist_ok=True, parents=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
def assert_equal(
    model, config, gt_name,
    fx: float = 1, fy: float = 1,
    img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
    mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    example_p=current_dir / "bunny.jpeg",
):
    """Run ``model`` on the test image/mask pair, using ``example_p`` as the
    paint-by-example reference image, and save the result under ``gt_name``.

    ``fx``/``fy`` optionally rescale both the input image and the example.
    """
    img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p)
    example_image = cv2.imread(str(example_p))
    # Bug fix: cv2.imread returns a 3-channel BGR image, but the original code
    # used COLOR_BGRA2RGB, which requires a 4-channel input and raises in
    # OpenCV. COLOR_BGR2RGB is the correct conversion here.
    example_image = cv2.cvtColor(example_image, cv2.COLOR_BGR2RGB)
    example_image = cv2.resize(example_image, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA)
    print(f"Input image shape: {img.shape}, example_image: {example_image.shape}")
    config.paint_by_example_example_image = Image.fromarray(example_image)
    res = model(img, mask, config)
    cv2.imwrite(str(save_dir / gt_name), res)
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example(strategy):
    """Smoke-test paint-by-example with non-uniform rescaling of the inputs."""
    model = ModelManager(name="paint_by_example", device=device, disable_nsfw=True)
    cfg = get_config(strategy, paint_by_example_steps=30)
    assert_equal(
        model,
        cfg,
        f"paint_by_example_{strategy.capitalize()}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fy=0.9,
        fx=1.3,
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_disable_nsfw(strategy):
    """Smoke-test paint-by-example with the NSFW checker ENABLED.

    NOTE(review): despite the test name, disable_nsfw=False here (the checker
    runs) — presumably the name means "the disable_nsfw code path"; confirm.
    """
    model = ModelManager(name="paint_by_example", device=device, disable_nsfw=False)
    cfg = get_config(strategy, paint_by_example_steps=30)
    assert_equal(
        model,
        cfg,
        f"paint_by_example_{strategy.capitalize()}_disable_nsfw.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_sd_scale(strategy):
    """Smoke-test paint-by-example with a reduced sd_scale (0.85)."""
    model = ModelManager(name="paint_by_example", device=device, disable_nsfw=True)
    cfg = get_config(strategy, paint_by_example_steps=30, sd_scale=0.85)
    assert_equal(
        model,
        cfg,
        f"paint_by_example_{strategy.capitalize()}_sdscale.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fy=0.9,
        fx=1.3
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_cpu_offload(strategy):
    """Smoke-test paint-by-example with CPU offloading enabled."""
    model = ModelManager(name="paint_by_example", device=device, cpu_offload=True, disable_nsfw=False)
    cfg = get_config(strategy, paint_by_example_steps=30, sd_scale=0.85)
    assert_equal(
        model,
        cfg,
        f"paint_by_example_{strategy.capitalize()}_cpu_offload.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_paint_by_example_cpu_offload_cpu_device(strategy):
    """Smoke-test CPU offloading on a forced CPU device (1 step to stay fast)."""
    model = ModelManager(name="paint_by_example", device=torch.device('cpu'), cpu_offload=True, disable_nsfw=True)
    cfg = get_config(strategy, paint_by_example_steps=1, sd_scale=0.85)
    assert_equal(
        model,
        cfg,
        f"paint_by_example_{strategy.capitalize()}_cpu_offload_cpu_device.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fy=0.9,
        fx=1.3
    )
| 3,985 | 36.252336 | 114 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_instruct_pix2pix.py | from pathlib import Path
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.tests.test_model import get_config, assert_equal
from lama_cleaner.schema import HDStrategy
# Shared fixtures: results directory and the device string ('cuda'/'cpu').
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / 'result'
save_dir.mkdir(exist_ok=True, parents=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
@pytest.mark.parametrize("disable_nsfw", [True, False])
@pytest.mark.parametrize("cpu_offload", [False, True])
def test_instruct_pix2pix(disable_nsfw, cpu_offload):
    """Smoke-test InstructPix2Pix across nsfw-checker/cpu-offload combinations.

    Uses 1 step on CPU (50 on CUDA) so CPU runs stay fast.
    """
    sd_steps = 50 if device == 'cuda' else 1
    model = ModelManager(name="instruct_pix2pix",
                         device=torch.device(device),
                         hf_access_token="",
                         sd_run_local=False,
                         disable_nsfw=disable_nsfw,
                         sd_cpu_textencoder=False,
                         cpu_offload=cpu_offload)
    cfg = get_config(strategy=HDStrategy.ORIGINAL, prompt='What if it were snowing?', p2p_steps=sd_steps, sd_scale=1.1)
    name = f"device_{device}_disnsfw_{disable_nsfw}_cpu_offload_{cpu_offload}"
    assert_equal(
        model,
        cfg,
        f"instruct_pix2pix_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3
    )
@pytest.mark.parametrize("disable_nsfw", [False])
@pytest.mark.parametrize("cpu_offload", [False])
def test_instruct_pix2pix_snow(disable_nsfw, cpu_offload):
    """Smoke-test InstructPix2Pix with the 'snow' prompt at default sd_scale.

    Uses 1 step on CPU (50 on CUDA) so CPU runs stay fast.
    """
    sd_steps = 50 if device == 'cuda' else 1
    model = ModelManager(name="instruct_pix2pix",
                         device=torch.device(device),
                         hf_access_token="",
                         sd_run_local=False,
                         disable_nsfw=disable_nsfw,
                         sd_cpu_textencoder=False,
                         cpu_offload=cpu_offload)
    cfg = get_config(strategy=HDStrategy.ORIGINAL, prompt='What if it were snowing?', p2p_steps=sd_steps)
    # Plain literal: the original used an f-string with no placeholders (F541).
    name = "snow"
    assert_equal(
        model,
        cfg,
        f"instruct_pix2pix_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 2,322 | 35.873016 | 119 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_controlnet.py | import os
from lama_cleaner.const import SD_CONTROLNET_CHOICES
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler
from lama_cleaner.tests.test_model import get_config, assert_equal
# Shared fixtures: results directory and default torch device. Note the tests
# below parametrize their own sd_device and should not rely on this global.
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
@pytest.mark.parametrize("cpu_textencoder", [True])
@pytest.mark.parametrize("disable_nsfw", [True])
@pytest.mark.parametrize("sd_controlnet_method", SD_CONTROLNET_CHOICES)
def test_runway_sd_1_5(
    sd_device, strategy, sampler, cpu_textencoder, disable_nsfw, sd_controlnet_method
):
    """Smoke-test SD 1.5 inpainting with every supported ControlNet method."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Bug fix: the guard previously tested the module-level `device` (a
    # torch.device that is never "mps"), so the skip never fired and "mps"
    # runs crashed on machines without MPS. Check the parametrized sd_device.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
        sd_controlnet_method=sd_controlnet_method,
    )
    # Per-method conditioning scale; the inpaint/depth ControlNets use full
    # strength while canny/openpose are softened.
    controlnet_conditioning_scale = {
        "control_v11p_sd15_canny": 0.4,
        "control_v11p_sd15_openpose": 0.4,
        "control_v11p_sd15_inpaint": 1.0,
        "control_v11f1p_sd15_depth": 1.0,
    }[sd_controlnet_method]
    cfg = get_config(
        strategy,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        controlnet_method=sd_controlnet_method,
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}_cpu_textencoder_disable_nsfw"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_{sd_controlnet_method}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.2,
        fy=1.2,
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path(sd_device, sampler):
    """Smoke-test loading a local SD inpainting checkpoint with the canny ControlNet."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Bug fix: was `device == "mps"` (module-level torch.device, never "mps"),
    # so this skip never fired; test the parametrized sd_device instead.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        sd_local_model_path="/Users/cwq/data/models/sd-v1-5-inpainting.ckpt",
        sd_controlnet_method="control_v11p_sd15_canny",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_method="control_v11p_sd15_canny",
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_canny_local_model_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path_controlnet_native_inpainting(sd_device, sampler):
    """Smoke-test a non-inpainting local checkpoint driven by the ControlNet
    inpaint model (native inpainting: strength 1.0, conditioning scale 1.0)."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Bug fix: was `device == "mps"` (module-level torch.device, never "mps"),
    # so this skip never fired; test the parametrized sd_device instead.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        sd_local_model_path="/Users/cwq/data/models/v1-5-pruned-emaonly.safetensors",
        sd_controlnet_method="control_v11p_sd15_inpaint",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_conditioning_scale=1.0,
        sd_strength=1.0,
        controlnet_method="control_v11p_sd15_inpaint",
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_local_native_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_controlnet_switch(sd_device, sampler):
    """Smoke-test switching ControlNet method at request time: the manager is
    built with canny but the config asks for the inpaint ControlNet."""
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    # Bug fix: was `device == "mps"` (module-level torch.device, never "mps"),
    # so this skip never fired; test the parametrized sd_device instead.
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return
    sd_steps = 1 if sd_device == "cpu" else 30
    model = ModelManager(
        name="sd1.5",
        sd_controlnet=True,
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=False,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        sd_controlnet_method="control_v11p_sd15_canny",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
        controlnet_method="control_v11p_sd15_inpaint",
    )
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}"
    assert_equal(
        model,
        cfg,
        f"sd_controlnet_switch_to_inpaint_local_model_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 6,219 | 30.734694 | 85 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_plugins.py | import hashlib
import os
import time
from lama_cleaner.plugins.anime_seg import AnimeSeg
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import cv2
import pytest
import torch.cuda
from lama_cleaner.plugins import (
RemoveBG,
RealESRGANUpscaler,
GFPGANPlugin,
RestoreFormerPlugin,
InteractiveSeg,
)
# Shared fixtures: results directory plus the bunny test image in raw bytes,
# BGR (OpenCV native), and RGB forms.
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
img_p = current_dir / "bunny.jpeg"
# Path.read_bytes() opens and closes the file; the original
# `open(img_p, "rb").read()` left the handle unclosed.
img_bytes = img_p.read_bytes()
bgr_img = cv2.imread(str(img_p))
rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
def _save(img, name):
    # Helper: write an image into the shared result directory.
    cv2.imwrite(str(save_dir / name), img)
def test_remove_bg():
    """Background removal smoke test; output saved for visual inspection."""
    model = RemoveBG()
    res = model.forward(bgr_img)
    res = cv2.cvtColor(res, cv2.COLOR_RGBA2BGRA)
    _save(res, "test_remove_bg.png")
def test_anime_seg():
    """Anime segmentation should return an HxWx4 (RGBA) result."""
    model = AnimeSeg()
    img = cv2.imread(str(current_dir / "anime_test.png"))
    res = model.forward(img)
    assert len(res.shape) == 3
    assert res.shape[-1] == 4
    _save(res, "test_anime_seg.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_upscale(device):
    """RealESRGAN upscaling at 2x and 4x on each available device.

    The `device` parameter shadows the module-level device on purpose, so the
    availability guards below test the parametrized value.
    """
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    model = RealESRGANUpscaler("realesr-general-x4v3", device)
    res = model.forward(bgr_img, 2)
    _save(res, f"test_upscale_x2_{device}.png")
    res = model.forward(bgr_img, 4)
    _save(res, f"test_upscale_x4_{device}.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_gfpgan(device):
    """GFPGAN face-restoration smoke test on each available device."""
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    model = GFPGANPlugin(device)
    res = model(rgb_img, None, None)
    _save(res, f"test_gfpgan_{device}.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_restoreformer(device):
    """RestoreFormer face-restoration smoke test on each available device."""
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    model = RestoreFormerPlugin(device)
    res = model(rgb_img, None, None)
    _save(res, f"test_restoreformer_{device}.png")
@pytest.mark.parametrize("device", ["cuda", "cpu", "mps"])
def test_segment_anything(device):
    """Interactive segmentation with a single positive click at the image center.

    The second forward() with the same image md5 exercises the embedding
    cache; its wall time is printed for comparison.
    """
    if device == "cuda" and not torch.cuda.is_available():
        return
    if device == "mps" and not torch.backends.mps.is_available():
        return
    # md5 of the raw file bytes is used as the embedding cache key.
    img_md5 = hashlib.md5(img_bytes).hexdigest()
    model = InteractiveSeg("vit_l", device)
    new_mask = model.forward(rgb_img, [[448 // 2, 394 // 2, 1]], img_md5)
    save_name = f"test_segment_anything_{device}.png"
    _save(new_mask, save_name)
    start = time.time()
    model.forward(rgb_img, [[448 // 2, 394 // 2, 1]], img_md5)
    print(f"Time for {save_name}: {time.time() - start:.2f}s")
| 2,965 | 27.519231 | 73 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_model_md5.py | def test_load_model():
from lama_cleaner.plugins import InteractiveSeg
from lama_cleaner.model_manager import ModelManager
interactive_seg_model = InteractiveSeg('vit_l', 'cpu')
models = [
"lama",
"ldm",
"zits",
"mat",
"fcf",
"manga",
]
for m in models:
ModelManager(
name=m,
device="cpu",
no_half=False,
hf_access_token="",
disable_nsfw=False,
sd_cpu_textencoder=True,
sd_run_local=True,
local_files_only=True,
cpu_offload=True,
enable_xformers=False,
)
# def create_empty_file(tmp_dir, name):
# tmp_model_dir = os.path.join(tmp_dir, "torch", "hub", "checkpoints")
# Path(tmp_model_dir).mkdir(exist_ok=True, parents=True)
# path = os.path.join(tmp_model_dir, name)
# with open(path, "w") as f:
# f.write("1")
#
#
# def test_load_model_error():
# MODELS = [
# ("big-lama.pt", "e3aa4aaa15225a33ec84f9f4bc47e500"),
# ("cond_stage_model_encode.pt", "23239fc9081956a3e70de56472b3f296"),
# ("cond_stage_model_decode.pt", "fe419cd15a750d37a4733589d0d3585c"),
# ("diffusion.pt", "b0afda12bf790c03aba2a7431f11d22d"),
# ]
# with tempfile.TemporaryDirectory() as tmp_dir:
# os.environ["XDG_CACHE_HOME"] = tmp_dir
# for name, md5 in MODELS:
# create_empty_file(tmp_dir, name)
# test_load_model()
| 1,505 | 29.12 | 77 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_sd_model.py | import os
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import HDStrategy, SDSampler
from lama_cleaner.tests.test_model import get_config, assert_equal
# Shared fixtures: results directory and default torch device; the tests
# parametrize their own sd_device for model placement.
current_dir = Path(__file__).parent.absolute().resolve()
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
@pytest.mark.parametrize("cpu_textencoder", [True, False])
@pytest.mark.parametrize("disable_nsfw", [True, False])
def test_runway_sd_1_5_ddim(
    sd_device, strategy, sampler, cpu_textencoder, disable_nsfw
):
    """SD 1.5 inpainting with the DDIM sampler across nsfw/textencoder combos."""
    # No-op step callback; just verifies the callback hook is accepted.
    def callback(i, t, latents):
        pass
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    sd_steps = 50 if sd_device == "cuda" else 1
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
        callback=callback,
    )
    cfg = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps)
    cfg.sd_sampler = sampler
    name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"
    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize(
    "sampler", [SDSampler.pndm, SDSampler.k_lms, SDSampler.k_euler, SDSampler.k_euler_a]
)
@pytest.mark.parametrize("cpu_textencoder", [False])
@pytest.mark.parametrize("disable_nsfw", [True])
def test_runway_sd_1_5(sd_device, strategy, sampler, cpu_textencoder, disable_nsfw):
    """Smoke-test SD 1.5 inpainting under the non-DDIM samplers."""
    def callback(i, t, latents):
        # Progress trace only; no assertion on intermediate latents.
        print(f"sd_step_{i}")

    # Quietly no-op when CUDA is parametrized but absent.
    if sd_device == "cuda" and not torch.cuda.is_available():
        return

    sd_steps = 50 if sd_device == "cuda" else 1
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
        callback=callback,
    )
    cfg = get_config(strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps)
    cfg.sd_sampler = sampler

    name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"

    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.ddim])
def test_runway_sd_1_5_negative_prompt(sd_device, strategy, sampler):
    """Smoke-test SD 1.5 with a negative prompt and histogram matching on."""
    def callback(i, t, latents):
        pass

    # Quietly no-op when CUDA is parametrized but absent.
    if sd_device == "cuda" and not torch.cuda.is_available():
        return

    sd_steps = 50 if sd_device == "cuda" else 1
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=False,
        sd_cpu_textencoder=False,
        callback=callback,
    )
    cfg = get_config(
        strategy,
        sd_steps=sd_steps,
        prompt="Face of a fox, high resolution, sitting on a park bench",
        negative_prompt="orange, yellow, small",
        sd_sampler=sampler,
        sd_match_histograms=True,
    )

    name = f"{sampler}_negative_prompt"

    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
@pytest.mark.parametrize("cpu_textencoder", [False])
@pytest.mark.parametrize("disable_nsfw", [False])
def test_runway_sd_1_5_sd_scale(
    sd_device, strategy, sampler, cpu_textencoder, disable_nsfw
):
    """Smoke-test SD 1.5 with a sub-1.0 sd_scale (pre-resize before inpaint)."""
    # Quietly no-op when CUDA is parametrized but absent.
    if sd_device == "cuda" and not torch.cuda.is_available():
        return

    sd_steps = 50 if sd_device == "cuda" else 1
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=disable_nsfw,
        sd_cpu_textencoder=cpu_textencoder,
    )
    cfg = get_config(
        strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps, sd_scale=0.85
    )
    cfg.sd_sampler = sampler

    name = f"device_{sd_device}_{sampler}_cpu_textencoder_{cpu_textencoder}_disnsfw_{disable_nsfw}"

    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}_sdscale.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
        fx=1.3,
    )
@pytest.mark.parametrize("sd_device", ["cuda"])
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("sampler", [SDSampler.k_euler_a])
def test_runway_sd_1_5_cpu_offload(sd_device, strategy, sampler):
    """Smoke-test SD 1.5 with sequential CPU offload enabled."""
    # Quietly no-op when CUDA is parametrized but absent.
    if sd_device == "cuda" and not torch.cuda.is_available():
        return

    sd_steps = 50 if sd_device == "cuda" else 1
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
    )
    cfg = get_config(
        strategy, prompt="a fox sitting on a bench", sd_steps=sd_steps, sd_scale=0.85
    )
    cfg.sd_sampler = sampler

    name = f"device_{sd_device}_{sampler}"

    assert_equal(
        model,
        cfg,
        f"runway_sd_{strategy.capitalize()}_{name}_cpu_offload.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize("sd_device", ["cuda", "mps"])
@pytest.mark.parametrize("sampler", [SDSampler.uni_pc])
def test_local_file_path(sd_device, sampler):
    """Load SD 1.5 inpainting from a local single-file checkpoint and run it.

    Requires the checkpoint at the hard-coded sd_local_model_path below, so
    this only produces output on the author's machine; on other machines the
    availability guards below make it a silent no-op.
    """
    # Skip when the parametrized accelerator is unavailable. The original
    # guard only covered CUDA, so the "mps" case crashed on machines without
    # Metal support instead of being skipped.
    if sd_device == "cuda" and not torch.cuda.is_available():
        return
    if sd_device == "mps" and not torch.backends.mps.is_available():
        return

    # NOTE(review): sd_device is never "cpu" here, so this always yields 50;
    # kept as-is to preserve the step count for cuda/mps.
    sd_steps = 1 if sd_device == "cpu" else 50
    model = ModelManager(
        name="sd1.5",
        device=torch.device(sd_device),
        hf_access_token="",
        sd_run_local=True,
        disable_nsfw=True,
        sd_cpu_textencoder=False,
        cpu_offload=True,
        sd_local_model_path="/Users/cwq/data/models/sd-v1-5-inpainting.ckpt",
    )
    cfg = get_config(
        HDStrategy.ORIGINAL,
        prompt="a fox sitting on a bench",
        sd_steps=sd_steps,
    )
    cfg.sd_sampler = sampler

    name = f"device_{sd_device}_{sampler}"

    assert_equal(
        model,
        cfg,
        f"sd_local_model_{name}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 7,647 | 30.603306 | 99 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/tests/test_model.py | from pathlib import Path
import cv2
import pytest
import torch
from lama_cleaner.model_manager import ModelManager
from lama_cleaner.schema import Config, HDStrategy, LDMSampler, SDSampler
# Directory of this test module; fixture images are resolved relative to it.
current_dir = Path(__file__).parent.absolute().resolve()
# Inpainting outputs are written here for manual inspection (not asserted on).
save_dir = current_dir / "result"
save_dir.mkdir(exist_ok=True, parents=True)
# Run on GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
device = torch.device(device)
def get_data(
    fx: float = 1,
    fy: float = 1.0,
    img_p=current_dir / "image.png",
    mask_p=current_dir / "mask.png",
):
    """Load a test image/mask pair, optionally rescaled by (fx, fy).

    Returns:
        (img, mask): img is an RGB uint8 array [H, W, 3]; mask is a
        grayscale uint8 array [H, W].
    """
    img = cv2.imread(str(img_p))
    # cv2.imread with default flags always yields a 3-channel BGR array, so
    # convert with COLOR_BGR2RGB. The previous COLOR_BGRA2RGB expects a
    # 4-channel input and makes cvtColor raise on these images.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    mask = cv2.imread(str(mask_p), cv2.IMREAD_GRAYSCALE)
    # INTER_AREA suits photo downscaling; INTER_NEAREST keeps the mask binary.
    img = cv2.resize(img, None, fx=fx, fy=fy, interpolation=cv2.INTER_AREA)
    mask = cv2.resize(mask, None, fx=fx, fy=fy, interpolation=cv2.INTER_NEAREST)
    return img, mask
def get_config(strategy, **kwargs):
    """Build a test Config: fast LDM defaults plus the given HD strategy.

    Any keyword argument overrides (or extends) the defaults below.
    """
    defaults = dict(
        ldm_steps=1,
        ldm_sampler=LDMSampler.plms,
        hd_strategy=strategy,
        hd_strategy_crop_margin=32,
        hd_strategy_crop_trigger_size=200,
        hd_strategy_resize_limit=200,
    )
    # Later keys win, so caller-supplied kwargs override the defaults.
    return Config(**{**defaults, **kwargs})
def assert_equal(
    model,
    config,
    gt_name,
    fx: float = 1,
    fy: float = 1,
    img_p=current_dir / "image.png",
    mask_p=current_dir / "mask.png",
):
    """Run `model` on the (optionally rescaled) image/mask pair and save the
    result under `gt_name` in save_dir.

    Despite the name, no pixel-level assertion is performed (see the
    commented-out comparison at the bottom); a crash-free run is the check.
    """
    img, mask = get_data(fx=fx, fy=fy, img_p=img_p, mask_p=mask_p)
    print(f"Input image shape: {img.shape}")
    res = model(img, mask, config)
    cv2.imwrite(
        str(save_dir / gt_name),
        res,
        [int(cv2.IMWRITE_JPEG_QUALITY), 100, int(cv2.IMWRITE_PNG_COMPRESSION), 0],
    )

    """
    Note that JPEG is lossy compression, so even if it is the highest quality 100, 
    when the saved images is reloaded, a difference occurs with the original pixel value. 
    If you want to save the original images as it is, save it as PNG or BMP.
    """
    # gt = cv2.imread(str(current_dir / gt_name), cv2.IMREAD_UNCHANGED)
    # assert np.array_equal(res, gt)
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
def test_lama(strategy):
    """Run LaMa under every HD strategy, at scale 1.0 and again at fx=1.3."""
    model = ModelManager(name="lama", device=device)
    assert_equal(
        model,
        get_config(strategy),
        f"lama_{strategy[0].upper() + strategy[1:]}_result.png",
    )

    fx = 1.3
    assert_equal(
        model,
        get_config(strategy),
        f"lama_{strategy[0].upper() + strategy[1:]}_fx_{fx}_result.png",
        # Pass the fx variable instead of a duplicated 1.3 literal so the
        # scale used can never drift from the one baked into the filename.
        fx=fx,
    )
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("ldm_sampler", [LDMSampler.ddim, LDMSampler.plms])
def test_ldm(strategy, ldm_sampler):
    """Run LDM under every HD strategy and both samplers, at two scales."""
    model = ModelManager(name="ldm", device=device)
    cfg = get_config(strategy, ldm_sampler=ldm_sampler)
    assert_equal(
        model, cfg, f"ldm_{strategy[0].upper() + strategy[1:]}_{ldm_sampler}_result.png"
    )

    fx = 1.3
    assert_equal(
        model,
        cfg,
        f"ldm_{strategy[0].upper() + strategy[1:]}_{ldm_sampler}_fx_{fx}_result.png",
        fx=fx,
    )
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("zits_wireframe", [False, True])
def test_zits(strategy, zits_wireframe):
    """Run ZITS with and without wireframe guidance, at two scales."""
    model = ModelManager(name="zits", device=device)
    cfg = get_config(strategy, zits_wireframe=zits_wireframe)
    # os.environ['ZITS_DEBUG_LINE_PATH'] = str(current_dir / 'zits_debug_line.jpg')
    # os.environ['ZITS_DEBUG_EDGE_PATH'] = str(current_dir / 'zits_debug_edge.jpg')
    assert_equal(
        model,
        cfg,
        f"zits_{strategy[0].upper() + strategy[1:]}_wireframe_{zits_wireframe}_result.png",
    )

    fx = 1.3
    # NOTE(review): this call names the file via strategy.capitalize() while
    # the call above uses strategy[0].upper() + strategy[1:] — the two differ
    # for multi-word values; confirm which convention is intended.
    assert_equal(
        model,
        cfg,
        f"zits_{strategy.capitalize()}_wireframe_{zits_wireframe}_fx_{fx}_result.png",
        fx=fx,
    )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
@pytest.mark.parametrize("no_half", [True, False])
def test_mat(strategy, no_half):
    """Run MAT repeatedly in fp16 and fp32 modes.

    The 10 iterations all overwrite the same output file — presumably a
    repeated-invocation stability check rather than 10 distinct artifacts;
    confirm intent before changing.
    """
    model = ModelManager(name="mat", device=device, no_half=no_half)
    cfg = get_config(strategy)

    for _ in range(10):
        assert_equal(
            model,
            cfg,
            f"mat_{strategy.capitalize()}_result.png",
        )
@pytest.mark.parametrize("strategy", [HDStrategy.ORIGINAL])
def test_fcf(strategy):
    """Run FcF at two different input scales.

    The scale factors are now part of the artifact names; previously both
    calls wrote the identical filename, so the second run silently
    overwrote the first and only one result could ever be inspected.
    """
    model = ModelManager(name="fcf", device=device)
    cfg = get_config(strategy)
    assert_equal(
        model, cfg, f"fcf_{strategy.capitalize()}_fx_2_fy_2_result.png", fx=2, fy=2
    )
    assert_equal(
        model, cfg, f"fcf_{strategy.capitalize()}_fx_3.8_fy_2_result.png", fx=3.8, fy=2
    )
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
@pytest.mark.parametrize("cv2_flag", ["INPAINT_NS", "INPAINT_TELEA"])
@pytest.mark.parametrize("cv2_radius", [3, 15])
def test_cv2(strategy, cv2_flag, cv2_radius):
    """Run the classical OpenCV inpainting backend over flag/radius combos."""
    model = ModelManager(
        name="cv2",
        device=torch.device(device),
    )
    cfg = get_config(strategy, cv2_flag=cv2_flag, cv2_radius=cv2_radius)
    assert_equal(
        model,
        cfg,
        f"sd_{strategy.capitalize()}_{cv2_flag}_{cv2_radius}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
@pytest.mark.parametrize(
    "strategy", [HDStrategy.ORIGINAL, HDStrategy.RESIZE, HDStrategy.CROP]
)
def test_manga(strategy):
    """Run the manga inpainting model under every HD strategy."""
    model = ModelManager(
        name="manga",
        device=torch.device(device),
    )
    cfg = get_config(strategy)
    assert_equal(
        model,
        cfg,
        f"sd_{strategy.capitalize()}.png",
        img_p=current_dir / "overture-creations-5sI6fQgYIuo.png",
        mask_p=current_dir / "overture-creations-5sI6fQgYIuo_mask.png",
    )
| 5,826 | 28.882051 | 91 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/base.py | import abc
from typing import Optional
import cv2
import torch
import numpy as np
from loguru import logger
from lama_cleaner.helper import (
boxes_from_mask,
resize_max_size,
pad_img_to_modulo,
switch_mps_device,
)
from lama_cleaner.schema import Config, HDStrategy
class InpaintModel:
    """Abstract base class for inpainting models.

    Subclasses implement ``init_model`` / ``forward`` / ``is_downloaded``;
    ``__call__`` wraps ``forward`` with input padding and the configurable
    high-resolution strategies (CROP / RESIZE / ORIGINAL).
    """

    name = "base"
    # Minimum side length enforced while padding (None = no minimum).
    min_size: Optional[int] = None
    # Height/width are padded up to a multiple of this before forward().
    pad_mod = 8
    pad_to_square = False

    def __init__(self, device, **kwargs):
        """
        Args:
            device: target torch device; may be redirected off MPS for
                models that do not support it.
        """
        device = switch_mps_device(self.name, device)
        self.device = device
        self.init_model(device, **kwargs)

    @abc.abstractmethod
    def init_model(self, device, **kwargs):
        # Load weights onto `device`; called exactly once from __init__.
        ...

    @staticmethod
    @abc.abstractmethod
    def is_downloaded() -> bool:
        # True when the model's weights already exist locally.
        ...

    @abc.abstractmethod
    def forward(self, image, mask, config: Config):
        """Input images and output images have same size
        images: [H, W, C] RGB
        masks: [H, W, 1] where 255 marks the region to repaint
        return: BGR IMAGE
        """
        ...

    def _pad_forward(self, image, mask, config: Config):
        # Pad to the model's modulo/min-size constraints, run forward, crop
        # back to the original size, then composite with the input.
        origin_height, origin_width = image.shape[:2]
        pad_image = pad_img_to_modulo(
            image, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size
        )
        pad_mask = pad_img_to_modulo(
            mask, mod=self.pad_mod, square=self.pad_to_square, min_size=self.min_size
        )

        logger.info(f"final forward pad size: {pad_image.shape}")

        result = self.forward(pad_image, pad_mask, config)
        result = result[0:origin_height, 0:origin_width, :]

        result, image, mask = self.forward_post_process(result, image, mask, config)

        # Blend: model output inside the mask, original pixels (flipped to
        # BGR to match forward()'s output channel order) outside it.
        mask = mask[:, :, np.newaxis]
        result = result * (mask / 255) + image[:, :, ::-1] * (1 - (mask / 255))
        return result

    def forward_post_process(self, result, image, mask, config):
        # Hook for subclasses (e.g. histogram matching); default is a no-op.
        return result, image, mask

    @torch.no_grad()
    def __call__(self, image, mask, config: Config):
        """
        images: [H, W, C] RGB, not normalized
        masks: [H, W]
        return: BGR IMAGE
        """
        inpaint_result = None
        logger.info(f"hd_strategy: {config.hd_strategy}")
        if config.hd_strategy == HDStrategy.CROP:
            if max(image.shape) > config.hd_strategy_crop_trigger_size:
                logger.info(f"Run crop strategy")
                # Inpaint each masked region in its own margin-expanded crop,
                # then paste the crops back into a full-size canvas.
                boxes = boxes_from_mask(mask)
                crop_result = []
                for box in boxes:
                    crop_image, crop_box = self._run_box(image, mask, box, config)
                    crop_result.append((crop_image, crop_box))

                # NOTE(review): image[:, :, ::-1] is a numpy *view*, so the
                # in-place paste below appears to also write into the
                # caller's image buffer — confirm the aliasing is intended.
                inpaint_result = image[:, :, ::-1]
                for crop_image, crop_box in crop_result:
                    x1, y1, x2, y2 = crop_box
                    inpaint_result[y1:y2, x1:x2, :] = crop_image

        elif config.hd_strategy == HDStrategy.RESIZE:
            if max(image.shape) > config.hd_strategy_resize_limit:
                # Downscale, inpaint, upscale, then restore the original
                # pixels everywhere outside the mask.
                origin_size = image.shape[:2]
                downsize_image = resize_max_size(
                    image, size_limit=config.hd_strategy_resize_limit
                )
                downsize_mask = resize_max_size(
                    mask, size_limit=config.hd_strategy_resize_limit
                )

                logger.info(
                    f"Run resize strategy, origin size: {image.shape} forward size: {downsize_image.shape}"
                )
                inpaint_result = self._pad_forward(
                    downsize_image, downsize_mask, config
                )

                # only paste masked area result
                inpaint_result = cv2.resize(
                    inpaint_result,
                    (origin_size[1], origin_size[0]),
                    interpolation=cv2.INTER_CUBIC,
                )
                original_pixel_indices = mask < 127
                inpaint_result[original_pixel_indices] = image[:, :, ::-1][
                    original_pixel_indices
                ]

        if inpaint_result is None:
            # ORIGINAL strategy, or image below the chosen strategy's trigger size.
            inpaint_result = self._pad_forward(image, mask, config)

        return inpaint_result

    def _crop_box(self, image, mask, box, config: Config):
        """
        Args:
            image: [H, W, C] RGB
            mask: [H, W, 1]
            box: [left,top,right,bottom]

        Returns:
            BGR IMAGE, (l, t, r, b)
        """
        box_h = box[3] - box[1]
        box_w = box[2] - box[0]
        cx = (box[0] + box[2]) // 2
        cy = (box[1] + box[3]) // 2
        img_h, img_w = image.shape[:2]

        # Expand the box by the configured margin on every side.
        w = box_w + config.hd_strategy_crop_margin * 2
        h = box_h + config.hd_strategy_crop_margin * 2

        _l = cx - w // 2
        _r = cx + w // 2
        _t = cy - h // 2
        _b = cy + h // 2

        l = max(_l, 0)
        r = min(_r, img_w)
        t = max(_t, 0)
        b = min(_b, img_h)

        # try to get more context when crop around image edge
        if _l < 0:
            r += abs(_l)
        if _r > img_w:
            l -= _r - img_w
        if _t < 0:
            b += abs(_t)
        if _b > img_h:
            t -= _b - img_h

        # Re-clamp after the shift so the crop always stays inside the image.
        l = max(l, 0)
        r = min(r, img_w)
        t = max(t, 0)
        b = min(b, img_h)

        crop_img = image[t:b, l:r, :]
        crop_mask = mask[t:b, l:r]

        logger.info(f"box size: ({box_h},{box_w}) crop size: {crop_img.shape}")

        return crop_img, crop_mask, [l, t, r, b]

    def _calculate_cdf(self, histogram):
        # Normalized cumulative distribution of a 256-bin histogram.
        cdf = histogram.cumsum()
        normalized_cdf = cdf / float(cdf.max())
        return normalized_cdf

    def _calculate_lookup(self, source_cdf, reference_cdf):
        # Classic histogram-matching LUT: for each source intensity, take the
        # first reference intensity whose CDF reaches the source CDF value.
        lookup_table = np.zeros(256)
        lookup_val = 0
        for source_index, source_val in enumerate(source_cdf):
            for reference_index, reference_val in enumerate(reference_cdf):
                if reference_val >= source_val:
                    lookup_val = reference_index
                    break
            lookup_table[source_index] = lookup_val
        return lookup_table

    def _match_histograms(self, source, reference, mask):
        # Match each channel of `source` to `reference`, ignoring masked pixels.
        transformed_channels = []
        for channel in range(source.shape[-1]):
            source_channel = source[:, :, channel]
            reference_channel = reference[:, :, channel]

            # only calculate histograms for non-masked parts
            source_histogram, _ = np.histogram(source_channel[mask == 0], 256, [0, 256])
            reference_histogram, _ = np.histogram(
                reference_channel[mask == 0], 256, [0, 256]
            )

            source_cdf = self._calculate_cdf(source_histogram)
            reference_cdf = self._calculate_cdf(reference_histogram)
            lookup = self._calculate_lookup(source_cdf, reference_cdf)

            transformed_channels.append(cv2.LUT(source_channel, lookup))

        result = cv2.merge(transformed_channels)
        result = cv2.convertScaleAbs(result)

        return result

    def _apply_cropper(self, image, mask, config: Config):
        # Crop image and mask to the user-drawn cropper rectangle, clamped to
        # the image bounds.
        img_h, img_w = image.shape[:2]
        l, t, w, h = (
            config.croper_x,
            config.croper_y,
            config.croper_width,
            config.croper_height,
        )
        r = l + w
        b = t + h

        l = max(l, 0)
        r = min(r, img_w)
        t = max(t, 0)
        b = min(b, img_h)

        crop_img = image[t:b, l:r, :]
        crop_mask = mask[t:b, l:r]
        return crop_img, crop_mask, (l, t, r, b)

    def _run_box(self, image, mask, box, config: Config):
        """
        Args:
            image: [H, W, C] RGB
            mask: [H, W, 1]
            box: [left,top,right,bottom]

        Returns:
            BGR IMAGE
        """
        crop_img, crop_mask, [l, t, r, b] = self._crop_box(image, mask, box, config)

        return self._pad_forward(crop_img, crop_mask, config), [l, t, r, b]
class DiffusionInpaintModel(InpaintModel):
    """InpaintModel variant for diffusion backends.

    Replaces the HD-strategy dispatch with the user cropper plus an
    ``sd_scale`` pre-resize around the padded forward pass.
    """

    @torch.no_grad()
    def __call__(self, image, mask, config: Config):
        """
        images: [H, W, C] RGB, not normalized
        masks: [H, W]
        return: BGR IMAGE
        """
        # boxes = boxes_from_mask(mask)
        if config.use_croper:
            crop_img, crop_mask, (l, t, r, b) = self._apply_cropper(image, mask, config)
            crop_image = self._scaled_pad_forward(crop_img, crop_mask, config)
            # NOTE(review): image[:, :, ::-1] is a numpy *view*; the paste
            # below appears to also write into the caller's image buffer —
            # confirm the aliasing is intended.
            inpaint_result = image[:, :, ::-1]
            inpaint_result[t:b, l:r, :] = crop_image
        else:
            inpaint_result = self._scaled_pad_forward(image, mask, config)

        return inpaint_result

    def _scaled_pad_forward(self, image, mask, config: Config):
        # Optionally shrink the input by config.sd_scale before inpainting,
        # then upsample the result and paste back only the masked pixels.
        longer_side_length = int(config.sd_scale * max(image.shape[:2]))
        origin_size = image.shape[:2]
        downsize_image = resize_max_size(image, size_limit=longer_side_length)
        downsize_mask = resize_max_size(mask, size_limit=longer_side_length)
        if config.sd_scale != 1:
            logger.info(
                f"Resize image to do sd inpainting: {image.shape} -> {downsize_image.shape}"
            )
        inpaint_result = self._pad_forward(downsize_image, downsize_mask, config)
        # only paste masked area result
        inpaint_result = cv2.resize(
            inpaint_result,
            (origin_size[1], origin_size[0]),
            interpolation=cv2.INTER_CUBIC,
        )
        original_pixel_indices = mask < 127
        inpaint_result[original_pixel_indices] = image[:, :, ::-1][
            original_pixel_indices
        ]
        return inpaint_result
| 9,600 | 31.110368 | 107 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/lama.py | import os
import cv2
import numpy as np
import torch
from lama_cleaner.helper import (
norm_img,
get_cache_path_by_url,
load_jit_model,
)
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.schema import Config
# Download location and checksum of the TorchScript big-lama checkpoint;
# both can be overridden through environment variables.
LAMA_MODEL_URL = os.environ.get(
    "LAMA_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
)
LAMA_MODEL_MD5 = os.environ.get("LAMA_MODEL_MD5", "e3aa4aaa15225a33ec84f9f4bc47e500")
class LaMa(InpaintModel):
    """Big-LaMa inpainting model, loaded as a TorchScript module."""

    name = "lama"
    pad_mod = 8

    def init_model(self, device, **kwargs):
        # Download (if needed) and load the scripted checkpoint in eval mode.
        self.model = load_jit_model(LAMA_MODEL_URL, device, LAMA_MODEL_MD5).eval()

    @staticmethod
    def is_downloaded() -> bool:
        return os.path.exists(get_cache_path_by_url(LAMA_MODEL_URL))

    def forward(self, image, mask, config: Config):
        """Inpaint `image` wherever `mask` is non-zero.

        image: [H, W, C] RGB
        mask: [H, W]
        return: BGR IMAGE of the same size
        """
        img_tensor = torch.from_numpy(norm_img(image)).unsqueeze(0).to(self.device)

        # Binarize the normalized mask: any positive value means "repaint".
        binary_mask = (norm_img(mask) > 0) * 1
        mask_tensor = torch.from_numpy(binary_mask).unsqueeze(0).to(self.device)

        inpainted = self.model(img_tensor, mask_tensor)
        result = inpainted[0].permute(1, 2, 0).detach().cpu().numpy()
        result = np.clip(result * 255, 0, 255).astype("uint8")
        return cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
| 1,480 | 27.480769 | 85 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/controlnet.py | import gc
import PIL.Image
import cv2
import numpy as np
import torch
from diffusers import ControlNetModel
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import torch_gc, get_scheduler
from lama_cleaner.schema import Config
class CPUTextEncoderWrapper:
    """Runs the SD text encoder on CPU (fp32) while the rest of the pipeline
    stays on its original device/dtype."""

    def __init__(self, text_encoder, torch_dtype):
        self.config = text_encoder.config
        # Move to CPU and widen to float32 (fp16 matmuls are not supported on CPU).
        cpu_encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
        self.text_encoder = cpu_encoder.to(torch.float32, non_blocking=True)
        self.torch_dtype = torch_dtype
        # Drop the original reference and reclaim its (GPU) memory.
        del text_encoder
        torch_gc()

    def __call__(self, x, **kwargs):
        caller_device = x.device
        encoded = self.text_encoder(x.to(self.text_encoder.device), **kwargs)[0]
        # Return the embeddings on the caller's device and requested dtype,
        # wrapped in a list to mirror the encoder's output structure.
        return [encoded.to(caller_device).to(self.torch_dtype)]

    @property
    def dtype(self):
        return self.torch_dtype
# Short model names -> HuggingFace repos of *inpainting* SD checkpoints.
NAMES_MAP = {
    "sd1.5": "runwayml/stable-diffusion-inpainting",
    "anything4": "Sanster/anything-4.0-inpainting",
    "realisticVision1.4": "Sanster/Realistic_Vision_V1.4-inpainting",
}

# Short model names -> HuggingFace repos of the plain (non-inpainting)
# checkpoints, used by the control_v11p_sd15_inpaint native method.
NATIVE_NAMES_MAP = {
    "sd1.5": "runwayml/stable-diffusion-v1-5",
    "anything4": "andite/anything-v4.0",
    "realisticVision1.4": "SG161222/Realistic_Vision_V1.4",
}
def make_inpaint_condition(image, image_mask):
    """Build the ControlNet inpainting condition tensor.

    image: [H, W, C] RGB uint8
    image_mask: [H, W, 1] where 255 marks the region to repaint
    Returns a float32 tensor of shape [1, C, H, W] with values scaled to
    [0, 1] and every masked pixel set to the sentinel -1.0.
    """
    conditioned = image.astype(np.float32) / 255.0
    repaint_pixels = image_mask[:, :, -1] > 128
    conditioned[repaint_pixels] = -1.0  # sentinel marking masked pixels
    # HWC -> NCHW with a singleton batch dimension.
    conditioned = np.expand_dims(conditioned, 0).transpose(0, 3, 1, 2)
    return torch.from_numpy(conditioned)
def load_from_local_model(
    local_model_path, torch_dtype, controlnet, pipe_class, is_native_control_inpaint
):
    """Convert a single-file SD checkpoint into a diffusers ControlNet pipeline.

    Args:
        local_model_path: path to a .ckpt/.safetensors checkpoint.
        torch_dtype: dtype the assembled pipeline is cast to.
        controlnet: pre-loaded ControlNetModel to attach.
        pipe_class: pipeline class to instantiate with the converted parts.
        is_native_control_inpaint: True for the control_v11p_sd15_inpaint
            method, which needs a normal (4-channel UNet) SD model instead of
            a 9-channel inpainting one.
    """
    from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
        download_from_original_stable_diffusion_ckpt,
    )

    logger.info(f"Converting {local_model_path} to diffusers controlnet pipeline")

    try:
        pipe = download_from_original_stable_diffusion_ckpt(
            local_model_path,
            # Inpainting UNets take 9 input channels (latents + mask + masked
            # image); the native-control method uses a plain 4-channel UNet.
            num_in_channels=4 if is_native_control_inpaint else 9,
            from_safetensors=local_model_path.endswith("safetensors"),
            device="cpu",
            load_safety_checker=False,
        )
    except Exception as e:
        # A channel-shape mismatch in the UNet conv weights means the user
        # paired the wrong kind of checkpoint with the chosen method; explain
        # which kind is required before exiting.
        err_msg = str(e)
        logger.exception(e)
        if is_native_control_inpaint and "[320, 9, 3, 3]" in err_msg:
            logger.error(
                "control_v11p_sd15_inpaint method requires normal SD model, not inpainting SD model"
            )
        if not is_native_control_inpaint and "[320, 4, 3, 3]" in err_msg:
            logger.error(
                f"{controlnet.config['_name_or_path']} method requires inpainting SD model, "
                f"you can convert any SD model to inpainting model in AUTO1111: \n"
                f"https://www.reddit.com/r/StableDiffusion/comments/zyi24j/how_to_turn_any_model_into_an_inpainting_model/"
            )
        exit(-1)

    # Rebuild the requested pipeline class from the converted components,
    # swapping in the provided controlnet and dropping the safety checker.
    inpaint_pipe = pipe_class(
        vae=pipe.vae,
        text_encoder=pipe.text_encoder,
        tokenizer=pipe.tokenizer,
        unet=pipe.unet,
        controlnet=controlnet,
        scheduler=pipe.scheduler,
        safety_checker=None,
        feature_extractor=None,
        requires_safety_checker=False,
    )

    del pipe
    gc.collect()
    return inpaint_pipe.to(torch_dtype=torch_dtype)
class ControlNet(DiffusionInpaintModel):
    """Stable Diffusion inpainting guided by a ControlNet (canny / openpose /
    depth / native inpaint conditioning)."""

    name = "controlnet"
    pad_mod = 8
    # SD UNets expect inputs of at least 512 px on each side.
    min_size = 512

    def init_model(self, device: torch.device, **kwargs):
        fp16 = not kwargs.get("no_half", False)

        model_kwargs = {
            "local_files_only": kwargs.get("local_files_only", kwargs["sd_run_local"])
        }
        if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
            logger.info("Disable Stable Diffusion Model NSFW checker")
            model_kwargs.update(
                dict(
                    safety_checker=None,
                    feature_extractor=None,
                    requires_safety_checker=False,
                )
            )

        use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
        # Half precision only when actually running on CUDA with fp16 allowed.
        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32

        sd_controlnet_method = kwargs["sd_controlnet_method"]
        self.sd_controlnet_method = sd_controlnet_method

        # The "native" method conditions a plain SD model through the
        # controlnet itself; other methods use an inpainting SD pipeline.
        if sd_controlnet_method == "control_v11p_sd15_inpaint":
            from diffusers import StableDiffusionControlNetPipeline as PipeClass

            self.is_native_control_inpaint = True
        else:
            from .pipeline import StableDiffusionControlNetInpaintPipeline as PipeClass

            self.is_native_control_inpaint = False

        if self.is_native_control_inpaint:
            model_id = NATIVE_NAMES_MAP[kwargs["name"]]
        else:
            model_id = NAMES_MAP[kwargs["name"]]

        controlnet = ControlNetModel.from_pretrained(
            f"lllyasviel/{sd_controlnet_method}", torch_dtype=torch_dtype
        )
        self.is_local_sd_model = False
        if kwargs.get("sd_local_model_path", None):
            # Single-file checkpoint on disk: convert it into a pipeline.
            self.is_local_sd_model = True
            self.model = load_from_local_model(
                kwargs["sd_local_model_path"],
                torch_dtype=torch_dtype,
                controlnet=controlnet,
                pipe_class=PipeClass,
                is_native_control_inpaint=self.is_native_control_inpaint,
            )
        else:
            self.model = PipeClass.from_pretrained(
                model_id,
                controlnet=controlnet,
                revision="fp16" if use_gpu and fp16 else "main",
                torch_dtype=torch_dtype,
                **model_kwargs,
            )

        # https://huggingface.co/docs/diffusers/v0.7.0/en/api/pipelines/stable_diffusion#diffusers.StableDiffusionInpaintPipeline.enable_attention_slicing
        self.model.enable_attention_slicing()
        # https://huggingface.co/docs/diffusers/v0.7.0/en/optimization/fp16#memory-efficient-attention
        if kwargs.get("enable_xformers", False):
            self.model.enable_xformers_memory_efficient_attention()

        if kwargs.get("cpu_offload", False) and use_gpu:
            logger.info("Enable sequential cpu offload")
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)
            if kwargs["sd_cpu_textencoder"]:
                logger.info("Run Stable Diffusion TextEncoder on CPU")
                self.model.text_encoder = CPUTextEncoderWrapper(
                    self.model.text_encoder, torch_dtype
                )

        # Optional per-step progress callback forwarded to the pipeline.
        self.callback = kwargs.pop("callback", None)

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        """
        scheduler_config = self.model.scheduler.config
        scheduler = get_scheduler(config.sd_sampler, scheduler_config)
        self.model.scheduler = scheduler

        if config.sd_mask_blur != 0:
            # Feather the mask edge; kernel size must be odd for GaussianBlur.
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis]

        img_h, img_w = image.shape[:2]

        if self.is_native_control_inpaint:
            # Native method: the masked image itself is the control signal.
            control_image = make_inpaint_condition(image, mask)
            output = self.model(
                prompt=config.prompt,
                image=control_image,
                height=img_h,
                width=img_w,
                num_inference_steps=config.sd_steps,
                guidance_scale=config.sd_guidance_scale,
                controlnet_conditioning_scale=config.controlnet_conditioning_scale,
                negative_prompt=config.negative_prompt,
                generator=torch.manual_seed(config.sd_seed),
                output_type="np.array",
                callback=self.callback,
            ).images[0]
        else:
            # Derive the control image required by the chosen method.
            if "canny" in self.sd_controlnet_method:
                canny_image = cv2.Canny(image, 100, 200)
                canny_image = canny_image[:, :, None]
                canny_image = np.concatenate(
                    [canny_image, canny_image, canny_image], axis=2
                )
                canny_image = PIL.Image.fromarray(canny_image)
                control_image = canny_image
            elif "openpose" in self.sd_controlnet_method:
                from controlnet_aux import OpenposeDetector

                processor = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
                control_image = processor(image, hand_and_face=True)
            elif "depth" in self.sd_controlnet_method:
                from transformers import pipeline

                depth_estimator = pipeline("depth-estimation")
                depth_image = depth_estimator(PIL.Image.fromarray(image))["depth"]
                depth_image = np.array(depth_image)
                depth_image = depth_image[:, :, None]
                depth_image = np.concatenate(
                    [depth_image, depth_image, depth_image], axis=2
                )
                control_image = PIL.Image.fromarray(depth_image)
            else:
                raise NotImplementedError(
                    f"{self.sd_controlnet_method} not implemented"
                )

            mask_image = PIL.Image.fromarray(mask[:, :, -1], mode="L")
            image = PIL.Image.fromarray(image)

            output = self.model(
                image=image,
                control_image=control_image,
                prompt=config.prompt,
                negative_prompt=config.negative_prompt,
                mask_image=mask_image,
                num_inference_steps=config.sd_steps,
                guidance_scale=config.sd_guidance_scale,
                output_type="np.array",
                callback=self.callback,
                height=img_h,
                width=img_w,
                generator=torch.manual_seed(config.sd_seed),
                controlnet_conditioning_scale=config.controlnet_conditioning_scale,
            ).images[0]

        # Pipeline output is float RGB in [0, 1]; convert to uint8 BGR.
        output = (output * 255).round().astype("uint8")
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return output

    def forward_post_process(self, result, image, mask, config):
        # Optional color correction and re-feathering before compositing.
        if config.sd_match_histograms:
            result = self._match_histograms(result, image[:, :, ::-1], mask)

        if config.sd_mask_blur != 0:
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)
        return result, image, mask

    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
| 10,883 | 36.531034 | 154 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/utils.py | import math
import random
from typing import Any
import torch
import numpy as np
import collections
from itertools import repeat
from diffusers import (
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
UniPCMultistepScheduler,
)
from lama_cleaner.schema import SDSampler
from torch import conv2d, conv_transpose2d
def make_beta_schedule(
    device, schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3
):
    """Build a diffusion beta schedule.

    Args:
        device: device used for intermediate tensors (cosine branch only).
        schedule: one of "linear", "cosine", "sqrt_linear", "sqrt".
        n_timestep: number of diffusion steps.
        linear_start / linear_end: endpoints for the linear-family schedules.
        cosine_s: small offset used by the cosine schedule.

    Returns:
        A float64 numpy array of length n_timestep.

    Raises:
        ValueError: for an unknown schedule name.
    """
    if schedule == "linear":
        betas = (
            torch.linspace(
                linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64
            )
            ** 2
        )

    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        ).to(device)

        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2).to(device)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # Clamp in torch. The previous np.clip() silently coerced the tensor
        # to an ndarray (via __array__), so the trailing .numpy() call raised
        # AttributeError and the cosine schedule could never be returned.
        betas = torch.clamp(betas, min=0, max=0.999)

    elif schedule == "sqrt_linear":
        betas = torch.linspace(
            linear_start, linear_end, n_timestep, dtype=torch.float64
        )
    elif schedule == "sqrt":
        betas = (
            torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
            ** 0.5
        )
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    # .cpu() so schedules built on an accelerator can still be exported.
    return betas.cpu().numpy()
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    """Derive (sigma_t, a_t, a_prev) for the selected DDIM timesteps.

    a_prev is a_t shifted right by one step, with the very first cumulative
    alpha prepended; sigma_t follows Eq. (16) of the DDIM paper.
    """
    a_t = alphacums[ddim_timesteps]
    a_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())

    # according the the formula provided in https://arxiv.org/abs/2010.02502
    sigma_t = eta * np.sqrt((1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev))

    if verbose:
        print(
            f"Selected alphas for ddim sampler: a_t: {a_t}; a_(t-1): {a_prev}"
        )
        print(
            f"For the chosen value of eta, which is {eta}, "
            f"this results in the following sigma_t schedule for ddim sampler {sigma_t}"
        )
    return sigma_t, a_t, a_prev
def make_ddim_timesteps(
    ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True
):
    """Select which DDPM timesteps a DDIM sampler will visit.

    "uniform" strides evenly through [0, num_ddpm_timesteps); "quad" spaces
    them quadratically so early steps are denser.
    """
    if ddim_discr_method == "uniform":
        stride = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.arange(0, num_ddpm_timesteps, stride)
    elif ddim_discr_method == "quad":
        ddim_timesteps = (
            (np.linspace(0, np.sqrt(num_ddpm_timesteps * 0.8), num_ddim_timesteps)) ** 2
        ).astype(int)
    else:
        raise NotImplementedError(
            f'There is no ddim discretization method called "{ddim_discr_method}"'
        )

    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f"Selected timesteps for ddim sampler: {steps_out}")
    return steps_out
def noise_like(shape, device, repeat=False):
    """Standard-normal noise of `shape` on `device`.

    With repeat=True, a single sample is drawn and broadcast across the
    batch dimension, so every batch element shares identical noise.
    """
    if repeat:
        single = torch.randn((1, *shape[1:]), device=device)
        return single.repeat(shape[0], *((1,) * (len(shape) - 1)))
    return torch.randn(shape, device=device)
def timestep_embedding(device, timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    half = dim // 2
    # Geometric frequency ladder from 1 down to ~1/max_period.
    freqs = torch.exp(
        -math.log(max_period)
        * torch.arange(start=0, end=half, dtype=torch.float32)
        / half
    ).to(device=device)

    phases = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1)
    if dim % 2:
        # Odd target width: pad the final column with zeros.
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding
###### MAT and FcF #######
def normalize_2nd_moment(x, dim=1):
    """Scale `x` so its second moment (mean of squares) along `dim` is 1."""
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * (second_moment + torch.finfo(x.dtype).eps).rsqrt()
class EasyDict(dict):
    """Dict whose entries can also be read, written, and deleted as attributes."""

    def __getattr__(self, name: str) -> Any:
        # Only called when normal attribute lookup fails, so dict methods win.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name: str, value: Any) -> None:
        self[name] = value

    def __delattr__(self, name: str) -> None:
        del self[name]
def _bias_act_ref(x, b=None, dim=1, act="linear", alpha=None, gain=None, clamp=None):
    """Reference implementation of `bias_act()` built from standard PyTorch ops.

    Adds bias `b` along dimension `dim`, applies the activation named by
    `act` (looked up in `activation_funcs`), scales by `gain`, and finally
    clamps to `[-clamp, clamp]` when clamping is enabled.
    """
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    # Fall back to the activation's registered defaults when omitted.
    alpha = float(spec.def_alpha if alpha is None else alpha)
    gain = float(spec.def_gain if gain is None else gain)
    clamp = float(-1 if clamp is None else clamp)

    # Add bias, broadcasting the 1-D vector across all dims except `dim`.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        bshape = [1] * x.ndim
        bshape[dim] = -1
        x = x + b.reshape(bshape)

    # Activation, then optional output scaling.
    x = spec.func(x, alpha=alpha)
    if gain != 1:
        x = x * gain

    # Clamp is disabled when negative (the None sentinel maps to -1).
    if clamp >= 0:
        x = x.clamp(-clamp, clamp)
    return x
def bias_act(
    x, b=None, dim=1, act="linear", alpha=None, gain=None, clamp=None, impl="ref"
):
    r"""Fused bias and activation function.

    Adds bias `b` to activation tensor `x`, evaluates activation function
    `act`, and scales the result by `gain`. Each step is optional.

    Args:
        x: Input activation tensor of any shape.
        b: 1-D bias vector matching `x.shape[dim]`, or `None` to disable.
        dim: Dimension of `x` the bias applies to (ignored when `b` is None).
        act: Activation name from `activation_funcs` (e.g. "relu", "lrelu",
            "tanh", "sigmoid", "swish"), or "linear" to disable.
        alpha: Shape parameter for the activation; `None` uses its default.
        gain: Output scaling factor; `None` uses the activation's default.
        clamp: Clamp output to `[-clamp, +clamp]`; `None` disables (default).
        impl: "ref" or "cuda"; only the reference path is available here.

    Returns:
        Tensor of the same shape and dtype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ["ref", "cuda"]
    # Only the slow reference implementation exists in this build.
    return _bias_act_ref(
        x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp
    )
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
fw = int(fw)
fh = int(fh)
assert fw >= 1 and fh >= 1
return fw, fh
def _get_weight_shape(w):
shape = [int(sz) for sz in w.shape]
return shape
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def setup_filter(
    f,
    device=torch.device("cpu"),
    normalize=True,
    flip_filter=False,
    gain=1,
    separable=None,
):
    r"""Convenience function to set up a 2D FIR filter for `upfirdn2d()`.

    Args:
        f: Torch tensor, numpy array, or python list of shape
            `[filter_height, filter_width]` (non-separable),
            `[filter_taps]` (separable), `[]` (impulse), or `None` (identity).
        device: Result device (default: cpu).
        normalize: Normalize so the filter preserves DC magnitude (default: True).
        flip_filter: Flip the filter (default: False).
        gain: Overall scaling factor for signal magnitude (default: 1).
        separable: Return a separable filter; `None` selects automatically.

    Returns:
        Float32 tensor of shape `[filter_height, filter_width]`
        (non-separable) or `[filter_taps]` (separable).
    """
    # `None` means the identity filter: a single unit impulse.
    if f is None:
        f = 1
    f = torch.as_tensor(f, dtype=torch.float32)
    assert f.ndim in [0, 1, 2]
    assert f.numel() > 0
    if f.ndim == 0:
        f = f[np.newaxis]

    # Decide whether to keep a 1-D filter separable or expand it to 2-D.
    if separable is None:
        separable = f.ndim == 1 and f.numel() >= 8
    if f.ndim == 1 and not separable:
        f = f.ger(f)  # outer product -> full 2-D filter
    assert f.ndim == (1 if separable else 2)

    # Normalize (in place, matching the original behavior), flip, scale, move.
    if normalize:
        f /= f.sum()
    if flip_filter:
        f = f.flip(list(range(f.ndim)))
    f = f * (gain ** (f.ndim / 2))
    return f.to(device=device)
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
# Registry of activation functions consumed by bias_act()/_bias_act_ref().
# Per entry:
#   func         - the activation itself; `alpha` is only used by 'lrelu'
#                  (leaky slope), other entries ignore it via **_.
#   def_alpha    - default value substituted when bias_act's `alpha` is None.
#   def_gain     - default output scaling when `gain` is None (sqrt(2) for
#                  the ReLU family, presumably to preserve signal variance —
#                  TODO confirm against the upstream StyleGAN convention).
#   cuda_idx/ref/has_2nd_grad - appear to describe a fused CUDA kernel
#                  (kernel index, which intermediate its gradient needs,
#                  second-order-gradient support); unused by the reference
#                  path in this file.
activation_funcs = {
    "linear": EasyDict(
        func=lambda x, **_: x,
        def_alpha=0,
        def_gain=1,
        cuda_idx=1,
        ref="",
        has_2nd_grad=False,
    ),
    "relu": EasyDict(
        func=lambda x, **_: torch.nn.functional.relu(x),
        def_alpha=0,
        def_gain=np.sqrt(2),
        cuda_idx=2,
        ref="y",
        has_2nd_grad=False,
    ),
    "lrelu": EasyDict(
        func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha),
        def_alpha=0.2,
        def_gain=np.sqrt(2),
        cuda_idx=3,
        ref="y",
        has_2nd_grad=False,
    ),
    "tanh": EasyDict(
        func=lambda x, **_: torch.tanh(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=4,
        ref="y",
        has_2nd_grad=True,
    ),
    "sigmoid": EasyDict(
        func=lambda x, **_: torch.sigmoid(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=5,
        ref="y",
        has_2nd_grad=True,
    ),
    "elu": EasyDict(
        func=lambda x, **_: torch.nn.functional.elu(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=6,
        ref="y",
        has_2nd_grad=True,
    ),
    "selu": EasyDict(
        func=lambda x, **_: torch.nn.functional.selu(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=7,
        ref="y",
        has_2nd_grad=True,
    ),
    "softplus": EasyDict(
        func=lambda x, **_: torch.nn.functional.softplus(x),
        def_alpha=0,
        def_gain=1,
        cuda_idx=8,
        ref="y",
        has_2nd_grad=True,
    ),
    "swish": EasyDict(
        func=lambda x, **_: torch.sigmoid(x) * x,
        def_alpha=0,
        def_gain=np.sqrt(2),
        cuda_idx=9,
        ref="x",
        has_2nd_grad=True,
    ),
}
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl="cuda"):
    r"""Pad, upsample, FIR-filter, and downsample a batch of 2D images.

    Per channel: (1) upsample by inserting N-1 zeros after each pixel (`up`);
    (2) zero-pad by `padding` (negative values crop); (3) convolve with the
    2D FIR filter `f`, keeping every output pixel's footprint inside the
    input; (4) downsample by keeping every Nth pixel (`down`). This mirrors
    scipy.signal.upfirdn().

    Args:
        x: `[batch_size, num_channels, in_height, in_width]` tensor
            (float16/32/64).
        f: FIR filter — `[filter_height, filter_width]`, `[filter_taps]`
            (separable), or `None` for identity.
        up: integer upsampling factor, int or `[x, y]` (default: 1).
        down: integer downsampling factor, int or `[x, y]` (default: 1).
        padding: padding w.r.t. the upsampled image — int, `[x, y]`, or
            `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation.
        gain: overall scaling factor for signal magnitude (default: 1).
        impl: `'ref'` or `'cuda'`; only the reference path exists here.

    Returns:
        `[batch_size, num_channels, out_height, out_width]` tensor.
    """
    # `impl` is accepted for API compatibility but ignored in this build.
    return _upfirdn2d_ref(
        x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain
    )
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.

    NOTE(review): unlike the public docstring of `upfirdn2d()`, this body
    assumes `up`/`down` are plain ints and `padding` is an indexable of
    four ints (the `_parse_scaling`/`_parse_padding` calls are commented
    out below) — confirm callers always pass that shape.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        # Identity filter.
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert not f.requires_grad
    batch_size, num_channels, in_height, in_width = x.shape
    # upx, upy = _parse_scaling(up)
    # downx, downy = _parse_scaling(down)
    upx, upy = up, up
    downx, downy = down, down
    # padx0, padx1, pady0, pady1 = _parse_padding(padding)
    padx0, padx1, pady0, pady1 = padding[0], padding[1], padding[2], padding[3]
    # Upsample by inserting zeros after each pixel, per axis.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # Pad (positive values) or crop (negative values).
    x = torch.nn.functional.pad(
        x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]
    )
    x = x[
        :,
        :,
        max(-pady0, 0) : x.shape[2] - max(-pady1, 0),
        max(-padx0, 0) : x.shape[3] - max(-padx1, 0),
    ]
    # Setup filter: apply gain, match dtype; conv2d performs correlation,
    # so the filter is flipped unless correlation was requested.
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))
    # Convolve with the filter as a depthwise (grouped) convolution; a 1-D
    # (separable) filter is applied as two passes, one per axis.
    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d(input=x, weight=f, groups=num_channels)
    else:
        x = conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # Downsample by throwing away pixels.
    x = x[:, :, ::downy, ::downx]
    return x
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl="cuda"):
    r"""Downsample a batch of 2D images using the given 2D FIR filter.

    By default, the result is padded so that its shape is a fraction of the
    input. User-specified padding is applied on top of that, with negative
    values indicating cropping. Pixels outside the image are assumed zero.

    Args:
        x: `[batch_size, num_channels, in_height, in_width]` tensor
            (float16/32/64).
        f: FIR filter — `[filter_height, filter_width]`, `[filter_taps]`
            (separable), or `None` for identity.
        down: integer downsampling factor, int or `[x, y]` (default: 2).
        padding: padding w.r.t. the input — int, `[x, y]`, or
            `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation.
        gain: overall scaling factor for signal magnitude (default: 1).
        impl: `'ref'` or `'cuda'`.

    Returns:
        `[batch_size, num_channels, out_height, out_width]` tensor.
    """
    downx, downy = _parse_scaling(down)
    # Accept int, [x, y], or [x0, x1, y0, y1] padding as documented; the
    # previous code only handled a bare int here, contradicting the
    # docstring and the sibling upsample2d().
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint so the output aligns with the input grid.
    p = [
        padx0 + (fw - downx + 1) // 2,
        padx1 + (fw - downx) // 2,
        pady0 + (fh - downy + 1) // 2,
        pady1 + (fh - downy) // 2,
    ]
    return upfirdn2d(
        x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl
    )
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl="cuda"):
    r"""Upsample a batch of 2D images using the given 2D FIR filter.

    By default, the result is padded so that its shape is a multiple of the
    input. User-specified padding is applied on top of that, with negative
    values indicating cropping. Pixels outside the image are assumed zero.

    Args:
        x: `[batch_size, num_channels, in_height, in_width]` tensor
            (float16/32/64).
        f: FIR filter — `[filter_height, filter_width]`, `[filter_taps]`
            (separable), or `None` for identity.
        up: integer upsampling factor, int or `[x, y]` (default: 2).
        padding: padding w.r.t. the output — int, `[x, y]`, or
            `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation.
        gain: overall scaling factor for signal magnitude (default: 1).
        impl: `'ref'` or `'cuda'`.

    Returns:
        `[batch_size, num_channels, out_height, out_width]` tensor.
    """
    sx, sy = _parse_scaling(up)
    pad_x0, pad_x1, pad_y0, pad_y1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint over the upsampled grid.
    total_pad = [
        pad_x0 + (fw + sx - 1) // 2,
        pad_x1 + (fw - sx) // 2,
        pad_y0 + (fh + sy - 1) // 2,
        pad_y1 + (fh - sy) // 2,
    ]
    # Multiply gain by sx*sy to compensate for zero-insertion upsampling.
    return upfirdn2d(
        x,
        f,
        up=up,
        padding=total_pad,
        flip_filter=flip_filter,
        gain=gain * sx * sy,
        impl=impl,
    )
class MinibatchStdLayer(torch.nn.Module):
    """Append per-group minibatch standard-deviation statistics as extra channels.

    For each group of up to `group_size` samples, the per-element stddev
    over the group is averaged across channel groups and pixels, then
    broadcast back as `num_channels` constant feature maps appended to the
    input.
    """

    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size
        self.num_channels = num_channels

    def forward(self, x):
        N, C, H, W = x.shape
        if self.group_size is None:
            G = N
        else:
            G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(N))
        F = self.num_channels
        c = C // F
        # [GnFcHW]: split batch N into n groups of size G, channels C into
        # F groups of size c.
        y = x.reshape(G, -1, F, c, H, W)
        y = y - y.mean(dim=0)        # center within each group
        y = y.square().mean(dim=0)   # variance over the group
        y = (y + 1e-8).sqrt()        # stddev, with epsilon for stability
        y = y.mean(dim=[2, 3, 4])    # average over channels and pixels -> [nF]
        y = y.reshape(-1, F, 1, 1)   # [nF11] restore missing dimensions
        y = y.repeat(G, 1, H, W)     # [NFHW] replicate over group and pixels
        return torch.cat([x, y], dim=1)  # [NCHW] append as new channels
class FullyConnectedLayer(torch.nn.Module):
    """Linear layer with equalized learning rate and optional fused activation.

    Weights are stored at unit scale and multiplied by
    `lr_multiplier / sqrt(in_features)` at run time; the bias is likewise
    scaled by `lr_multiplier`.
    """

    def __init__(
        self,
        in_features,  # Number of input features.
        out_features,  # Number of output features.
        bias=True,  # Apply additive bias before the activation function?
        activation="linear",  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier=1,  # Learning rate multiplier.
        bias_init=0,  # Initial value for the additive bias.
    ):
        super().__init__()
        self.weight = torch.nn.Parameter(
            torch.randn([out_features, in_features]) / lr_multiplier
        )
        if bias:
            self.bias = torch.nn.Parameter(
                torch.full([out_features], np.float32(bias_init))
            )
        else:
            self.bias = None
        self.activation = activation
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        w = self.weight * self.weight_gain
        b = self.bias
        if b is not None and self.bias_gain != 1:
            b = b * self.bias_gain
        x = x.matmul(w.t())
        if self.activation == "linear" and b is not None:
            # Fast path: broadcast the bias over the trailing feature dim.
            shape = [1] * x.ndim
            shape[-1] = -1
            return x + b.reshape(shape)
        # Fused bias + activation for everything else.
        return bias_act(x, b, act=self.activation, dim=x.ndim - 1)
def _conv2d_wrapper(
    x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True
):
    """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.

    NOTE(review): `conv2d`/`conv_transpose2d` are not defined in this view —
    presumably functional conv ops (torch.nn.functional or a gradfix
    wrapper) imported elsewhere in the file; confirm.
    """
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    # Flip weight if requested.
    if (
        not flip_weight
    ):  # conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
        w = w.flip([2, 3])
    # Workaround performance pitfall in cuDNN 8.0.5, triggered when using
    # 1x1 kernel + memory_format=channels_last + less than 64 channels.
    if (
        kw == 1
        and kh == 1
        and stride == 1
        and padding in [0, [0, 0], (0, 0)]
        and not transpose
    ):
        # x.stride()[1] == 1 means the tensor is laid out channels_last.
        if x.stride()[1] == 1 and min(out_channels, in_channels_per_group) < 64:
            if out_channels <= 4 and groups == 1:
                # Express the 1x1 conv as a plain matmul over flattened pixels.
                in_shape = x.shape
                x = w.squeeze(3).squeeze(2) @ x.reshape(
                    [in_shape[0], in_channels_per_group, -1]
                )
                x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
            else:
                # Fall back to contiguous layout to dodge the cuDNN slow path.
                x = x.to(memory_format=torch.contiguous_format)
                w = w.to(memory_format=torch.contiguous_format)
                x = conv2d(x, w, groups=groups)
            return x.to(memory_format=torch.channels_last)
    # Otherwise => execute using conv2d_gradfix.
    op = conv_transpose2d if transpose else conv2d
    return op(x, w, stride=stride, padding=padding, groups=groups)
def conv2d_resample(
    x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False
):
    r"""2D convolution with optional up/downsampling.
    Padding is performed only once at the beginning, not between the operations.
    Args:
        x: Input tensor of shape
            `[batch_size, in_channels, in_height, in_width]`.
        w: Weight tensor of shape
            `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f: Low-pass filter for up/downsampling. Must be prepared beforehand by
            calling setup_filter(). None = identity (default).
        up: Integer upsampling factor (default: 1).
        down: Integer downsampling factor (default: 1).
        padding: Padding with respect to the upsampled image. NOTE(review):
            despite the text below, this body only supports a single int
            (`_parse_padding` is commented out) — confirm before passing
            a list/tuple. Can be a single number
            or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
            (default: 0).
        groups: Split input channels into N groups (default: 1).
        flip_weight: False = convolution, True = correlation (default: True).
        flip_filter: False = convolution, True = correlation (default: False).
    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2])
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    # assert isinstance(groups, int) and (groups >= 1), f"!!!!!! groups: {groups} isinstance(groups, int) {isinstance(groups, int)} {type(groups)}"
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)
    # px0, px1, py0, py1 = _parse_padding(padding)
    px0, px1, py0, py1 = padding, padding, padding, padding
    # Adjust padding to account for up/downsampling.
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2
    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d(
            x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter
        )
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d(
            x=x,
            f=f,
            up=up,
            padding=[px0, px1, py0, py1],
            gain=up ** 2,
            flip_filter=flip_filter,
        )
        return x
    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(
            x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight
        )
        return x
    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        # Transposed convolution expects the weight with in/out channel
        # axes swapped (per group for grouped convolutions).
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(
                groups * in_channels_per_group, out_channels // groups, kh, kw
            )
        # Account for the implicit padding of the transposed convolution;
        # pxt/pyt is the non-negative part handled by conv itself, the rest
        # is applied by the trailing upfirdn2d().
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(
            x=x,
            w=w,
            stride=up,
            padding=[pyt, pxt],
            groups=groups,
            transpose=True,
            flip_weight=(not flip_weight),
        )
        x = upfirdn2d(
            x=x,
            f=f,
            padding=[px0 + pxt, px1 + pxt, py0 + pyt, py1 + pyt],
            gain=up ** 2,
            flip_filter=flip_filter,
        )
        if down > 1:
            x = upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x
    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(
                x=x, w=w, padding=[py0, px0], groups=groups, flip_weight=flip_weight
            )
    # Fallback: Generic reference implementation.
    x = upfirdn2d(
        x=x,
        f=(f if up > 1 else None),
        up=up,
        padding=[px0, px1, py0, py1],
        gain=up ** 2,
        flip_filter=flip_filter,
    )
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
class Conv2dLayer(torch.nn.Module):
    """2D convolution with optional resampling and fused bias + activation.

    Weights use equalized learning-rate scaling (normalized by fan-in at
    run time); up/down-sampling is performed by `conv2d_resample()` with a
    low-pass FIR filter.
    """

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        kernel_size,  # Width and height of the convolution kernel.
        bias=True,  # Apply additive bias before the activation function?
        activation="linear",  # Activation function: 'relu', 'lrelu', etc.
        up=1,  # Integer upsampling factor.
        down=1,  # Integer downsampling factor.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output to +-X, None = disable clamping.
        channels_last=False,  # Expect the input to have memory_format=channels_last?
        trainable=True,  # Update the weights of this layer during training?
    ):
        super().__init__()
        self.activation = activation
        self.up = up
        self.down = down
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.conv_clamp = conv_clamp
        self.padding = kernel_size // 2
        # Equalized learning rate: normalize by fan-in at run time.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = activation_funcs[activation].def_gain

        memory_format = (
            torch.channels_last if channels_last else torch.contiguous_format
        )
        weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
            memory_format=memory_format
        )
        bias_tensor = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(weight)
            self.bias = (
                torch.nn.Parameter(bias_tensor) if bias_tensor is not None else None
            )
        else:
            # Frozen layer: tensors stay in the state_dict but are not
            # registered as parameters, so optimizers skip them.
            self.register_buffer("weight", weight)
            if bias_tensor is not None:
                self.register_buffer("bias", bias_tensor)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        w = self.weight * self.weight_gain
        x = conv2d_resample(
            x=x,
            w=w,
            f=self.resample_filter,
            up=self.up,
            down=self.down,
            padding=self.padding,
        )
        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        return bias_act(
            x, self.bias, act=self.activation, gain=act_gain, clamp=act_clamp
        )
def torch_gc():
    """Release cached CUDA memory; a no-op when CUDA is unavailable."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()
def set_seed(seed: int):
    """Seed the python, numpy, and torch (CPU + all CUDA) RNGs for reproducibility."""
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
def get_scheduler(sd_sampler, scheduler_config):
    """Instantiate the diffusers scheduler matching `sd_sampler` from a config.

    Raises:
        ValueError: for an unrecognized sampler.
    """
    sampler_to_scheduler = {
        SDSampler.ddim: DDIMScheduler,
        SDSampler.pndm: PNDMScheduler,
        SDSampler.k_lms: LMSDiscreteScheduler,
        SDSampler.k_euler: EulerDiscreteScheduler,
        SDSampler.k_euler_a: EulerAncestralDiscreteScheduler,
        SDSampler.dpm_plus_plus: DPMSolverMultistepScheduler,
        SDSampler.uni_pc: UniPCMultistepScheduler,
    }
    if sd_sampler not in sampler_to_scheduler:
        raise ValueError(sd_sampler)
    return sampler_to_scheduler[sd_sampler].from_config(scheduler_config)
| 33,811 | 34.893843 | 148 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/zits.py | import os
import time
import cv2
import torch
import torch.nn.functional as F
from lama_cleaner.helper import get_cache_path_by_url, load_jit_model
from lama_cleaner.schema import Config
import numpy as np
from lama_cleaner.model.base import InpaintModel
# Pretrained TorchScript checkpoint locations for the four ZITS models
# (inpaint, edge-line, structure-upsample, wireframe). Each URL and MD5 can
# be overridden via an environment variable of the same name; the MD5 is
# presumably used by load_jit_model() to verify the downloaded file.
ZITS_INPAINT_MODEL_URL = os.environ.get(
    "ZITS_INPAINT_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-inpaint-0717.pt",
)
ZITS_INPAINT_MODEL_MD5 = os.environ.get(
    "ZITS_INPAINT_MODEL_MD5", "9978cc7157dc29699e42308d675b2154"
)
ZITS_EDGE_LINE_MODEL_URL = os.environ.get(
    "ZITS_EDGE_LINE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-edge-line-0717.pt",
)
ZITS_EDGE_LINE_MODEL_MD5 = os.environ.get(
    "ZITS_EDGE_LINE_MODEL_MD5", "55e31af21ba96bbf0c80603c76ea8c5f"
)
ZITS_STRUCTURE_UPSAMPLE_MODEL_URL = os.environ.get(
    "ZITS_STRUCTURE_UPSAMPLE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-structure-upsample-0717.pt",
)
ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5 = os.environ.get(
    "ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5", "3d88a07211bd41b2ec8cc0d999f29927"
)
ZITS_WIRE_FRAME_MODEL_URL = os.environ.get(
    "ZITS_WIRE_FRAME_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_zits/zits-wireframe-0717.pt",
)
ZITS_WIRE_FRAME_MODEL_MD5 = os.environ.get(
    "ZITS_WIRE_FRAME_MODEL_MD5", "a9727c63a8b48b65c905d351b21ce46b"
)
def resize(img, height, width, center_crop=False):
    """Resize `img` to (height, width), optionally center-cropping to square first.

    Uses INTER_AREA interpolation when shrinking on both axes and
    INTER_LINEAR otherwise.

    Args:
        img: [H, W, ...] numpy image.
        height: target height in pixels.
        width: target width in pixels.
        center_crop: crop the largest centered square before resizing.

    Returns:
        The resized (and possibly cropped) image.
    """
    imgh, imgw = img.shape[0:2]
    if center_crop and imgh != imgw:
        # Crop the largest centered square.
        side = np.minimum(imgh, imgw)
        top = (imgh - side) // 2
        left = (imgw - side) // 2
        img = img[top : top + side, left : left + side, ...]
    if imgh > height and imgw > width:
        inter = cv2.INTER_AREA
    else:
        inter = cv2.INTER_LINEAR
    # BUGFIX: cv2.resize takes dsize as (width, height); the previous code
    # passed (height, width), which only happened to work because every
    # caller requested a square target.
    img = cv2.resize(img, (width, height), interpolation=inter)
    return img
def to_tensor(img, scale=True, norm=False):
    """Convert an HWC (or HW) numpy image to a CHW float tensor.

    Args:
        img: [H, W] or [H, W, C] numpy array.
        scale: divide by 255 (maps uint8 pixels into [0, 1]).
        norm: normalize channels with mean=0.5, std=0.5 (maps [0, 1] to [-1, 1]).

    Returns:
        torch.FloatTensor of shape [C, H, W] (C == 1 for grayscale input).
    """
    if img.ndim == 2:
        img = img[:, :, np.newaxis]
    c = img.shape[-1]
    img_t = torch.from_numpy(img).permute(2, 0, 1).float()
    if scale:
        img_t = img_t.div(255)
    if norm:
        # Build the per-channel constants from the actual channel count;
        # the previous hard-coded 3-element mean/std crashed on grayscale
        # (reshape of 3 values to (1, 1, 1)).
        mean = torch.full((c, 1, 1), 0.5)
        std = torch.full((c, 1, 1), 0.5)
        img_t = (img_t - mean) / std
    return img_t
def load_masked_position_encoding(mask):
    """Compute positional encodings for masked pixels at 256x256 working resolution.

    The unmasked region is grown one 3x3 dilation step at a time; each
    masked pixel's position is the step at which the growth first reaches
    it (roughly its distance from the mask border). Four extra directional
    filters record from which corner neighborhood the pixel was reached
    (exact direction convention inferred from the filters — TODO confirm).

    Args:
        mask: [H, W] uint8 mask, 255 marks the masked region.

    Returns:
        rel_pos: [H, W] int32 distance quantized to [0, 128), at the
            original resolution (0 outside the mask).
        abs_pos: [H, W] int32 raw step index, at 256x256 resolution.
        direct: [H, W, 4] int32 direction flags, at the original resolution.
    """
    # 3x3 dilation kernel plus four 2x2-corner directional kernels.
    ones_filter = np.ones((3, 3), dtype=np.float32)
    d_filter1 = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]], dtype=np.float32)
    d_filter2 = np.array([[0, 0, 0], [1, 1, 0], [1, 1, 0]], dtype=np.float32)
    d_filter3 = np.array([[0, 1, 1], [0, 1, 1], [0, 0, 0]], dtype=np.float32)
    d_filter4 = np.array([[0, 0, 0], [0, 1, 1], [0, 1, 1]], dtype=np.float32)
    str_size = 256  # working resolution for the encoding
    pos_num = 128   # number of quantization bins for rel_pos
    ori_mask = mask.copy()
    ori_h, ori_w = ori_mask.shape[0:2]
    ori_mask = ori_mask / 255
    mask = cv2.resize(mask, (str_size, str_size), interpolation=cv2.INTER_AREA)
    mask[mask > 0] = 255
    h, w = mask.shape[0:2]
    # mask3: 1.0 on the KNOWN (unmasked) region, 0.0 on the hole.
    mask3 = mask.copy()
    mask3 = 1.0 - (mask3 / 255.0)
    pos = np.zeros((h, w), dtype=np.int32)
    direct = np.zeros((h, w, 4), dtype=np.int32)
    i = 0
    # Grow the known region until the hole is filled, recording at which
    # step (and from which direction) each hole pixel is first reached.
    while np.sum(1 - mask3) > 0:
        i += 1
        mask3_ = cv2.filter2D(mask3, -1, ones_filter)
        mask3_[mask3_ > 0] = 1
        # Pixels newly covered by this dilation step.
        sub_mask = mask3_ - mask3
        pos[sub_mask == 1] = i
        m = cv2.filter2D(mask3, -1, d_filter1)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 0] = 1
        m = cv2.filter2D(mask3, -1, d_filter2)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 1] = 1
        m = cv2.filter2D(mask3, -1, d_filter3)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 2] = 1
        m = cv2.filter2D(mask3, -1, d_filter4)
        m[m > 0] = 1
        m = m - mask3
        direct[m == 1, 3] = 1
        mask3 = mask3_
    abs_pos = pos.copy()
    rel_pos = pos / (str_size / 2)  # to 0~1 maybe larger than 1
    rel_pos = (rel_pos * pos_num).astype(np.int32)
    rel_pos = np.clip(rel_pos, 0, pos_num - 1)
    # Map rel_pos/direct back to the original resolution, zeroing pixels
    # outside the original mask.
    if ori_w != w or ori_h != h:
        rel_pos = cv2.resize(rel_pos, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST)
        rel_pos[ori_mask == 0] = 0
        direct = cv2.resize(direct, (ori_w, ori_h), interpolation=cv2.INTER_NEAREST)
        direct[ori_mask == 0, :] = 0
    return rel_pos, abs_pos, direct
def load_image(img, mask, device, sigma256=3.0):
    """Build the multi-scale input batch consumed by the ZITS models.

    Args:
        img: [H, W, C] RGB image.
        mask: [H, W] mask; 255 marks the region to inpaint.
        device: torch device all tensors are moved to.
        sigma256: Gaussian sigma for the OpenCV Canny fallback at 256x256.

    Returns:
        dict with image/mask/edge tensors at full, 512 and 256 resolution,
        positional-encoding tensors, and the original height/width.
    """
    imgh, imgw = img.shape[0:2]
    img_256 = resize(img, 256, 256)

    # Binarize the mask, then derive 256 and 512 pixel versions of it.
    mask = (mask > 127).astype(np.uint8) * 255
    mask_256 = cv2.resize(mask, (256, 256), interpolation=cv2.INTER_AREA)
    mask_256[mask_256 > 0] = 255
    mask_512 = cv2.resize(mask, (512, 512), interpolation=cv2.INTER_AREA)
    mask_512[mask_512 > 0] = 255

    # Edge map at 256x256. Prefer skimage's canny (original skimage implemention:
    # https://scikit-image.org/docs/stable/api/skimage.feature.html#skimage.feature.canny
    # low_threshold / high_threshold default to 10% / 20% of the dtype max),
    # and approximate it with OpenCV when skimage is unavailable or fails.
    try:
        import skimage

        gray_256 = skimage.color.rgb2gray(img_256)
        edge_256 = skimage.feature.canny(gray_256, sigma=3.0, mask=None).astype(float)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed while keeping the best-effort fallback.
        gray_256 = cv2.cvtColor(img_256, cv2.COLOR_RGB2GRAY)
        gray_256_blured = cv2.GaussianBlur(
            gray_256, ksize=(7, 7), sigmaX=sigma256, sigmaY=sigma256
        )
        edge_256 = cv2.Canny(
            gray_256_blured, threshold1=int(255 * 0.1), threshold2=int(255 * 0.2)
        )

    img_512 = resize(img, 512, 512)
    rel_pos, abs_pos, direct = load_masked_position_encoding(mask)

    batch = dict()
    batch["images"] = to_tensor(img.copy()).unsqueeze(0).to(device)
    batch["img_256"] = to_tensor(img_256, norm=True).unsqueeze(0).to(device)
    batch["masks"] = to_tensor(mask).unsqueeze(0).to(device)
    batch["mask_256"] = to_tensor(mask_256).unsqueeze(0).to(device)
    batch["mask_512"] = to_tensor(mask_512).unsqueeze(0).to(device)
    batch["edge_256"] = to_tensor(edge_256, scale=False).unsqueeze(0).to(device)
    batch["img_512"] = to_tensor(img_512).unsqueeze(0).to(device)
    batch["rel_pos"] = torch.LongTensor(rel_pos).unsqueeze(0).to(device)
    batch["abs_pos"] = torch.LongTensor(abs_pos).unsqueeze(0).to(device)
    batch["direct"] = torch.LongTensor(direct).unsqueeze(0).to(device)
    batch["h"] = imgh
    batch["w"] = imgw
    return batch
def to_device(data, device):
    """Recursively move tensors (bare, inside dicts, or inside lists) to `device`.

    Dicts are mutated in place, moving only their top-level tensor values
    (matching the original behavior); lists are rebuilt recursively.
    Any other value is returned unchanged.
    """
    if isinstance(data, torch.Tensor):
        return data.to(device)
    if isinstance(data, dict):
        for key in data:
            if isinstance(data[key], torch.Tensor):
                data[key] = data[key].to(device)
        return data
    if isinstance(data, list):
        return [to_device(d, device) for d in data]
    # BUGFIX: previously fell through and returned None for any other type
    # (which also turned non-tensor list elements into None); pass the
    # value through unchanged instead.
    return data
class ZITS(InpaintModel):
name = "zits"
min_size = 256
pad_mod = 32
pad_to_square = True
    def __init__(self, device, **kwargs):
        """
        Args:
            device: torch device the jit models run on.
            **kwargs: unused; accepted for interface compatibility.
        """
        super().__init__(device)
        # NOTE(review): the base class receives `device` too; this
        # re-assignment presumably just guarantees the attribute exists.
        self.device = device
        # Number of refinement passes used by sample_edge_line_logits().
        self.sample_edge_line_iterations = 1
def init_model(self, device, **kwargs):
self.wireframe = load_jit_model(ZITS_WIRE_FRAME_MODEL_URL, device, ZITS_WIRE_FRAME_MODEL_MD5)
self.edge_line = load_jit_model(ZITS_EDGE_LINE_MODEL_URL, device, ZITS_EDGE_LINE_MODEL_MD5)
self.structure_upsample = load_jit_model(
ZITS_STRUCTURE_UPSAMPLE_MODEL_URL, device, ZITS_STRUCTURE_UPSAMPLE_MODEL_MD5
)
self.inpaint = load_jit_model(ZITS_INPAINT_MODEL_URL, device, ZITS_INPAINT_MODEL_MD5)
@staticmethod
def is_downloaded() -> bool:
model_paths = [
get_cache_path_by_url(ZITS_WIRE_FRAME_MODEL_URL),
get_cache_path_by_url(ZITS_EDGE_LINE_MODEL_URL),
get_cache_path_by_url(ZITS_STRUCTURE_UPSAMPLE_MODEL_URL),
get_cache_path_by_url(ZITS_INPAINT_MODEL_URL),
]
return all([os.path.exists(it) for it in model_paths])
def wireframe_edge_and_line(self, items, enable: bool):
# 最终向 items 中添加 edge 和 line key
if not enable:
items["edge"] = torch.zeros_like(items["masks"])
items["line"] = torch.zeros_like(items["masks"])
return
start = time.time()
try:
line_256 = self.wireframe_forward(
items["img_512"],
h=256,
w=256,
masks=items["mask_512"],
mask_th=0.85,
)
except:
line_256 = torch.zeros_like(items["mask_256"])
print(f"wireframe_forward time: {(time.time() - start) * 1000:.2f}ms")
# np_line = (line[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("line.jpg", np_line)
start = time.time()
edge_pred, line_pred = self.sample_edge_line_logits(
context=[items["img_256"], items["edge_256"], line_256],
mask=items["mask_256"].clone(),
iterations=self.sample_edge_line_iterations,
add_v=0.05,
mul_v=4,
)
print(f"sample_edge_line_logits time: {(time.time() - start) * 1000:.2f}ms")
# np_edge_pred = (edge_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("edge_pred.jpg", np_edge_pred)
# np_line_pred = (line_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("line_pred.jpg", np_line_pred)
# exit()
input_size = min(items["h"], items["w"])
if input_size != 256 and input_size > 256:
while edge_pred.shape[2] < input_size:
edge_pred = self.structure_upsample(edge_pred)
edge_pred = torch.sigmoid((edge_pred + 2) * 2)
line_pred = self.structure_upsample(line_pred)
line_pred = torch.sigmoid((line_pred + 2) * 2)
edge_pred = F.interpolate(
edge_pred,
size=(input_size, input_size),
mode="bilinear",
align_corners=False,
)
line_pred = F.interpolate(
line_pred,
size=(input_size, input_size),
mode="bilinear",
align_corners=False,
)
# np_edge_pred = (edge_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("edge_pred_upsample.jpg", np_edge_pred)
# np_line_pred = (line_pred[0][0].numpy() * 255).astype(np.uint8)
# cv2.imwrite("line_pred_upsample.jpg", np_line_pred)
# exit()
items["edge"] = edge_pred.detach()
items["line"] = line_pred.detach()
@torch.no_grad()
def forward(self, image, mask, config: Config):
"""Input images and output images have same size
images: [H, W, C] RGB
masks: [H, W]
return: BGR IMAGE
"""
mask = mask[:, :, 0]
items = load_image(image, mask, device=self.device)
self.wireframe_edge_and_line(items, config.zits_wireframe)
inpainted_image = self.inpaint(
items["images"],
items["masks"],
items["edge"],
items["line"],
items["rel_pos"],
items["direct"],
)
inpainted_image = inpainted_image * 255.0
inpainted_image = (
inpainted_image.cpu().permute(0, 2, 3, 1)[0].numpy().astype(np.uint8)
)
inpainted_image = inpainted_image[:, :, ::-1]
# cv2.imwrite("inpainted.jpg", inpainted_image)
# exit()
return inpainted_image
def wireframe_forward(self, images, h, w, masks, mask_th=0.925):
lcnn_mean = torch.tensor([109.730, 103.832, 98.681]).reshape(1, 3, 1, 1)
lcnn_std = torch.tensor([22.275, 22.124, 23.229]).reshape(1, 3, 1, 1)
images = images * 255.0
# the masks value of lcnn is 127.5
masked_images = images * (1 - masks) + torch.ones_like(images) * masks * 127.5
masked_images = (masked_images - lcnn_mean) / lcnn_std
def to_int(x):
return tuple(map(int, x))
lines_tensor = []
lmap = np.zeros((h, w))
output_masked = self.wireframe(masked_images)
output_masked = to_device(output_masked, "cpu")
if output_masked["num_proposals"] == 0:
lines_masked = []
scores_masked = []
else:
lines_masked = output_masked["lines_pred"].numpy()
lines_masked = [
[line[1] * h, line[0] * w, line[3] * h, line[2] * w]
for line in lines_masked
]
scores_masked = output_masked["lines_score"].numpy()
for line, score in zip(lines_masked, scores_masked):
if score > mask_th:
try:
import skimage
rr, cc, value = skimage.draw.line_aa(
*to_int(line[0:2]), *to_int(line[2:4])
)
lmap[rr, cc] = np.maximum(lmap[rr, cc], value)
except:
cv2.line(lmap, to_int(line[0:2][::-1]), to_int(line[2:4][::-1]), (1, 1, 1), 1, cv2.LINE_AA)
lmap = np.clip(lmap * 255, 0, 255).astype(np.uint8)
lines_tensor.append(to_tensor(lmap).unsqueeze(0))
lines_tensor = torch.cat(lines_tensor, dim=0)
return lines_tensor.detach().to(self.device)
def sample_edge_line_logits(
self, context, mask=None, iterations=1, add_v=0, mul_v=4
):
[img, edge, line] = context
img = img * (1 - mask)
edge = edge * (1 - mask)
line = line * (1 - mask)
for i in range(iterations):
edge_logits, line_logits = self.edge_line(img, edge, line, masks=mask)
edge_pred = torch.sigmoid(edge_logits)
line_pred = torch.sigmoid((line_logits + add_v) * mul_v)
edge = edge + edge_pred * mask
edge[edge >= 0.25] = 1
edge[edge < 0.25] = 0
line = line + line_pred * mask
b, _, h, w = edge_pred.shape
edge_pred = edge_pred.reshape(b, -1, 1)
line_pred = line_pred.reshape(b, -1, 1)
mask = mask.reshape(b, -1)
edge_probs = torch.cat([1 - edge_pred, edge_pred], dim=-1)
line_probs = torch.cat([1 - line_pred, line_pred], dim=-1)
edge_probs[:, :, 1] += 0.5
line_probs[:, :, 1] += 0.5
edge_max_probs = edge_probs.max(dim=-1)[0] + (1 - mask) * (-100)
line_max_probs = line_probs.max(dim=-1)[0] + (1 - mask) * (-100)
indices = torch.sort(
edge_max_probs + line_max_probs, dim=-1, descending=True
)[1]
for ii in range(b):
keep = int((i + 1) / iterations * torch.sum(mask[ii, ...]))
assert torch.sum(mask[ii][indices[ii, :keep]]) == keep, "Error!!!"
mask[ii][indices[ii, :keep]] = 0
mask = mask.reshape(b, 1, h, w)
edge = edge * (1 - mask)
line = line * (1 - mask)
edge, line = edge.to(torch.float32), line.to(torch.float32)
return edge, line
| 15,613 | 33.852679 | 132 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/ddim_sampler.py | import torch
import numpy as np
from tqdm import tqdm
from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from loguru import logger
class DDIMSampler(object):
    """DDIM (Denoising Diffusion Implicit Models) sampler.

    Wraps a pretrained DDPM-style diffusion ``model`` and samples with a
    shortened timestep schedule; with ``ddim_eta == 0`` the reverse
    process is fully deterministic.
    """

    def __init__(self, model, schedule="linear"):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        # Plain attribute assignment; named after nn.Module.register_buffer
        # for familiarity, but this class is not an nn.Module.
        setattr(self, name, attr)

    def make_schedule(
        self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
    ):
        """Precompute the DDIM timestep subset and per-step constants.

        Args:
            ddim_num_steps: number of DDIM sampling steps.
            ddim_discretize: how DDPM timesteps are subsampled.
            ddim_eta: stochasticity; 0.0 means deterministic sampling.
            verbose: forwarded to the schedule helpers.
        """
        self.ddim_timesteps = make_ddim_timesteps(
            ddim_discr_method=ddim_discretize,
            num_ddim_timesteps=ddim_num_steps,
            # array([1])
            num_ddpm_timesteps=self.ddpm_num_timesteps,
            verbose=verbose,
        )
        alphas_cumprod = self.model.alphas_cumprod  # torch.Size([1000])
        assert (
            alphas_cumprod.shape[0] == self.ddpm_num_timesteps
        ), "alphas have to be defined for each timestep"
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer("betas", to_torch(self.model.betas))
        self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
        self.register_buffer(
            "alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev)
        )

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer(
            "sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu()))
        )
        self.register_buffer(
            "sqrt_one_minus_alphas_cumprod",
            to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
        )
        self.register_buffer(
            "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu()))
        )
        self.register_buffer(
            "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))
        )
        self.register_buffer(
            "sqrt_recipm1_alphas_cumprod",
            to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
        )

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
            alphacums=alphas_cumprod.cpu(),
            ddim_timesteps=self.ddim_timesteps,
            eta=ddim_eta,
            verbose=verbose,
        )
        self.register_buffer("ddim_sigmas", ddim_sigmas)
        self.register_buffer("ddim_alphas", ddim_alphas)
        self.register_buffer("ddim_alphas_prev", ddim_alphas_prev)
        self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas))
        # Sigmas to use if sampling with the original (full) DDPM steps.
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev)
            / (1 - self.alphas_cumprod)
            * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)
        )
        self.register_buffer(
            "ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps
        )

    @torch.no_grad()
    def sample(self, steps, conditioning, batch_size, shape):
        """Draw ``batch_size`` samples of ``shape`` = (C, H, W) with a
        deterministic (eta=0) DDIM schedule of ``steps`` steps."""
        self.make_schedule(ddim_num_steps=steps, ddim_eta=0, verbose=False)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)

        # samples: 1,3,128,128
        return self.ddim_sampling(
            conditioning,
            size,
            quantize_denoised=False,
            ddim_use_original_steps=False,
            noise_dropout=0,
            temperature=1.0,
        )

    @torch.no_grad()
    def ddim_sampling(
        self,
        cond,
        shape,
        ddim_use_original_steps=False,
        quantize_denoised=False,
        temperature=1.0,
        noise_dropout=0.0,
    ):
        """Run the reverse diffusion loop from pure noise to a sample."""
        device = self.model.betas.device
        b = shape[0]
        # Start from standard normal noise.
        img = torch.randn(shape, device=device, dtype=cond.dtype)
        timesteps = (
            self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        )

        # Iterate timesteps from high (noisy) to low (clean).
        time_range = (
            reversed(range(0, timesteps))
            if ddim_use_original_steps
            else np.flip(timesteps)
        )
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        logger.info(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc="DDIM Sampler", total=total_steps)

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)

            outs = self.p_sample_ddim(
                img,
                cond,
                ts,
                index=index,
                use_original_steps=ddim_use_original_steps,
                quantize_denoised=quantize_denoised,
                temperature=temperature,
                noise_dropout=noise_dropout,
            )
            img, _ = outs

        return img

    @torch.no_grad()
    def p_sample_ddim(
        self,
        x,
        c,
        t,
        index,
        repeat_noise=False,
        use_original_steps=False,
        quantize_denoised=False,
        temperature=1.0,
        noise_dropout=0.0,
    ):
        """One DDIM update step: returns (x_{t-1}, predicted x_0)."""
        b, *_, device = *x.shape, x.device
        # Predict the noise component e_t for the current latent.
        e_t = self.model.apply_model(x, t, c)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = (
            self.model.alphas_cumprod_prev
            if use_original_steps
            else self.ddim_alphas_prev
        )
        sqrt_one_minus_alphas = (
            self.model.sqrt_one_minus_alphas_cumprod
            if use_original_steps
            else self.ddim_sqrt_one_minus_alphas
        )
        sigmas = (
            self.model.ddim_sigmas_for_original_num_steps
            if use_original_steps
            else self.ddim_sigmas
        )
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full(
            (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
        )

        # current prediction for x_0
        pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        if quantize_denoised:  # unused in practice: always called with False
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
        # direction pointing to x_t
        dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.0:  # unused in practice: always called with 0
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0
| 6,873 | 34.43299 | 99 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/instruct_pix2pix.py | import PIL.Image
import cv2
import torch
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import set_seed
from lama_cleaner.schema import Config
class InstructPix2Pix(DiffusionInpaintModel):
    """InstructPix2Pix: edit an image according to a text instruction.

    Wraps diffusers' ``StableDiffusionInstructPix2PixPipeline``. Unlike the
    inpainting models, the underlying pipeline does not consume the mask.
    """

    name = "instruct_pix2pix"
    pad_mod = 8
    min_size = 512

    def init_model(self, device: torch.device, **kwargs):
        """Build the pipeline.

        Recognized kwargs: ``no_half``, ``local_files_only``,
        ``disable_nsfw``, ``cpu_offload``, ``enable_xformers``.
        """
        from diffusers import StableDiffusionInstructPix2PixPipeline

        fp16 = not kwargs.get("no_half", False)

        model_kwargs = {"local_files_only": kwargs.get("local_files_only", False)}
        # Fix: read 'disable_nsfw' with .get() so a missing key no longer
        # raises KeyError (consistent with the other option lookups here).
        if kwargs.get("disable_nsfw", False) or kwargs.get("cpu_offload", False):
            logger.info("Disable Stable Diffusion Model NSFW checker")
            model_kwargs.update(
                dict(
                    safety_checker=None,
                    feature_extractor=None,
                    requires_safety_checker=False,
                )
            )

        use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
        self.model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix",
            revision="fp16" if use_gpu and fp16 else "main",
            torch_dtype=torch_dtype,
            **model_kwargs,
        )

        self.model.enable_attention_slicing()
        if kwargs.get("enable_xformers", False):
            self.model.enable_xformers_memory_efficient_attention()

        if kwargs.get("cpu_offload", False) and use_gpu:
            logger.info("Enable sequential cpu offload")
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        """
        output = self.model(
            image=PIL.Image.fromarray(image),
            prompt=config.prompt,
            negative_prompt=config.negative_prompt,
            num_inference_steps=config.p2p_steps,
            image_guidance_scale=config.p2p_image_guidance_scale,
            guidance_scale=config.p2p_guidance_scale,
            output_type="np.array",
            generator=torch.manual_seed(config.sd_seed),
        ).images[0]
        # Pipeline returns float RGB in [0, 1]; convert to uint8 BGR.
        output = (output * 255).round().astype("uint8")
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return output

    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
| 3,175 | 36.809524 | 118 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/mat.py | import os
import random
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from lama_cleaner.helper import load_model, get_cache_path_by_url, norm_img
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.model.utils import (
setup_filter,
Conv2dLayer,
FullyConnectedLayer,
conv2d_resample,
bias_act,
upsample2d,
activation_funcs,
MinibatchStdLayer,
to_2tuple,
normalize_2nd_moment,
set_seed,
)
from lama_cleaner.schema import Config
class ModulatedConv2d(nn.Module):
    """Style-modulated 2D convolution (StyleGAN2-style).

    The per-sample style vector is mapped to one multiplicative scale per
    input channel that modulates a shared weight tensor; optional
    demodulation renormalizes each output channel, and the convolution is
    carried out as a grouped convolution over the batch.
    """

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        kernel_size,  # Width and height of the convolution kernel.
        style_dim,  # Dimension of the style code.
        demodulate=True,  # Perform demodulation?
        up=1,  # Integer upsampling factor.
        down=1,  # Integer downsampling factor.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output to +-X, None = disable clamping.
    ):
        super().__init__()
        self.demodulate = demodulate
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        # Shared (unmodulated) weight; leading 1 broadcasts over the batch.
        self.weight = torch.nn.Parameter(
            torch.randn([1, out_channels, in_channels, kernel_size, kernel_size])
        )
        # Equalized learning-rate scaling.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.padding = kernel_size // 2
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        # Maps the style code to a per-input-channel scale.
        self.affine = FullyConnectedLayer(style_dim, in_channels, bias_init=1)

    def forward(self, x, style):
        batch, in_ch, height, width = x.shape
        scales = self.affine(style).view(batch, 1, in_ch, 1, 1)
        w = self.weight * self.weight_gain * scales

        if self.demodulate:
            # Renormalize so each output channel has ~unit variance.
            demod = (w.pow(2).sum(dim=[2, 3, 4]) + 1e-8).rsqrt()
            w = w * demod.view(batch, self.out_channels, 1, 1, 1)

        w = w.view(
            batch * self.out_channels, in_ch, self.kernel_size, self.kernel_size
        )
        # Fold the batch into the channel axis and use grouped convolution
        # so each sample is convolved with its own modulated weights.
        x = x.view(1, batch * in_ch, height, width)
        x = conv2d_resample(
            x=x,
            w=w,
            f=self.resample_filter,
            up=self.up,
            down=self.down,
            padding=self.padding,
            groups=batch,
        )
        return x.view(batch, self.out_channels, *x.shape[2:])
class StyleConv(torch.nn.Module):
    """Style-modulated convolution with optional additive noise and a
    biased, clamped activation (StyleGAN2 synthesis-layer style)."""

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        style_dim,  # Intermediate latent (W) dimensionality.
        resolution,  # Resolution of this layer.
        kernel_size=3,  # Convolution kernel size.
        up=1,  # Integer upsampling factor.
        use_noise=False,  # Enable noise input?
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        demodulate=True,  # perform demodulation
    ):
        super().__init__()
        self.conv = ModulatedConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            style_dim=style_dim,
            demodulate=demodulate,
            up=up,
            resample_filter=resample_filter,
            conv_clamp=conv_clamp,
        )
        self.use_noise = use_noise
        self.resolution = resolution
        if use_noise:
            self.register_buffer("noise_const", torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.activation = activation
        self.act_gain = activation_funcs[activation].def_gain
        self.conv_clamp = conv_clamp

    def forward(self, x, style, noise_mode="random", gain=1):
        """Apply the modulated conv, optional noise, then bias+activation.

        Args:
            x: (B, C, H, W) input features.
            style: per-sample style codes fed to the modulated conv.
            noise_mode: "random", "const", or "none".
            gain: extra gain applied to the activation (and its clamp).
        """
        x = self.conv(x, style)

        assert noise_mode in ["random", "const", "none"]
        # Fix: the original added `noise` whenever use_noise was set, which
        # raised NameError for noise_mode == "none" because no noise had
        # been assigned. Skip the noise addition entirely in that case.
        if self.use_noise and noise_mode != "none":
            if noise_mode == "random":
                xh, xw = x.size()[-2:]
                noise = (
                    torch.randn([x.shape[0], 1, xh, xw], device=x.device)
                    * self.noise_strength
                )
            else:  # "const"
                noise = self.noise_const * self.noise_strength
            x = x + noise

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        out = bias_act(
            x, self.bias, act=self.activation, gain=act_gain, clamp=act_clamp
        )
        return out
class ToRGB(torch.nn.Module):
    """Project features to image channels and accumulate a skip image.

    When a lower-resolution ``skip`` image is given it is upsampled and
    added to this layer's output, building the image progressively.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        style_dim,
        kernel_size=1,
        resample_filter=[1, 3, 3, 1],
        conv_clamp=None,
        demodulate=False,
    ):
        super().__init__()
        self.conv = ModulatedConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            style_dim=style_dim,
            demodulate=demodulate,
            resample_filter=resample_filter,
            conv_clamp=conv_clamp,
        )
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.conv_clamp = conv_clamp

    def forward(self, x, style, skip=None):
        rgb = bias_act(self.conv(x, style), self.bias, clamp=self.conv_clamp)
        if skip is None:
            return rgb
        if skip.shape != rgb.shape:
            # Bring the running skip image up to the current resolution.
            skip = upsample2d(skip, self.resample_filter)
        return rgb + skip
def get_style_code(a, b):
    """Concatenate two style tensors along the feature (dim=1) axis."""
    return torch.cat((a, b), dim=1)
class DecBlockFirst(nn.Module):
    """First decoder block: FC projection to a 4x4 feature map, one styled
    convolution, and the initial RGB output."""

    def __init__(
        self,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.fc = FullyConnectedLayer(
            in_features=in_channels * 2,
            out_features=in_channels * 4 ** 2,
            activation=activation,
        )
        self.conv = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=4,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        # Project the latent to a 4x4 map and add the matching encoder skip.
        x = self.fc(x).view(x.shape[0], -1, 4, 4)
        x = x + E_features[2]
        x = self.conv(x, get_style_code(ws[:, 0], gs), noise_mode=noise_mode)
        img = self.toRGB(x, get_style_code(ws[:, 1], gs), skip=None)
        return x, img
class DecBlockFirstV2(nn.Module):
    """First decoder block (v2): a plain convolution instead of the FC
    projection used by DecBlockFirst, then one styled convolution and the
    initial RGB output."""

    def __init__(
        self,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            activation=activation,
        )
        self.conv1 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=4,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        # Refine the feature map, then add the matching encoder skip.
        x = self.conv0(x)
        x = x + E_features[2]
        x = self.conv1(x, get_style_code(ws[:, 0], gs), noise_mode=noise_mode)
        img = self.toRGB(x, get_style_code(ws[:, 1], gs), skip=None)
        return x, img
class DecBlock(nn.Module):
    """Decoder block at resolution 2**res: upsampling styled convolution,
    encoder-skip addition, a second styled convolution, and an RGB
    accumulation step."""

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):  # res = 2, ..., resolution_log2
        super().__init__()
        self.res = res
        self.conv0 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            up=2,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.conv1 = StyleConv(
            in_channels=out_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, img, ws, gs, E_features, noise_mode="random"):
        # Styles for this resolution live at W indices 2*res-5 .. 2*res-3.
        x = self.conv0(
            x, get_style_code(ws[:, self.res * 2 - 5], gs), noise_mode=noise_mode
        )
        x = x + E_features[self.res]
        x = self.conv1(
            x, get_style_code(ws[:, self.res * 2 - 4], gs), noise_mode=noise_mode
        )
        img = self.toRGB(x, get_style_code(ws[:, self.res * 2 - 3], gs), skip=img)
        return x, img
class MappingNet(torch.nn.Module):
    """Mapping network: (z, c) -> intermediate latent(s) w.

    An MLP that maps a latent ``z`` and an optional conditioning label
    ``c`` into W space, optionally tracks a moving average of w (for
    truncation), and optionally broadcasts the result to ``num_ws``
    copies.
    """

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,  # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,  # Intermediate latent (W) dimensionality.
        num_ws,  # Number of intermediate latents to output, None = do not broadcast.
        num_layers=8,  # Number of mapping layers.
        embed_features=None,  # Label embedding dimensionality, None = same as w_dim.
        layer_features=None,  # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier=0.01,  # Learning rate multiplier for the mapping layers.
        w_avg_beta=0.995,  # Decay for tracking the moving average of W during training, None = do not track.
        torch_dtype=torch.float32,
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        self.torch_dtype = torch_dtype
        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        # Layer widths: input is z (+ label embedding), output is w_dim.
        features_list = (
            [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        )
        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(
                in_features,
                out_features,
                activation=activation,
                lr_multiplier=lr_multiplier,
            )
            setattr(self, f"fc{idx}", layer)
        if num_ws is not None and w_avg_beta is not None:
            # Running average of w, used for truncation at inference time.
            self.register_buffer("w_avg", torch.zeros([w_dim]))

    def forward(
        self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False
    ):
        """Map (z, c) to w, optionally broadcast and apply truncation.

        Args:
            z: latent codes, shape (B, z_dim); ignored when z_dim == 0.
            c: conditioning labels, shape (B, c_dim); ignored when c_dim == 0.
            truncation_psi: 1 = no truncation; otherwise lerp toward w_avg.
            truncation_cutoff: truncate only the first N broadcast latents.
            skip_w_avg_update: do not update the w_avg running average.
        """
        # Embed, normalize, and concat inputs.
        x = None
        if self.z_dim > 0:
            x = normalize_2nd_moment(z)
        if self.c_dim > 0:
            y = normalize_2nd_moment(self.embed(c))
            x = torch.cat([x, y], dim=1) if x is not None else y
        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f"fc{idx}")
            x = layer(x)
        # Update moving average of W (training mode only).
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
        # Broadcast to num_ws copies.
        if self.num_ws is not None:
            x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
        # Apply truncation (lerp toward the running average).
        if truncation_psi != 1:
            assert self.w_avg_beta is not None
            if self.num_ws is None or truncation_cutoff is None:
                x = self.w_avg.lerp(x, truncation_psi)
            else:
                x[:, :truncation_cutoff] = self.w_avg.lerp(
                    x[:, :truncation_cutoff], truncation_psi
                )
        return x
class DisFromRGB(nn.Module):
    """1x1 convolution mapping the RGB(+mask) input into feature space."""

    def __init__(self, in_channels, out_channels, activation):
        super().__init__()
        self.conv = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            activation=activation,
        )

    def forward(self, x):
        return self.conv(x)
class DisBlock(nn.Module):
    """Residual discriminator block that halves the spatial resolution."""

    def __init__(self, in_channels, out_channels, activation):
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            activation=activation,
        )
        self.conv1 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            down=2,
            activation=activation,
        )
        # 1x1 strided projection for the residual shortcut.
        self.skip = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            down=2,
            bias=False,
        )

    def forward(self, x):
        # Both branches are scaled by 1/sqrt(2) to keep activation variance.
        shortcut = self.skip(x, gain=np.sqrt(0.5))
        main = self.conv1(self.conv0(x), gain=np.sqrt(0.5))
        return shortcut + main
class Discriminator(torch.nn.Module):
    """StyleGAN2-style discriminator over (image, mask) pairs.

    A pyramid of residual DisBlocks downsamples the input to 4x4, an
    optional minibatch-stddev layer adds a diversity statistic, and two
    fully connected layers produce the score. When ``c_dim > 0`` a
    projection-discriminator head conditions the score on the label.
    """

    def __init__(
        self,
        c_dim,  # Conditioning label (C) dimensionality.
        img_resolution,  # Input resolution.
        img_channels,  # Number of input color channels.
        channel_base=32768,  # Overall multiplier for the number of channels.
        channel_max=512,  # Maximum number of channels in any layer.
        channel_decay=1,
        cmap_dim=None,  # Dimensionality of mapped conditioning label, None = default.
        activation="lrelu",
        mbstd_group_size=4,  # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1,  # Number of features for the minibatch standard deviation layer, 0 = disable.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        # Resolution must be a power of two >= 4.
        resolution_log2 = int(np.log2(img_resolution))
        assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
        self.resolution_log2 = resolution_log2

        def nf(stage):
            # Channel count for a log2-resolution stage, capped at channel_max.
            return np.clip(
                int(channel_base / 2 ** (stage * channel_decay)), 1, channel_max
            )

        if cmap_dim == None:
            cmap_dim = nf(2)
        if c_dim == 0:
            cmap_dim = 0
        self.cmap_dim = cmap_dim
        if c_dim > 0:
            # Maps the conditioning label to a cmap_dim embedding.
            self.mapping = MappingNet(
                z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None
            )
        # +1 input channel for the (shifted) mask concatenated in forward().
        Dis = [DisFromRGB(img_channels + 1, nf(resolution_log2), activation)]
        for res in range(resolution_log2, 2, -1):
            Dis.append(DisBlock(nf(res), nf(res - 1), activation))
        if mbstd_num_channels > 0:
            Dis.append(
                MinibatchStdLayer(
                    group_size=mbstd_group_size, num_channels=mbstd_num_channels
                )
            )
        Dis.append(
            Conv2dLayer(
                nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation
            )
        )
        self.Dis = nn.Sequential(*Dis)
        # Head: flatten the 4x4 feature map into the score (or cmap space).
        self.fc0 = FullyConnectedLayer(nf(2) * 4 ** 2, nf(2), activation=activation)
        self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim)

    def forward(self, images_in, masks_in, c):
        """Score (images_in, masks_in) pairs; c is the conditioning label."""
        # Mask is shifted to [-0.5, 0.5] and concatenated as an extra channel.
        x = torch.cat([masks_in - 0.5, images_in], dim=1)
        x = self.Dis(x)
        x = self.fc1(self.fc0(x.flatten(start_dim=1)))
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        if self.cmap_dim > 0:
            # Projection discriminator: inner product with the label embedding.
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
        return x
def nf(stage, channel_base=32768, channel_decay=1.0, channel_max=512):
    """Channel count for a log2-resolution ``stage`` (table lookup).

    The formula parameters (channel_base/decay/max) are accepted for API
    compatibility but unused: the mapping is a fixed resolution->channels
    table.
    """
    resolution_to_channels = {
        512: 64,
        256: 128,
        128: 256,
        64: 512,
        32: 512,
        16: 512,
        8: 512,
        4: 512,
    }
    return resolution_to_channels[2 ** stage]
class Mlp(nn.Module):
    """Two-layer MLP used inside transformer blocks.

    Note: ``act_layer`` and ``drop`` are accepted for API compatibility
    but unused — fc1 applies its own "lrelu" activation and no dropout is
    performed.
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        hidden = hidden_features or in_features
        self.fc1 = FullyConnectedLayer(
            in_features=in_features, out_features=hidden, activation="lrelu"
        )
        self.fc2 = FullyConnectedLayer(
            in_features=hidden, out_features=out_features or in_features
        )

    def forward(self, x):
        return self.fc2(self.fc1(x))
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    n_h, n_w = H // window_size, W // window_size
    tiles = x.view(B, n_h, window_size, n_w, window_size, C)
    tiles = tiles.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiles.view(-1, window_size, window_size, C)
def window_reverse(windows, window_size: int, H: int, W: int):
    """Reassemble windows produced by ``window_partition`` into a map.

    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    n_h, n_w = H // window_size, W // window_size
    x = windows.view(B, n_h, n_w, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous()
    return x.view(B, H, W, -1)
class Conv2dLayerPartial(nn.Module):
    """Mask-aware ("partial-convolution"-style) wrapper around Conv2dLayer.

    Alongside the features, a binary validity mask is tracked: outputs are
    rescaled by window_size**2 / (number of valid input pixels under each
    window), and an updated (binarized) mask is returned.
    """

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        kernel_size,  # Width and height of the convolution kernel.
        bias=True,  # Apply additive bias before the activation function?
        activation="linear",  # Activation function: 'relu', 'lrelu', etc.
        up=1,  # Integer upsampling factor.
        down=1,  # Integer downsampling factor.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output to +-X, None = disable clamping.
        trainable=True,  # Update the weights of this layer during training?
    ):
        super().__init__()
        self.conv = Conv2dLayer(
            in_channels,
            out_channels,
            kernel_size,
            bias,
            activation,
            up,
            down,
            resample_filter,
            conv_clamp,
            trainable,
        )
        # All-ones kernel used to count valid pixels under each window.
        self.weight_maskUpdater = torch.ones(1, 1, kernel_size, kernel_size)
        self.slide_winsize = kernel_size ** 2
        self.stride = down
        self.padding = kernel_size // 2 if kernel_size % 2 == 1 else 0

    def forward(self, x, mask=None):
        """Return (features, updated_mask); updated_mask is None when no
        mask is supplied."""
        if mask is not None:
            with torch.no_grad():
                # Keep the counting kernel on x's dtype/device.
                if self.weight_maskUpdater.type() != x.type():
                    self.weight_maskUpdater = self.weight_maskUpdater.to(x)
                # Number of valid input pixels contributing to each output.
                update_mask = F.conv2d(
                    mask,
                    self.weight_maskUpdater,
                    bias=None,
                    stride=self.stride,
                    padding=self.padding,
                )
                # Renormalization factor; epsilon avoids division by zero
                # in fully-invalid windows (zeroed by update_mask below).
                mask_ratio = self.slide_winsize / (update_mask.to(torch.float32) + 1e-8)
                update_mask = torch.clamp(update_mask, 0, 1)  # 0 or 1
                mask_ratio = torch.mul(mask_ratio, update_mask).to(x.dtype)
            x = self.conv(x)
            # Re-normalize outputs by the valid-pixel ratio.
            x = torch.mul(x, mask_ratio)
            return x, update_mask
        else:
            x = self.conv(x)
            return x, None
class WindowAttention(nn.Module):
    r"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Note: queries and keys are computed from L2-normalized input features
    (cosine-attention style); ``down_ratio``, ``qkv_bias``, ``attn_drop``
    and ``proj_drop`` are accepted for API compatibility but unused in
    this implementation.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        down_ratio=1,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.q = FullyConnectedLayer(in_features=dim, out_features=dim)
        self.k = FullyConnectedLayer(in_features=dim, out_features=dim)
        self.v = FullyConnectedLayer(in_features=dim, out_features=dim)
        self.proj = FullyConnectedLayer(in_features=dim, out_features=dim)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask_windows=None, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask_windows: per-token validity mask (1 = valid); invalid
                tokens are suppressed in the attention logits.
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        # q and k are computed from L2-normalized features; v from raw x.
        norm_x = F.normalize(x, p=2.0, dim=-1, eps=torch.finfo(x.dtype).eps)
        q = (
            self.q(norm_x)
            .reshape(B_, N, self.num_heads, C // self.num_heads)
            .permute(0, 2, 1, 3)
        )
        k = (
            self.k(norm_x)
            .view(B_, -1, self.num_heads, C // self.num_heads)
            .permute(0, 2, 3, 1)
        )
        v = (
            self.v(x)
            .view(B_, -1, self.num_heads, C // self.num_heads)
            .permute(0, 2, 1, 3)
        )
        attn = (q @ k) * self.scale
        if mask is not None:
            # Shifted-window attention mask, broadcast over batch and heads.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
                1
            ).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
        if mask_windows is not None:
            # Subtract 100 from logits of invalid tokens so they get ~zero
            # attention weight after the softmax.
            attn_mask_windows = mask_windows.squeeze(-1).unsqueeze(1).unsqueeze(1)
            attn = attn + attn_mask_windows.masked_fill(
                attn_mask_windows == 0, float(-100.0)
            ).masked_fill(attn_mask_windows == 1, float(0.0))
            with torch.no_grad():
                # A window containing any valid token becomes fully valid
                # in the returned mask.
                mask_windows = torch.clamp(
                    torch.sum(mask_windows, dim=1, keepdim=True), 0, 1
                ).repeat(1, N, 1)
        attn = self.softmax(attn)
        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        return x, mask_windows
class SwinTransformerBlock(nn.Module):
    r"""Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        down_ratio (int): Forwarded to WindowAttention (forced to 1 for shifted blocks).
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm

    NOTE(review): drop_path and norm_layer are accepted but not used in this
    implementation (no DropPath or norm layers are constructed here); the
    residual connection is replaced by a learned `fuse` of [shortcut, attn].
    """
    def __init__(
        self,
        dim,
        input_resolution,
        num_heads,
        down_ratio=1,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert (
            0 <= self.shift_size < self.window_size
        ), "shift_size must in 0-window_size"
        if self.shift_size > 0:
            # Shifted blocks always use full-resolution attention.
            down_ratio = 1
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            down_ratio=down_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        # Learned fusion of the shortcut and attention output (instead of a
        # plain residual add).
        self.fuse = FullyConnectedLayer(
            in_features=dim * 2, out_features=dim, activation="lrelu"
        )
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )
        if self.shift_size > 0:
            # Precompute the SW-MSA mask for the nominal resolution; it is
            # recomputed on the fly in forward() for other input sizes.
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)
    def calculate_mask(self, x_size):
        # calculate attention mask for SW-MSA
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        # Label the 3x3 grid of shifted regions with distinct ids ...
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        # ... then forbid attention between tokens from different regions
        # within the same window (-100 additive bias, 0 otherwise).
        mask_windows = window_partition(
            img_mask, self.window_size
        )  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
            attn_mask == 0, float(0.0)
        )
        return attn_mask
    def forward(self, x, x_size, mask=None):
        # H, W = self.input_resolution
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = x.view(B, H, W, C)
        if mask is not None:
            mask = mask.view(B, H, W, 1)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(
                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
            )
            if mask is not None:
                shifted_mask = torch.roll(
                    mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
                )
        else:
            shifted_x = x
            if mask is not None:
                shifted_mask = mask
        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C
        if mask is not None:
            mask_windows = window_partition(shifted_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size, 1)
        else:
            mask_windows = None
        # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            # Nominal resolution: reuse the precomputed buffered mask.
            attn_windows, mask_windows = self.attn(
                x_windows, mask_windows, mask=self.attn_mask
            )  # nW*B, window_size*window_size, C
        else:
            # Other resolutions: recompute the mask for the actual size.
            attn_windows, mask_windows = self.attn(
                x_windows,
                mask_windows,
                mask=self.calculate_mask(x_size).to(x.dtype).to(x.device),
            )  # nW*B, window_size*window_size, C
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
        if mask is not None:
            mask_windows = mask_windows.view(-1, self.window_size, self.window_size, 1)
            shifted_mask = window_reverse(mask_windows, self.window_size, H, W)
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(
                shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
            )
            if mask is not None:
                mask = torch.roll(
                    shifted_mask, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
                )
        else:
            x = shifted_x
            if mask is not None:
                mask = shifted_mask
        x = x.view(B, H * W, C)
        if mask is not None:
            mask = mask.view(B, H * W, 1)
        # FFN
        # Learned fusion of shortcut + attention output, then the MLP.
        x = self.fuse(torch.cat([shortcut, x], dim=-1))
        x = self.mlp(x)
        return x, mask
class PatchMerging(nn.Module):
    """Downsample a token sequence by `down` using a mask-aware 3x3 convolution.

    Tokens are unflattened to a (B, C, H, W) map, convolved (with partial-conv
    mask propagation), and flattened back; the bookkeeping size tuple is
    scaled accordingly.
    """

    def __init__(self, in_channels, out_channels, down=2):
        super().__init__()
        self.conv = Conv2dLayerPartial(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation="lrelu",
            down=down,
        )
        self.down = down

    def forward(self, x, x_size, mask=None):
        feat = token2feature(x, x_size)
        mask_feat = None if mask is None else token2feature(mask, x_size)
        feat, mask_feat = self.conv(feat, mask_feat)
        if self.down != 1:
            scale = 1 / self.down
            x_size = (int(x_size[0] * scale), int(x_size[1] * scale))
        tokens = feature2token(feat)
        mask_tokens = None if mask_feat is None else feature2token(mask_feat)
        return tokens, x_size, mask_tokens
class PatchUpsampling(nn.Module):
    """Upsample a token sequence by `up` using a mask-aware 3x3 convolution.

    Mirror of PatchMerging: tokens -> feature map -> partial conv with
    upsampling -> tokens, with the size tuple scaled by `up`.
    """

    def __init__(self, in_channels, out_channels, up=2):
        super().__init__()
        self.conv = Conv2dLayerPartial(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation="lrelu",
            up=up,
        )
        self.up = up

    def forward(self, x, x_size, mask=None):
        feat = token2feature(x, x_size)
        mask_feat = None if mask is None else token2feature(mask, x_size)
        feat, mask_feat = self.conv(feat, mask_feat)
        if self.up != 1:
            x_size = (int(x_size[0] * self.up), int(x_size[1] * self.up))
        tokens = feature2token(feat)
        mask_tokens = None if mask_feat is None else feature2token(mask_feat)
        return tokens, x_size, mask_tokens
class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Resampling module (PatchMerging/
            PatchUpsampling instance) applied BEFORE the blocks. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(
        self,
        dim,
        input_resolution,
        depth,
        num_heads,
        window_size,
        down_ratio=1,
        mlp_ratio=2.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        # patch merging layer
        # NOTE(review): unlike standard Swin, `downsample` here is an already
        # constructed module and is applied at the START of forward().
        if downsample is not None:
            # self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
            self.downsample = downsample
        else:
            self.downsample = None
        # build blocks
        # Alternate between W-MSA (even blocks) and SW-MSA (odd blocks).
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    down_ratio=down_ratio,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i]
                    if isinstance(drop_path, list)
                    else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        # Mask-aware conv used for the stage-level residual fusion.
        self.conv = Conv2dLayerPartial(
            in_channels=dim, out_channels=dim, kernel_size=3, activation="lrelu"
        )
    def forward(self, x, x_size, mask=None):
        # Resample (merge/upsample) before running the transformer blocks.
        if self.downsample is not None:
            x, x_size, mask = self.downsample(x, x_size, mask)
        identity = x
        for blk in self.blocks:
            if self.use_checkpoint:
                # Trade compute for memory by re-running the block in backward.
                x, mask = checkpoint.checkpoint(blk, x, x_size, mask)
            else:
                x, mask = blk(x, x_size, mask)
        # Stage-level residual: conv over the feature map, added to the
        # (post-downsample) input tokens.
        if mask is not None:
            mask = token2feature(mask, x_size)
        x, mask = self.conv(token2feature(x, x_size), mask)
        x = feature2token(x) + identity
        if mask is not None:
            mask = feature2token(mask)
        return x, x_size, mask
class ToToken(nn.Module):
    """Project an image (and its validity mask) into `dim`-channel features
    with a single mask-aware convolution."""

    def __init__(self, in_channels=3, dim=128, kernel_size=5, stride=1):
        # NOTE(review): `stride` is accepted for interface compatibility but
        # is not forwarded to the convolution.
        super().__init__()
        self.proj = Conv2dLayerPartial(
            in_channels=in_channels,
            out_channels=dim,
            kernel_size=kernel_size,
            activation="lrelu",
        )

    def forward(self, x, mask):
        projected, projected_mask = self.proj(x, mask)
        return projected, projected_mask
class EncFromRGB(nn.Module):
    """Encoder entry block: 1x1 channel projection from the RGB(+mask) input
    followed by a 3x3 convolution, both with the given activation."""

    def __init__(
        self, in_channels, out_channels, activation
    ):  # res = 2, ..., resolution_log2
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            activation=activation,
        )
        self.conv1 = Conv2dLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation=activation,
        )

    def forward(self, x):
        return self.conv1(self.conv0(x))
class ConvBlockDown(nn.Module):
    """Encoder downsampling block: a stride-2 3x3 convolution followed by a
    stride-1 3x3 convolution."""

    def __init__(
        self, in_channels, out_channels, activation
    ):  # res = 2, ..., resolution_log
        super().__init__()
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation=activation,
            down=2,
        )
        self.conv1 = Conv2dLayer(
            in_channels=out_channels,
            out_channels=out_channels,
            kernel_size=3,
            activation=activation,
        )

    def forward(self, x):
        return self.conv1(self.conv0(x))
def token2feature(x, x_size):
    """Reshape a token sequence (B, N, C) into a feature map (B, C, H, W),
    where (H, W) = x_size and N must equal H * W."""
    batch, _, channels = x.shape
    height, width = x_size
    return x.transpose(1, 2).reshape(batch, channels, height, width)
def feature2token(x):
    """Flatten a feature map (B, C, H, W) into a token sequence (B, H*W, C)."""
    batch, channels, height, width = x.shape
    return x.view(batch, channels, height * width).permute(0, 2, 1)
class Encoder(nn.Module):
    """Convolutional pyramid encoder.

    Builds one block per resolution from the input size down to 16x16
    (EncFromRGB at the top, ConvBlockDown below) and returns a dict mapping
    log2(resolution) -> feature map.
    """

    def __init__(
        self,
        res_log2,
        img_channels,
        activation,
        patch_size=5,
        channels=16,
        drop_path_rate=0.1,
    ):
        # NOTE(review): patch_size, channels and drop_path_rate are accepted
        # for interface compatibility but unused here.
        super().__init__()
        self.resolution = []
        for i in range(res_log2, 3, -1):  # from input size to 16x16
            res = 2 ** i
            self.resolution.append(res)
            if i == res_log2:
                block = EncFromRGB(img_channels * 2 + 1, nf(i), activation)
            else:
                block = ConvBlockDown(nf(i + 1), nf(i), activation)
            setattr(self, "EncConv_Block_%dx%d" % (res, res), block)

    def forward(self, x):
        out = {}
        for res in self.resolution:
            x = getattr(self, "EncConv_Block_%dx%d" % (res, res))(x)
            out[int(np.log2(res))] = x
        return out
class ToStyle(nn.Module):
    """Collapse a feature map into a global style vector: three stride-2
    convolutions, global average pooling, then a fully connected layer."""

    def __init__(self, in_channels, out_channels, activation, drop_rate):
        # NOTE(review): drop_rate is accepted but unused (the dropout layer
        # is disabled in this implementation).
        super().__init__()
        down_layers = [
            Conv2dLayer(
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_size=3,
                activation=activation,
                down=2,
            )
            for _ in range(3)
        ]
        self.conv = nn.Sequential(*down_layers)
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = FullyConnectedLayer(
            in_features=in_channels, out_features=out_channels, activation=activation
        )

    def forward(self, x):
        pooled = self.pool(self.conv(x))
        return self.fc(pooled.flatten(start_dim=1))
class DecBlockFirstV2(nn.Module):
    """First decoder block (16x16): plain conv + encoder skip, one style-
    modulated conv, and an initial RGB projection."""

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.res = res
        self.conv0 = Conv2dLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            activation=activation,
        )
        self.conv1 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        # Fuse with the matching-resolution encoder feature before modulation.
        x = self.conv0(x) + E_features[self.res]
        x = self.conv1(x, get_style_code(ws[:, 0], gs), noise_mode=noise_mode)
        img = self.toRGB(x, get_style_code(ws[:, 1], gs), skip=None)
        return x, img
class DecBlock(nn.Module):
    """Decoder block for res = 4..resolution_log2: upsampling style conv +
    encoder skip, a second style conv, and a skip-accumulating RGB output."""

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):  # res = 4, ..., resolution_log2
        super().__init__()
        self.res = res
        self.conv0 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            up=2,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.conv1 = StyleConv(
            in_channels=out_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, img, ws, gs, E_features, noise_mode="random"):
        # Three consecutive W entries per resolution, starting at res*2 - 9.
        base = self.res * 2
        x = self.conv0(x, get_style_code(ws[:, base - 9], gs), noise_mode=noise_mode)
        x = x + E_features[self.res]
        x = self.conv1(x, get_style_code(ws[:, base - 8], gs), noise_mode=noise_mode)
        img = self.toRGB(x, get_style_code(ws[:, base - 7], gs), skip=img)
        return x, img
class Decoder(nn.Module):
    """Stack of style-modulated decoder blocks from 16x16 up to the full
    output resolution, returning the accumulated RGB image."""

    def __init__(
        self, res_log2, activation, style_dim, use_noise, demodulate, img_channels
    ):
        super().__init__()
        self.Dec_16x16 = DecBlockFirstV2(
            4, nf(4), nf(4), activation, style_dim, use_noise, demodulate, img_channels
        )
        for res in range(5, res_log2 + 1):
            side = 2 ** res
            block = DecBlock(
                res,
                nf(res - 1),
                nf(res),
                activation,
                style_dim,
                use_noise,
                demodulate,
                img_channels,
            )
            setattr(self, "Dec_%dx%d" % (side, side), block)
        self.res_log2 = res_log2

    def forward(self, x, ws, gs, E_features, noise_mode="random"):
        x, img = self.Dec_16x16(x, ws, gs, E_features, noise_mode=noise_mode)
        for res in range(5, self.res_log2 + 1):
            side = 2 ** res
            blk = getattr(self, "Dec_%dx%d" % (side, side))
            x, img = blk(x, img, ws, gs, E_features, noise_mode=noise_mode)
        return img
class DecStyleBlock(nn.Module):
    """Stage-1 decoder block: upsampling style conv, skip addition, a second
    style conv, and a skip-accumulating RGB projection — all driven by one
    shared style vector."""

    def __init__(
        self,
        res,
        in_channels,
        out_channels,
        activation,
        style_dim,
        use_noise,
        demodulate,
        img_channels,
    ):
        super().__init__()
        self.res = res
        self.conv0 = StyleConv(
            in_channels=in_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            up=2,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.conv1 = StyleConv(
            in_channels=out_channels,
            out_channels=out_channels,
            style_dim=style_dim,
            resolution=2 ** res,
            kernel_size=3,
            use_noise=use_noise,
            activation=activation,
            demodulate=demodulate,
        )
        self.toRGB = ToRGB(
            in_channels=out_channels,
            out_channels=img_channels,
            style_dim=style_dim,
            kernel_size=1,
            demodulate=False,
        )

    def forward(self, x, img, style, skip, noise_mode="random"):
        x = self.conv0(x, style, noise_mode=noise_mode)
        x = self.conv1(x + skip, style, noise_mode=noise_mode)
        img = self.toRGB(x, style, skip=img)
        return x, img
class FirstStage(nn.Module):
    """Stage-1 coarse inpainting network.

    Pipeline: partial-conv stem -> conv downsampling to 64x64 -> U-shaped Swin
    transformer (64 -> 16 -> 64) with skip connections -> style-modulated conv
    decoder back to the input resolution, composited with the known pixels.
    """

    def __init__(
        self,
        img_channels,
        img_resolution=256,
        dim=180,
        w_dim=512,
        use_noise=False,
        demodulate=True,
        activation="lrelu",
    ):
        super().__init__()
        res = 64
        # Stem over the concatenated (mask - 0.5, masked image) input.
        self.conv_first = Conv2dLayerPartial(
            in_channels=img_channels + 1,
            out_channels=dim,
            kernel_size=3,
            activation=activation,
        )
        self.enc_conv = nn.ModuleList()
        down_time = int(np.log2(img_resolution // res))
        # Build the number of downsampling conv layers from the image size.
        for i in range(down_time):  # from input size to 64
            self.enc_conv.append(
                Conv2dLayerPartial(
                    in_channels=dim,
                    out_channels=dim,
                    kernel_size=3,
                    down=2,
                    activation=activation,
                )
            )
        # from 64 -> 16 -> 64
        depths = [2, 3, 4, 3, 2]
        ratios = [1, 1 / 2, 1 / 2, 2, 2]
        num_heads = 6
        window_sizes = [8, 16, 16, 16, 8]
        drop_path_rate = 0.1
        # Linearly increasing stochastic-depth rates across all blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        self.tran = nn.ModuleList()
        for i, depth in enumerate(depths):
            res = int(res * ratios[i])
            if ratios[i] < 1:
                merge = PatchMerging(dim, dim, down=int(1 / ratios[i]))
            elif ratios[i] > 1:
                merge = PatchUpsampling(dim, dim, up=ratios[i])
            else:
                merge = None
            self.tran.append(
                BasicLayer(
                    dim=dim,
                    input_resolution=[res, res],
                    depth=depth,
                    num_heads=num_heads,
                    window_size=window_sizes[i],
                    drop_path=dpr[sum(depths[:i]) : sum(depths[: i + 1])],
                    downsample=merge,
                )
            )
        # global style
        # Reduce the 16x16 bottleneck feature to a 1x1 global descriptor.
        down_conv = []
        for i in range(int(np.log2(16))):
            down_conv.append(
                Conv2dLayer(
                    in_channels=dim,
                    out_channels=dim,
                    kernel_size=3,
                    down=2,
                    activation=activation,
                )
            )
        down_conv.append(nn.AdaptiveAvgPool2d((1, 1)))
        self.down_conv = nn.Sequential(*down_conv)
        self.to_style = FullyConnectedLayer(
            in_features=dim, out_features=dim * 2, activation=activation
        )
        self.ws_style = FullyConnectedLayer(
            in_features=w_dim, out_features=dim, activation=activation
        )
        self.to_square = FullyConnectedLayer(
            in_features=dim, out_features=16 * 16, activation=activation
        )
        # Decoder style = [global style (2*dim), mapped W (dim)].
        style_dim = dim * 3
        self.dec_conv = nn.ModuleList()
        for i in range(down_time):  # from 64 to input size
            res = res * 2
            self.dec_conv.append(
                DecStyleBlock(
                    res,
                    dim,
                    dim,
                    activation,
                    style_dim,
                    use_noise,
                    demodulate,
                    img_channels,
                )
            )

    def forward(self, images_in, masks_in, ws, noise_mode="random"):
        x = torch.cat([masks_in - 0.5, images_in * masks_in], dim=1)
        skips = []
        x, mask = self.conv_first(x, masks_in)  # input size
        skips.append(x)
        for i, block in enumerate(self.enc_conv):  # input size to 64
            x, mask = block(x, mask)
            if i != len(self.enc_conv) - 1:
                skips.append(x)
        x_size = x.size()[-2:]
        x = feature2token(x)
        mask = feature2token(mask)
        mid = len(self.tran) // 2
        for i, block in enumerate(self.tran):  # 64 to 16
            if i < mid:
                # Encoder half: keep propagating the mask, record skips.
                x, x_size, mask = block(x, x_size, mask)
                skips.append(x)
            elif i > mid:
                # Decoder half: skip connection via negative indexing
                # (i = mid+1 pairs with skips[-1], the last encoder-half skip).
                x, x_size, mask = block(x, x_size, None)
                x = x + skips[mid - i]
            else:
                x, x_size, mask = block(x, x_size, None)
                # F.dropout(p=0.5) on a constant 0.5 map yields a random 0/1
                # mask (retained entries rescale to 0.5/0.5 = 1.0).
                mul_map = torch.ones_like(x) * 0.5
                mul_map = F.dropout(mul_map, training=True)
                ws = self.ws_style(ws[:, -1])
                add_n = self.to_square(ws).unsqueeze(1)
                add_n = (
                    F.interpolate(
                        add_n, size=x.size(1), mode="linear", align_corners=False
                    )
                    .squeeze(1)
                    .unsqueeze(-1)
                )
                # Randomly blend bottleneck features with the W-derived map.
                x = x * mul_map + add_n * (1 - mul_map)
                # Global style = [pooled feature style, mapped W].
                gs = self.to_style(
                    self.down_conv(token2feature(x, x_size)).flatten(start_dim=1)
                )
                style = torch.cat([gs, ws], dim=1)
        x = token2feature(x, x_size).contiguous()
        img = None
        for i, block in enumerate(self.dec_conv):
            x, img = block(
                x, img, style, skips[len(self.dec_conv) - i - 1], noise_mode=noise_mode
            )
        # ensemble
        # Keep known pixels; use the generated content only inside the hole.
        img = img * (1 - masks_in) + images_in * masks_in
        return img
class SynthesisNet(nn.Module):
    """Two-stage MAT synthesis network: a coarse FirstStage result followed by
    a conv encoder/decoder refinement conditioned on W and a global style."""

    def __init__(
        self,
        w_dim,  # Intermediate latent (W) dimensionality.
        img_resolution,  # Output image resolution.
        img_channels=3,  # Number of color channels.
        channel_base=32768,  # Overall multiplier for the number of channels.
        channel_decay=1.0,
        channel_max=512,  # Maximum number of channels in any layer.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        drop_rate=0.5,
        use_noise=False,
        demodulate=True,
    ):
        # NOTE(review): channel_base/channel_decay/channel_max are accepted for
        # config compatibility but unused here; widths come from the global nf().
        super().__init__()
        resolution_log2 = int(np.log2(img_resolution))
        assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
        self.num_layers = resolution_log2 * 2 - 3 * 2
        self.img_resolution = img_resolution
        self.resolution_log2 = resolution_log2
        # first stage
        self.first_stage = FirstStage(
            img_channels,
            img_resolution=img_resolution,
            w_dim=w_dim,
            use_noise=False,
            demodulate=demodulate,
        )
        # second stage
        self.enc = Encoder(
            resolution_log2, img_channels, activation, patch_size=5, channels=16
        )
        self.to_square = FullyConnectedLayer(
            in_features=w_dim, out_features=16 * 16, activation=activation
        )
        self.to_style = ToStyle(
            in_channels=nf(4),
            out_channels=nf(2) * 2,
            activation=activation,
            drop_rate=drop_rate,
        )
        style_dim = w_dim + nf(2) * 2
        self.dec = Decoder(
            resolution_log2, activation, style_dim, use_noise, demodulate, img_channels
        )

    def forward(self, images_in, masks_in, ws, noise_mode="random", return_stg1=False):
        # Stage 1: coarse inpainting result.
        out_stg1 = self.first_stage(images_in, masks_in, ws, noise_mode=noise_mode)
        # encoder
        # Input to the refinement encoder: (mask, stage-1 composite, masked image).
        x = images_in * masks_in + out_stg1 * (1 - masks_in)
        x = torch.cat([masks_in - 0.5, x, images_in * masks_in], dim=1)
        E_features = self.enc(x)
        fea_16 = E_features[4]
        # F.dropout(p=0.5) on a constant 0.5 map gives a random 0/1 blend mask.
        mul_map = torch.ones_like(fea_16) * 0.5
        mul_map = F.dropout(mul_map, training=True)
        add_n = self.to_square(ws[:, 0]).view(-1, 16, 16).unsqueeze(1)
        add_n = F.interpolate(
            add_n, size=fea_16.size()[-2:], mode="bilinear", align_corners=False
        )
        fea_16 = fea_16 * mul_map + add_n * (1 - mul_map)
        E_features[4] = fea_16
        # style
        gs = self.to_style(fea_16)
        # decoder
        img = self.dec(fea_16, ws, gs, E_features, noise_mode=noise_mode)
        # ensemble
        # Keep known pixels from the input; fill only the hole region.
        img = img * (1 - masks_in) + images_in * masks_in
        if not return_stg1:
            return img
        else:
            return img, out_stg1
class Generator(nn.Module):
    """MAT generator: a mapping network (Z, C -> W) feeding the two-stage
    synthesis network that produces the inpainted image."""

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,  # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,  # Intermediate latent (W) dimensionality.
        img_resolution,  # resolution of generated image
        img_channels,  # Number of input color channels.
        synthesis_kwargs={},  # Arguments for SynthesisNetwork.
        mapping_kwargs={},  # Arguments for MappingNetwork.
    ):
        # NOTE(review): mutable default dicts kept for interface compatibility;
        # they are only unpacked, never mutated.
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.synthesis = SynthesisNet(
            w_dim=w_dim,
            img_resolution=img_resolution,
            img_channels=img_channels,
            **synthesis_kwargs,
        )
        self.mapping = MappingNet(
            z_dim=z_dim,
            c_dim=c_dim,
            w_dim=w_dim,
            num_ws=self.synthesis.num_layers,
            **mapping_kwargs,
        )

    def forward(
        self,
        images_in,
        masks_in,
        z,
        c,
        truncation_psi=1,
        truncation_cutoff=None,
        skip_w_avg_update=False,
        noise_mode="none",
        return_stg1=False,
    ):
        """Map latents to W, then synthesize the inpainted image.

        NOTE(review): `return_stg1` is accepted but not forwarded to the
        synthesis network here.
        """
        latent_ws = self.mapping(
            z,
            c,
            truncation_psi=truncation_psi,
            truncation_cutoff=truncation_cutoff,
            skip_w_avg_update=skip_w_avg_update,
        )
        return self.synthesis(images_in, masks_in, latent_ws, noise_mode=noise_mode)
class Discriminator(torch.nn.Module):
    """Two-branch MAT discriminator.

    The main branch scores the final image and a half-width parallel branch
    (`*_stg1`) scores the stage-1 output. Each branch is a log2 pyramid of
    DisBlocks down to 4x4 with an optional minibatch-stddev layer and two
    fully connected heads. With c_dim > 0, scores are conditioned via a
    projection against the mapped label embedding.
    """

    def __init__(
        self,
        c_dim,  # Conditioning label (C) dimensionality.
        img_resolution,  # Input resolution.
        img_channels,  # Number of input color channels.
        channel_base=32768,  # Overall multiplier for the number of channels.
        channel_max=512,  # Maximum number of channels in any layer.
        channel_decay=1,
        cmap_dim=None,  # Dimensionality of mapped conditioning label, None = default.
        activation="lrelu",
        mbstd_group_size=4,  # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1,  # Number of features for the minibatch standard deviation layer, 0 = disable.
    ):
        # NOTE(review): channel_base/channel_max/channel_decay are accepted for
        # config compatibility; channel widths actually come from the global nf().
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels

        resolution_log2 = int(np.log2(img_resolution))
        assert img_resolution == 2 ** resolution_log2 and img_resolution >= 4
        self.resolution_log2 = resolution_log2

        # Fixed: identity comparison with None (was `cmap_dim == None`).
        if cmap_dim is None:
            cmap_dim = nf(2)
        if c_dim == 0:
            # Unconditional model: disable the conditioning projection.
            cmap_dim = 0
        self.cmap_dim = cmap_dim

        if c_dim > 0:
            self.mapping = MappingNet(
                z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None
            )

        # Main branch: (image + mask) -> pyramid down to 4x4.
        Dis = [DisFromRGB(img_channels + 1, nf(resolution_log2), activation)]
        for res in range(resolution_log2, 2, -1):
            Dis.append(DisBlock(nf(res), nf(res - 1), activation))

        if mbstd_num_channels > 0:
            Dis.append(
                MinibatchStdLayer(
                    group_size=mbstd_group_size, num_channels=mbstd_num_channels
                )
            )
        Dis.append(
            Conv2dLayer(
                nf(2) + mbstd_num_channels, nf(2), kernel_size=3, activation=activation
            )
        )
        self.Dis = nn.Sequential(*Dis)

        self.fc0 = FullyConnectedLayer(nf(2) * 4 ** 2, nf(2), activation=activation)
        self.fc1 = FullyConnectedLayer(nf(2), 1 if cmap_dim == 0 else cmap_dim)

        # Stage-1 branch at half channel width.
        Dis_stg1 = [DisFromRGB(img_channels + 1, nf(resolution_log2) // 2, activation)]
        for res in range(resolution_log2, 2, -1):
            Dis_stg1.append(DisBlock(nf(res) // 2, nf(res - 1) // 2, activation))

        if mbstd_num_channels > 0:
            Dis_stg1.append(
                MinibatchStdLayer(
                    group_size=mbstd_group_size, num_channels=mbstd_num_channels
                )
            )
        Dis_stg1.append(
            Conv2dLayer(
                nf(2) // 2 + mbstd_num_channels,
                nf(2) // 2,
                kernel_size=3,
                activation=activation,
            )
        )
        self.Dis_stg1 = nn.Sequential(*Dis_stg1)

        self.fc0_stg1 = FullyConnectedLayer(
            nf(2) // 2 * 4 ** 2, nf(2) // 2, activation=activation
        )
        self.fc1_stg1 = FullyConnectedLayer(
            nf(2) // 2, 1 if cmap_dim == 0 else cmap_dim
        )

    def forward(self, images_in, masks_in, images_stg1, c):
        """Return (score, score_stg1) for the final and stage-1 images."""
        x = self.Dis(torch.cat([masks_in - 0.5, images_in], dim=1))
        x = self.fc1(self.fc0(x.flatten(start_dim=1)))

        x_stg1 = self.Dis_stg1(torch.cat([masks_in - 0.5, images_stg1], dim=1))
        x_stg1 = self.fc1_stg1(self.fc0_stg1(x_stg1.flatten(start_dim=1)))

        if self.c_dim > 0:
            cmap = self.mapping(None, c)

        if self.cmap_dim > 0:
            # Projection discriminator: inner product with the label embedding.
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
            x_stg1 = (x_stg1 * cmap).sum(dim=1, keepdim=True) * (
                1 / np.sqrt(self.cmap_dim)
            )

        return x, x_stg1
# Pretrained MAT (Places 512) generator checkpoint; both the URL and the MD5
# can be overridden via environment variables (e.g. to use a mirror).
MAT_MODEL_URL = os.environ.get(
    "MAT_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_mat/Places_512_FullData_G.pth",
)
MAT_MODEL_MD5 = os.environ.get("MAT_MODEL_MD5", "8ca927835fa3f5e21d65ffcb165377ed")
class MAT(InpaintModel):
    """MAT (mask-aware transformer) inpainting model wrapper.

    Runs the pretrained Places-512 generator with a fixed latent and label so
    repeated runs on the same input are deterministic.
    """

    name = "mat"
    min_size = 512
    pad_mod = 512
    pad_to_square = True

    def init_model(self, device, **kwargs):
        seed = 240  # pick up a random number
        set_seed(seed)

        # Use fp16 only on CUDA and when not disabled by the caller.
        fp16 = not kwargs.get("no_half", False)
        use_gpu = "cuda" in str(device) and torch.cuda.is_available()
        self.torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32

        G = Generator(
            z_dim=512,
            c_dim=0,
            w_dim=512,
            img_resolution=512,
            img_channels=3,
            mapping_kwargs={"torch_dtype": self.torch_dtype},
        ).to(self.torch_dtype)
        # fmt: off
        self.model = load_model(G, MAT_MODEL_URL, device, MAT_MODEL_MD5)
        # Fixed latent and (empty) label reused for every forward pass.
        self.z = torch.from_numpy(np.random.randn(1, G.z_dim)).to(self.torch_dtype).to(device)
        self.label = torch.zeros([1, self.model.c_dim], device=device).to(self.torch_dtype)
        # fmt: on

    @staticmethod
    def is_downloaded() -> bool:
        return os.path.exists(get_cache_path_by_url(MAT_MODEL_URL))

    def forward(self, image, mask, config: Config):
        """Input images and output images have same size
        images: [H, W, C] RGB
        masks: [H, W] mask area == 255
        return: BGR IMAGE
        """
        image = norm_img(image)  # [0, 1]
        image = image * 2 - 1  # [0, 1] -> [-1, 1]
        # Binarize and invert: the generator keeps pixels where mask == 1,
        # so the hole (input 255) must become 0.
        mask = (mask > 127) * 255
        mask = 255 - mask
        mask = norm_img(mask)
        image = (
            torch.from_numpy(image).unsqueeze(0).to(self.torch_dtype).to(self.device)
        )
        mask = torch.from_numpy(mask).unsqueeze(0).to(self.torch_dtype).to(self.device)
        output = self.model(
            image, mask, self.z, self.label, truncation_psi=1, noise_mode="none"
        )
        # [-1, 1] float -> [0, 255] uint8, NCHW -> NHWC.
        output = (
            (output.permute(0, 2, 3, 1) * 127.5 + 127.5)
            .round()
            .clamp(0, 255)
            .to(torch.uint8)
        )
        output = output[0].cpu().numpy()
        cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return cur_res
| 62,603 | 31.336777 | 110 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/manga.py | import os
import random
import cv2
import numpy as np
import torch
import time
from loguru import logger
from lama_cleaner.helper import get_cache_path_by_url, load_jit_model
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.schema import Config
# TorchScript checkpoints for the manga pipeline: the inpainting model and the
# "erika" line-art extraction model. URLs/MD5s can be overridden via
# environment variables (e.g. to use a mirror).
MANGA_INPAINTOR_MODEL_URL = os.environ.get(
    "MANGA_INPAINTOR_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/manga/manga_inpaintor.jit",
)
MANGA_INPAINTOR_MODEL_MD5 = os.environ.get(
    "MANGA_INPAINTOR_MODEL_MD5", "7d8b269c4613b6b3768af714610da86c"
)
MANGA_LINE_MODEL_URL = os.environ.get(
    "MANGA_LINE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/manga/erika.jit",
)
MANGA_LINE_MODEL_MD5 = os.environ.get(
    "MANGA_LINE_MODEL_MD5", "0c926d5a4af8450b0d00bc5b9a095644"
)
class Manga(InpaintModel):
    """Manga inpainting: first extracts line art with the "erika" model, then
    feeds grayscale image + lines + mask + noise to a JIT inpainting model."""

    name = "manga"
    pad_mod = 16

    def init_model(self, device, **kwargs):
        self.inpaintor_model = load_jit_model(
            MANGA_INPAINTOR_MODEL_URL, device, MANGA_INPAINTOR_MODEL_MD5
        )
        self.line_model = load_jit_model(
            MANGA_LINE_MODEL_URL, device, MANGA_LINE_MODEL_MD5
        )
        # Fixed seed so the random noise input (and output) is reproducible.
        self.seed = 42

    @staticmethod
    def is_downloaded() -> bool:
        model_paths = [
            get_cache_path_by_url(MANGA_INPAINTOR_MODEL_URL),
            get_cache_path_by_url(MANGA_LINE_MODEL_URL),
        ]
        return all([os.path.exists(it) for it in model_paths])

    def forward(self, image, mask, config: Config):
        """
        image: [H, W, C] RGB
        mask: [H, W, 1]
        return: BGR IMAGE
        """
        # Seed every RNG before any random op so torch.randn_like below is
        # deterministic across calls.
        seed = self.seed
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

        gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        gray_img = torch.from_numpy(
            gray_img[np.newaxis, np.newaxis, :, :].astype(np.float32)
        ).to(self.device)
        start = time.time()
        lines = self.line_model(gray_img)
        torch.cuda.empty_cache()
        lines = torch.clamp(lines, 0, 255)
        logger.info(f"erika_model time: {time.time() - start}")

        # HWC -> NCHW, binarized to {0.0, 1.0}.
        mask = torch.from_numpy(mask[np.newaxis, :, :, :]).to(self.device)
        mask = mask.permute(0, 3, 1, 2)
        mask = torch.where(mask > 0.5, 1.0, 0.0)
        noise = torch.randn_like(mask)
        ones = torch.ones_like(mask)

        # Scale [0, 255] -> [-1, 1] for the inpainting model.
        gray_img = gray_img / 255 * 2 - 1.0
        lines = lines / 255 * 2 - 1.0

        start = time.time()
        inpainted_image = self.inpaintor_model(gray_img, lines, mask, noise, ones)
        logger.info(f"image_inpaintor_model time: {time.time() - start}")

        # [-1, 1] -> [0, 255] uint8, then grayscale -> 3-channel BGR.
        cur_res = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy()
        cur_res = (cur_res * 127.5 + 127.5).astype(np.uint8)
        cur_res = cv2.cvtColor(cur_res, cv2.COLOR_GRAY2BGR)
        return cur_res
| 2,884 | 30.358696 | 84 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/ldm.py | import os
import numpy as np
import torch
from loguru import logger
from lama_cleaner.model.base import InpaintModel
from lama_cleaner.model.ddim_sampler import DDIMSampler
from lama_cleaner.model.plms_sampler import PLMSSampler
from lama_cleaner.schema import Config, LDMSampler
torch.manual_seed(42)
import torch.nn as nn
from lama_cleaner.helper import (
download_model,
norm_img,
get_cache_path_by_url,
load_jit_model,
)
from lama_cleaner.model.utils import (
make_beta_schedule,
timestep_embedding,
)
# TorchScript checkpoints for the LDM inpainting pipeline: the cond-stage
# encoder/decoder and the diffusion model. URLs/MD5s can be overridden via
# environment variables (e.g. to use a mirror).
LDM_ENCODE_MODEL_URL = os.environ.get(
    "LDM_ENCODE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_encode.pt",
)
LDM_ENCODE_MODEL_MD5 = os.environ.get(
    "LDM_ENCODE_MODEL_MD5", "23239fc9081956a3e70de56472b3f296"
)
LDM_DECODE_MODEL_URL = os.environ.get(
    "LDM_DECODE_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_ldm/cond_stage_model_decode.pt",
)
LDM_DECODE_MODEL_MD5 = os.environ.get(
    "LDM_DECODE_MODEL_MD5", "fe419cd15a750d37a4733589d0d3585c"
)
LDM_DIFFUSION_MODEL_URL = os.environ.get(
    "LDM_DIFFUSION_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_ldm/diffusion.pt",
)
LDM_DIFFUSION_MODEL_MD5 = os.environ.get(
    "LDM_DIFFUSION_MODEL_MD5", "b0afda12bf790c03aba2a7431f11d22d"
)
class DDPM(nn.Module):
    """Classic DDPM with Gaussian diffusion, in image space.

    Inference-oriented variant: it only builds and registers the noise
    schedule buffers (betas, alphas, posterior statistics) that the samplers
    read; no training loop or loss computation lives here.
    """
    def __init__(
        self,
        device,
        timesteps=1000,
        beta_schedule="linear",
        linear_start=0.0015,
        linear_end=0.0205,
        cosine_s=0.008,
        original_elbo_weight=0.0,
        v_posterior=0.0,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
        l_simple_weight=1.0,
        parameterization="eps",  # all assuming fixed variance schedules
        use_positional_encodings=False,
    ):
        """Store hyper-parameters and register the full diffusion schedule.

        Args:
            device: torch device the schedule buffers are created on.
            timesteps: number of diffusion steps T.
            beta_schedule: schedule name forwarded to ``make_beta_schedule``.
            linear_start / linear_end: beta range for the linear schedule.
            cosine_s: offset used by the cosine schedule.
            parameterization: "eps" (model predicts noise) or "x0"
                (model predicts the clean image).
        """
        super().__init__()
        self.device = device
        self.parameterization = parameterization
        self.use_positional_encodings = use_positional_encodings
        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight
        self.register_schedule(
            beta_schedule=beta_schedule,
            timesteps=timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
        )
    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        """Compute alpha/posterior statistics and register them as buffers.

        NOTE(review): ``given_betas`` is accepted but never used — betas
        always come from ``make_beta_schedule``. Confirm no caller relies on
        passing explicit betas.
        """
        betas = make_beta_schedule(
            self.device,
            beta_schedule,
            timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s,
        )
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        # alpha_bar_{t-1}, with alpha_bar_{-1} defined as 1.
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert (
            alphas_cumprod.shape[0] == self.num_timesteps
        ), "alphas have to be defined for each timestep"
        to_torch = lambda x: torch.tensor(x, dtype=torch.float32).to(self.device)
        self.register_buffer("betas", to_torch(betas))
        self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
        self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))
        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer(
            "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
        )
        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (
            1.0 - alphas_cumprod_prev
        ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer("posterior_variance", to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer(
            "posterior_log_variance_clipped",
            to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
        )
        self.register_buffer(
            "posterior_mean_coef1",
            to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),
        )
        self.register_buffer(
            "posterior_mean_coef2",
            to_torch(
                (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)
            ),
        )
        # Per-timestep weights for the variational lower bound.
        if self.parameterization == "eps":
            lvlb_weights = self.betas**2 / (
                2
                * self.posterior_variance
                * to_torch(alphas)
                * (1 - self.alphas_cumprod)
            )
        elif self.parameterization == "x0":
            # NOTE(review): this branch mixes np.sqrt with torch.Tensor and is
            # dead under the default "eps" parameterization — verify before
            # relying on the "x0" path.
            lvlb_weights = (
                0.5
                * np.sqrt(torch.Tensor(alphas_cumprod))
                / (2.0 * 1 - torch.Tensor(alphas_cumprod))
            )
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer("lvlb_weights", lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()
class LatentDiffusion(DDPM):
    """DDPM that operates in the latent space of a pretrained autoencoder.

    Wraps a (TorchScript) ``diffusion_model`` and exposes :meth:`apply_model`,
    the only entry point the DDIM/PLMS samplers call.
    """
    def __init__(
        self,
        diffusion_model,
        device,
        cond_stage_key="image",
        cond_stage_trainable=False,
        concat_mode=True,
        scale_factor=1.0,
        scale_by_std=False,
        *args,
        **kwargs,
    ):
        # These must be set BEFORE super().__init__: the base constructor
        # calls register_schedule(), and our override reads num_timesteps_cond.
        self.num_timesteps_cond = 1
        self.scale_by_std = scale_by_std
        super().__init__(device, *args, **kwargs)
        self.diffusion_model = diffusion_model
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        self.num_downs = 2
        self.scale_factor = scale_factor
    def make_cond_schedule(
        self,
    ):
        # Map all num_timesteps diffusion steps onto num_timesteps_cond
        # conditioning steps; only invoked when the cond schedule is shortened.
        self.cond_ids = torch.full(
            size=(self.num_timesteps,),
            fill_value=self.num_timesteps - 1,
            dtype=torch.long,
        )
        ids = torch.round(
            torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)
        ).long()
        self.cond_ids[: self.num_timesteps_cond] = ids
    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        """Register the base schedule, then optionally shorten the cond schedule."""
        super().register_schedule(
            given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s
        )
        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()
    def apply_model(self, x_noisy, t, cond):
        """Run the diffusion model on ``x_noisy`` at timesteps ``t`` with conditioning ``cond``."""
        # x_recon = self.model(x_noisy, t, cond['c_concat'][0])  # cond['c_concat'][0].shape 1,4,128,128
        # 256 is the embedding width the scripted diffusion model expects.
        t_emb = timestep_embedding(x_noisy.device, t, 256, repeat_only=False)
        x_recon = self.diffusion_model(x_noisy, t_emb, cond)
        return x_recon
class LDM(InpaintModel):
    """Latent Diffusion inpainting model backed by TorchScript checkpoints."""
    name = "ldm"
    # Inputs are padded so H and W are multiples of 32 (latent is 4x downsampled twice).
    pad_mod = 32
    def __init__(self, device, fp16: bool = True, **kwargs):
        # fp16 must be recorded before super().__init__, which calls init_model().
        self.fp16 = fp16
        super().__init__(device)
        self.device = device
    def init_model(self, device, **kwargs):
        """Load the scripted diffusion, decode and encode models (downloading if needed)."""
        self.diffusion_model = load_jit_model(
            LDM_DIFFUSION_MODEL_URL, device, LDM_DIFFUSION_MODEL_MD5
        )
        self.cond_stage_model_decode = load_jit_model(
            LDM_DECODE_MODEL_URL, device, LDM_DECODE_MODEL_MD5
        )
        self.cond_stage_model_encode = load_jit_model(
            LDM_ENCODE_MODEL_URL, device, LDM_ENCODE_MODEL_MD5
        )
        # Half precision only on CUDA; CPU fp16 inference is unsupported/slow.
        if self.fp16 and "cuda" in str(device):
            self.diffusion_model = self.diffusion_model.half()
            self.cond_stage_model_decode = self.cond_stage_model_decode.half()
            self.cond_stage_model_encode = self.cond_stage_model_encode.half()
        self.model = LatentDiffusion(self.diffusion_model, device)
    @staticmethod
    def is_downloaded() -> bool:
        """Return True when all three checkpoint files exist in the local cache."""
        model_paths = [
            get_cache_path_by_url(LDM_DIFFUSION_MODEL_URL),
            get_cache_path_by_url(LDM_DECODE_MODEL_URL),
            get_cache_path_by_url(LDM_ENCODE_MODEL_URL),
        ]
        return all([os.path.exists(it) for it in model_paths])
    @torch.cuda.amp.autocast()
    def forward(self, image, mask, config: Config):
        """
        image: [H, W, C] RGB
        mask: [H, W, 1]
        return: BGR IMAGE
        """
        # image [1,3,512,512] float32
        # mask: [1,1,512,512] float32
        # masked_image: [1,3,512,512] float32
        if config.ldm_sampler == LDMSampler.ddim:
            sampler = DDIMSampler(self.model)
        elif config.ldm_sampler == LDMSampler.plms:
            sampler = PLMSSampler(self.model)
        else:
            raise ValueError()
        steps = config.ldm_steps
        image = norm_img(image)
        mask = norm_img(mask)
        # Binarize the mask: 1 = region to inpaint.
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        image = torch.from_numpy(image).unsqueeze(0).to(self.device)
        mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
        masked_image = (1 - mask) * image
        # Shift [0,1] inputs to the [-1,1] range the latent models expect.
        mask = self._norm(mask)
        masked_image = self._norm(masked_image)
        c = self.cond_stage_model_encode(masked_image)
        torch.cuda.empty_cache()
        # Append the downscaled mask as a 4th conditioning channel.
        cc = torch.nn.functional.interpolate(mask, size=c.shape[-2:])  # 1,1,128,128
        c = torch.cat((c, cc), dim=1)  # 1,4,128,128
        shape = (c.shape[1] - 1,) + c.shape[2:]
        samples_ddim = sampler.sample(
            steps=steps, conditioning=c, batch_size=c.shape[0], shape=shape
        )
        torch.cuda.empty_cache()
        x_samples_ddim = self.cond_stage_model_decode(
            samples_ddim
        )  # samples_ddim: 1, 3, 128, 128 float32
        torch.cuda.empty_cache()
        # image = torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)
        # mask = torch.clamp((mask + 1.0) / 2.0, min=0.0, max=1.0)
        inpainted_image = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
        # inpainted = (1 - mask) * image + mask * predicted_image
        inpainted_image = inpainted_image.cpu().numpy().transpose(0, 2, 3, 1)[0] * 255
        # RGB -> BGR via channel reversal, as promised by the docstring.
        inpainted_image = inpainted_image.astype(np.uint8)[:, :, ::-1]
        return inpainted_image
    def _norm(self, tensor):
        # Map [0, 1] -> [-1, 1].
        return tensor * 2.0 - 1.0
| 11,275 | 33.169697 | 116 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/paint_by_example.py | import PIL
import PIL.Image
import cv2
import torch
from diffusers import DiffusionPipeline
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import set_seed
from lama_cleaner.schema import Config
class PaintByExample(DiffusionInpaintModel):
    """Diffusers-based "Paint by Example" inpainting model (example-image guided)."""
    name = "paint_by_example"
    pad_mod = 8
    min_size = 512
    def init_model(self, device: torch.device, **kwargs):
        """Build the diffusers pipeline, honoring half-precision / NSFW / offload flags."""
        fp16 = not kwargs.get('no_half', False)
        use_gpu = device == torch.device('cuda') and torch.cuda.is_available()
        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32
        model_kwargs = {"local_files_only": kwargs.get('local_files_only', False)}
        # cpu_offload implies the safety checker must be dropped as well.
        if kwargs['disable_nsfw'] or kwargs.get('cpu_offload', False):
            logger.info("Disable Paint By Example Model NSFW checker")
            model_kwargs.update(dict(
                safety_checker=None,
                requires_safety_checker=False
            ))
        self.model = DiffusionPipeline.from_pretrained(
            "Fantasy-Studio/Paint-by-Example",
            torch_dtype=torch_dtype,
            **model_kwargs
        )
        self.model.enable_attention_slicing()
        if kwargs.get('enable_xformers', False):
            self.model.enable_xformers_memory_efficient_attention()
        # TODO: gpu_id
        if kwargs.get('cpu_offload', False) and use_gpu:
            # Keep only the image encoder resident on GPU; offload the rest.
            self.model.image_encoder = self.model.image_encoder.to(device)
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)
    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        """
        output = self.model(
            image=PIL.Image.fromarray(image),
            mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"),
            example_image=config.paint_by_example_example_image,
            num_inference_steps=config.paint_by_example_steps,
            output_type='np.array',
            generator=torch.manual_seed(config.paint_by_example_seed)
        ).images[0]
        output = (output * 255).round().astype("uint8")
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return output
    def forward_post_process(self, result, image, mask, config):
        """Optionally match histograms to the source and feather the mask edge."""
        if config.paint_by_example_match_histograms:
            result = self._match_histograms(result, image[:, :, ::-1], mask)
        if config.paint_by_example_mask_blur != 0:
            # Kernel size must be odd for GaussianBlur.
            k = 2 * config.paint_by_example_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)
        return result, image, mask
    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
| 2,934 | 35.6875 | 88 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/plms_sampler.py | # From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py
import torch
import numpy as np
from lama_cleaner.model.utils import make_ddim_timesteps, make_ddim_sampling_parameters, noise_like
from tqdm import tqdm
class PLMSSampler(object):
    """Pseudo Linear Multistep (PLMS) sampler for latent diffusion models.

    Ported from CompVis/latent-diffusion. Holds DDIM-style schedule tensors as
    plain attributes (see :meth:`register_buffer`) and advances the latent with
    an Adams-Bashforth multistep estimate of the predicted noise.
    """
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
    def register_buffer(self, name, attr):
        # Not an nn.Module buffer — just stores the tensor as an attribute.
        setattr(self, name, attr)
    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        """Precompute the sub-sampled timestep schedule and its alpha/sigma tensors."""
        # PLMS is only valid for the deterministic (eta == 0) schedule.
        if ddim_eta != 0:
            raise ValueError('ddim_eta must be 0 for PLMS')
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                    1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
    @torch.no_grad()
    def sample(self,
               steps,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=False,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,
               # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
               **kwargs
               ):
        """Public entry point: build the schedule, then run PLMS sampling.

        Returns the final latent tensor of shape (batch_size, C, H, W).
        """
        # Sanity-check that conditioning batch size matches the requested one.
        if conditioning is not None:
            if isinstance(conditioning, dict):
                cbs = conditioning[list(conditioning.keys())[0]].shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
        self.make_schedule(ddim_num_steps=steps, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for PLMS sampling is {size}')
        samples = self.plms_sampling(conditioning, size,
                                     callback=callback,
                                     img_callback=img_callback,
                                     quantize_denoised=quantize_x0,
                                     mask=mask, x0=x0,
                                     ddim_use_original_steps=False,
                                     noise_dropout=noise_dropout,
                                     temperature=temperature,
                                     score_corrector=score_corrector,
                                     corrector_kwargs=corrector_kwargs,
                                     x_T=x_T,
                                     log_every_t=log_every_t,
                                     unconditional_guidance_scale=unconditional_guidance_scale,
                                     unconditional_conditioning=unconditional_conditioning,
                                     )
        return samples
    @torch.no_grad()
    def plms_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, ):
        """Iterate over the (reversed) timestep schedule, refining the latent.

        Keeps a rolling window of up to 3 previous noise estimates
        (``old_eps``) for the multistep update in :meth:`p_sample_plms`.
        """
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]
        time_range = list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running PLMS Sampling with {total_steps} timesteps")
        iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
        old_eps = []
        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # t for the look-ahead evaluation used by the 2nd-order startup step.
            ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                # Keep known pixels from the (noised) original; sample only in the hole.
                img = img_orig * mask + (1. - mask) * img
            outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      old_eps=old_eps, t_next=ts_next)
            img, pred_x0, e_t = outs
            old_eps.append(e_t)
            if len(old_eps) >= 4:
                old_eps.pop(0)
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)
        return img
    @torch.no_grad()
    def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
        """One PLMS update: estimate eps, combine with history, step the latent.

        Returns (x_prev, pred_x0, e_t) where e_t is the raw (non-multistep)
        noise estimate that gets appended to ``old_eps`` by the caller.
        """
        b, *_, device = *x.shape, x.device
        def get_model_output(x, t):
            # Classifier-free guidance: run cond/uncond in one batched call.
            if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
                e_t = self.model.apply_model(x, t, c)
            else:
                x_in = torch.cat([x] * 2)
                t_in = torch.cat([t] * 2)
                c_in = torch.cat([unconditional_conditioning, c])
                e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
                e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
            if score_corrector is not None:
                assert self.model.parameterization == "eps"
                e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
            return e_t
        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        def get_x_prev_and_pred_x0(e_t, index):
            # select parameters corresponding to the currently considered timestep
            a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
            a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
            sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
            sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
            # current prediction for x_0
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
            if quantize_denoised:
                pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
            # direction pointing to x_t
            dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
            if noise_dropout > 0.:
                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
            return x_prev, pred_x0
        e_t = get_model_output(x, t)
        if len(old_eps) == 0:
            # Pseudo Improved Euler (2nd order)
            x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
            e_t_next = get_model_output(x_prev, t_next)
            e_t_prime = (e_t + e_t_next) / 2
        elif len(old_eps) == 1:
            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (3 * e_t - old_eps[-1]) / 2
        elif len(old_eps) == 2:
            # 3nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
        elif len(old_eps) >= 3:
            # 4nd order Pseudo Linear Multistep (Adams-Bashforth)
            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
        x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
        return x_prev, pred_x0, e_t
| 11,851 | 51.442478 | 131 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/fcf.py | import os
import random
import cv2
import torch
import numpy as np
import torch.fft as fft
from lama_cleaner.schema import Config
from lama_cleaner.helper import (
load_model,
get_cache_path_by_url,
norm_img,
boxes_from_mask,
resize_max_size,
)
from lama_cleaner.model.base import InpaintModel
from torch import conv2d, nn
import torch.nn.functional as F
from lama_cleaner.model.utils import (
setup_filter,
_parse_scaling,
_parse_padding,
Conv2dLayer,
FullyConnectedLayer,
MinibatchStdLayer,
activation_funcs,
conv2d_resample,
bias_act,
upsample2d,
normalize_2nd_moment,
downsample2d,
)
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl="cuda"):
    """Upsample, FIR-filter, and downsample a batch of 2D images.

    The ``impl`` argument is accepted for API compatibility with the original
    StyleGAN2 op but is ignored here — the pure-PyTorch reference path
    (`_upfirdn2d_ref`) is always used.
    """
    assert isinstance(x, torch.Tensor)
    filter_kwargs = dict(
        up=up,
        down=down,
        padding=padding,
        flip_filter=flip_filter,
        gain=gain,
    )
    return _upfirdn2d_ref(x, f, **filter_kwargs)
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.

    Pipeline: zero-insert upsample -> pad/crop -> depthwise FIR convolution
    -> stride-based downsample. `f` may be a separable 1-D filter or a full
    2-D filter; `gain` scales the filter energy.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert f.dtype == torch.float32 and not f.requires_grad
    batch_size, num_channels, in_height, in_width = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Upsample by inserting zeros.
    x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
    # Pad or crop.
    x = torch.nn.functional.pad(
        x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]
    )
    # Negative padding means cropping from the corresponding side.
    x = x[
        :,
        :,
        max(-pady0, 0) : x.shape[2] - max(-pady1, 0),
        max(-padx0, 0) : x.shape[3] - max(-padx1, 0),
    ]
    # Setup filter.
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))
    # Convolve with the filter.
    # Depthwise (grouped) conv; separable filters are applied as two 1-D passes.
    f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d(input=x, weight=f, groups=num_channels)
    else:
        x = conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels)
        x = conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels)
    # Downsample by throwing away pixels.
    x = x[:, :, ::downy, ::downx]
    return x
class EncoderEpilogue(torch.nn.Module):
    """Final stage of the FcF encoder: maps the 4x4 feature map to a latent vector."""
    def __init__(
        self,
        in_channels,  # Number of input channels.
        cmap_dim,  # Dimensionality of mapped conditioning label, 0 = no label.
        z_dim,  # Output Latent (Z) dimensionality.
        resolution,  # Resolution of this block.
        img_channels,  # Number of input color channels.
        architecture="resnet",  # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size=4,  # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels=1,  # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture
        if architecture == "skip":
            self.fromrgb = Conv2dLayer(
                self.img_channels, in_channels, kernel_size=1, activation=activation
            )
        self.mbstd = (
            MinibatchStdLayer(
                group_size=mbstd_group_size, num_channels=mbstd_num_channels
            )
            if mbstd_num_channels > 0
            else None
        )
        self.conv = Conv2dLayer(
            in_channels + mbstd_num_channels,
            in_channels,
            kernel_size=3,
            activation=activation,
            conv_clamp=conv_clamp,
        )
        self.fc = FullyConnectedLayer(
            in_channels * (resolution**2), z_dim, activation=activation
        )
        self.dropout = torch.nn.Dropout(p=0.5)
    def forward(self, x, cmap, force_fp32=False):
        """Return (latent, const_e) where const_e is the pre-flatten conv feature map."""
        _ = force_fp32  # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format
        # FromRGB.
        x = x.to(dtype=dtype, memory_format=memory_format)
        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        const_e = self.conv(x)
        x = self.fc(const_e.flatten(1))
        x = self.dropout(x)
        # Conditioning.
        if self.cmap_dim > 0:
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
        assert x.dtype == dtype
        return x, const_e
class EncoderBlock(torch.nn.Module):
    """One resolution stage of the FcF encoder (conv, conv+down, optional resnet skip)."""
    def __init__(
        self,
        in_channels,  # Number of input channels, 0 = first block.
        tmp_channels,  # Number of intermediate channels.
        out_channels,  # Number of output channels.
        resolution,  # Resolution of this block.
        img_channels,  # Number of input color channels.
        first_layer_idx,  # Index of the first layer.
        architecture="skip",  # Architecture: 'orig', 'skip', 'resnet'.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16=False,  # Use FP16 for this block?
        fp16_channels_last=False,  # Use channels-last memory format with FP16?
        freeze_layers=0,  # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        # +1 extra input channel — presumably the inpainting mask appended to
        # the RGB image; TODO confirm against the caller.
        self.img_channels = img_channels + 1
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = use_fp16 and fp16_channels_last
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.num_layers = 0
        def trainable_gen():
            # Yields True for layers past the Freeze-D cutoff; also counts layers.
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = layer_idx >= freeze_layers
                self.num_layers += 1
                yield trainable
        trainable_iter = trainable_gen()
        if in_channels == 0:
            self.fromrgb = Conv2dLayer(
                self.img_channels,
                tmp_channels,
                kernel_size=1,
                activation=activation,
                trainable=next(trainable_iter),
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
            )
        self.conv0 = Conv2dLayer(
            tmp_channels,
            tmp_channels,
            kernel_size=3,
            activation=activation,
            trainable=next(trainable_iter),
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
        )
        self.conv1 = Conv2dLayer(
            tmp_channels,
            out_channels,
            kernel_size=3,
            activation=activation,
            down=2,
            trainable=next(trainable_iter),
            resample_filter=resample_filter,
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
        )
        if architecture == "resnet":
            self.skip = Conv2dLayer(
                tmp_channels,
                out_channels,
                kernel_size=1,
                bias=False,
                down=2,
                trainable=next(trainable_iter),
                resample_filter=resample_filter,
                channels_last=self.channels_last,
            )
    def forward(self, x, img, force_fp32=False):
        """Return (x, img, feat): downsampled features, (maybe) downsampled image, and the conv0 feature map."""
        # dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        # fp16 path intentionally disabled; everything runs in float32.
        dtype = torch.float32
        memory_format = (
            torch.channels_last
            if self.channels_last and not force_fp32
            else torch.contiguous_format
        )
        # Input.
        if x is not None:
            x = x.to(dtype=dtype, memory_format=memory_format)
        # FromRGB.
        if self.in_channels == 0:
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            img = (
                downsample2d(img, self.resample_filter)
                if self.architecture == "skip"
                else None
            )
        # Main layers.
        if self.architecture == "resnet":
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            feat = x.clone()
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            feat = x.clone()
            x = self.conv1(x)
        assert x.dtype == dtype
        return x, img, feat
class EncoderNetwork(torch.nn.Module):
    """Full FcF encoder: a pyramid of EncoderBlocks plus the 4x4 epilogue.

    Returns per-resolution feature maps (used later as skip connections) and a
    zero latent placeholder for co-modulation.
    """
    def __init__(
        self,
        c_dim,  # Conditioning label (C) dimensionality.
        z_dim,  # Input latent (Z) dimensionality.
        img_resolution,  # Input resolution.
        img_channels,  # Number of input color channels.
        architecture="orig",  # Architecture: 'orig', 'skip', 'resnet'.
        channel_base=16384,  # Overall multiplier for the number of channels.
        channel_max=512,  # Maximum number of channels in any layer.
        num_fp16_res=0,  # Use FP16 for the N highest resolutions.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim=None,  # Dimensionality of mapped conditioning label, None = default.
        block_kwargs={},  # Arguments for DiscriminatorBlock.
        mapping_kwargs={},  # Arguments for MappingNetwork.
        epilogue_kwargs={},  # Arguments for EncoderEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.z_dim = z_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Resolutions from full size down to 8 (the 4x4 stage is the epilogue).
        self.block_resolutions = [
            2**i for i in range(self.img_resolution_log2, 2, -1)
        ]
        channels_dict = {
            res: min(channel_base // res, channel_max)
            for res in self.block_resolutions + [4]
        }
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0
        common_kwargs = dict(
            img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp
        )
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = res >= fp16_resolution
            # fp16 is computed above but unconditionally disabled on the next
            # line; the whole encoder runs in float32.
            use_fp16 = False
            block = EncoderBlock(
                in_channels,
                tmp_channels,
                out_channels,
                resolution=res,
                first_layer_idx=cur_layer_idx,
                use_fp16=use_fp16,
                **block_kwargs,
                **common_kwargs,
            )
            setattr(self, f"b{res}", block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(
                z_dim=0,
                c_dim=c_dim,
                w_dim=cmap_dim,
                num_ws=None,
                w_avg_beta=None,
                **mapping_kwargs,
            )
        self.b4 = EncoderEpilogue(
            channels_dict[4],
            cmap_dim=cmap_dim,
            z_dim=z_dim * 2,
            resolution=4,
            **epilogue_kwargs,
            **common_kwargs,
        )
    def forward(self, img, c, **block_kwargs):
        """Return (x, z, feats): final features, zero latent, and per-resolution feature dict."""
        x = None
        feats = {}
        for res in self.block_resolutions:
            block = getattr(self, f"b{res}")
            x, img, feat = block(x, img, **block_kwargs)
            feats[res] = feat
        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x, const_e = self.b4(x, cmap)
        feats[4] = const_e
        B, _ = x.shape
        z = torch.zeros(
            (B, self.z_dim), requires_grad=False, dtype=x.dtype, device=x.device
        )  ## Noise for Co-Modulation
        return x, z, feats
def fma(a, b, c):  # => a * b + c
    """Fused multiply-add ``a * b + c`` with a memory-efficient custom backward."""
    return _FusedMultiplyAdd.apply(a, b, c)
class _FusedMultiplyAdd(torch.autograd.Function):  # a * b + c
    """autograd.Function computing ``a * b + c`` via torch.addcmul.

    Saves only ``a`` and ``b`` (and c's shape) for backward, and un-broadcasts
    each gradient back to its input's original shape.
    """
    @staticmethod
    def forward(ctx, a, b, c):  # pylint: disable=arguments-differ
        out = torch.addcmul(c, a, b)
        ctx.save_for_backward(a, b)
        # c itself is not needed for gradients — only its shape.
        ctx.c_shape = c.shape
        return out
    @staticmethod
    def backward(ctx, dout):  # pylint: disable=arguments-differ
        a, b = ctx.saved_tensors
        c_shape = ctx.c_shape
        da = None
        db = None
        dc = None
        # d(a*b+c)/da = b, /db = a, /dc = 1 — each reduced over broadcast axes.
        if ctx.needs_input_grad[0]:
            da = _unbroadcast(dout * b, a.shape)
        if ctx.needs_input_grad[1]:
            db = _unbroadcast(dout * a, b.shape)
        if ctx.needs_input_grad[2]:
            dc = _unbroadcast(dout, c_shape)
        return da, db, dc
def _unbroadcast(x, shape):
extra_dims = x.ndim - len(shape)
assert extra_dims >= 0
dim = [
i
for i in range(x.ndim)
if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)
]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape(-1, *x.shape[extra_dims + 1 :])
assert x.shape == shape
return x
def modulated_conv2d(
    x,  # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,  # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,  # Modulation coefficients of shape [batch_size, in_channels].
    noise=None,  # Optional noise tensor to add to the output activations.
    up=1,  # Integer upsampling factor.
    down=1,  # Integer downsampling factor.
    padding=0,  # Padding with respect to the upsampled image.
    resample_filter=None,
    # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate=True,  # Apply weight demodulation?
    flip_weight=True,  # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv=True,  # Perform modulation, convolution, and demodulation as a single fused operation?
):
    """StyleGAN2-style modulated convolution.

    Scales the convolution weights per-sample by ``styles``, optionally
    demodulates (normalizes) them, and runs the convolution either fused
    (grouped conv over the whole batch) or unfused (scale activations
    before/after a shared-weight conv).
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    # Pre-normalize inputs to avoid FP16 overflow.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (
            1
            / np.sqrt(in_channels * kh * kw)
            / weight.norm(float("inf"), dim=[1, 2, 3], keepdim=True)
        )  # max_Ikk
        styles = styles / styles.norm(float("inf"), dim=1, keepdim=True)  # max_I
    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0)  # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1)  # [NOIkk]
    if demodulate:
        dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()  # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1)  # [NOIkk]
    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        # BUG FIX: `conv2d_resample` is imported as a function from
        # lama_cleaner.model.utils (see module imports); the previous
        # `conv2d_resample.conv2d_resample(...)` attribute access would raise
        # at runtime. Call it directly, matching the fused path below.
        x = conv2d_resample(
            x=x,
            w=weight.to(x.dtype),
            f=resample_filter,
            up=up,
            down=down,
            padding=padding,
            flip_weight=flip_weight,
        )
        if demodulate and noise is not None:
            x = fma(
                x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype)
            )
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x
    # Execute as one fused op using grouped convolution.
    batch_size = int(batch_size)
    # Fold the batch into the channel axis and use groups=batch_size so each
    # sample is convolved with its own modulated weights.
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample(
        x=x,
        w=w.to(x.dtype),
        f=resample_filter,
        up=up,
        down=down,
        padding=padding,
        groups=batch_size,
        flip_weight=flip_weight,
    )
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
class SynthesisLayer(torch.nn.Module):
    """StyleGAN2-style synthesis layer: style-modulated convolution, optional
    upsampling and per-pixel noise, followed by a leaky-ReLU activation."""

    def __init__(
        self,
        in_channels,  # Number of input channels.
        out_channels,  # Number of output channels.
        w_dim,  # Intermediate latent (W) dimensionality.
        resolution,  # Resolution of this layer.
        kernel_size=3,  # Convolution kernel size.
        up=1,  # Integer upsampling factor.
        use_noise=True,  # Enable noise input?
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last=False,  # Use channels_last format for the weights?
    ):
        super().__init__()
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = activation_funcs[activation].def_gain
        # Affine mapping from W to per-input-channel modulation styles.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        memory_format = (
            torch.channels_last if channels_last else torch.contiguous_format
        )
        self.weight = torch.nn.Parameter(
            torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
                memory_format=memory_format
            )
        )
        if use_noise:
            # Fixed noise pattern used when noise_mode == 'const'.
            self.register_buffer("noise_const", torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        # NOTE(review): `bias` is declared but never applied in forward() below
        # (activation is applied without a bias add) — presumably intentional in
        # this port; confirm against the pretrained checkpoint.
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode="none", fused_modconv=True, gain=1):
        assert noise_mode in ["random", "const", "none"]
        in_resolution = self.resolution // self.up
        styles = self.affine(w)

        noise = None
        if self.use_noise and noise_mode == "random":
            noise = (
                torch.randn(
                    [x.shape[0], 1, self.resolution, self.resolution], device=x.device
                )
                * self.noise_strength
            )
        if self.use_noise and noise_mode == "const":
            noise = self.noise_const * self.noise_strength

        flip_weight = self.up == 1  # slightly faster
        x = modulated_conv2d(
            x=x,
            weight=self.weight,
            styles=styles,
            noise=noise,
            up=self.up,
            padding=self.padding,
            resample_filter=self.resample_filter,
            flip_weight=flip_weight,
        )

        act_gain = self.act_gain * gain
        act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        # NOTE(review): activation is hardcoded to leaky-ReLU(0.2) regardless of
        # self.activation; gain/clamp are applied manually afterwards.
        x = F.leaky_relu(x, negative_slope=0.2, inplace=False)
        if act_gain != 1:
            x = x * act_gain
        if act_clamp is not None:
            x = x.clamp(-act_clamp, act_clamp)
        return x
class ToRGBLayer(torch.nn.Module):
    """Projects a feature map to image channels via a style-modulated conv
    (no demodulation), followed by a bias/clamp step."""

    def __init__(
        self,
        in_channels,
        out_channels,
        w_dim,
        kernel_size=1,
        conv_clamp=None,
        channels_last=False,
    ):
        super().__init__()
        self.conv_clamp = conv_clamp
        # Affine mapping from W to per-input-channel styles.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.contiguous_format
        if channels_last:
            fmt = torch.channels_last
        init = torch.randn([out_channels, in_channels, kernel_size, kernel_size])
        self.weight = torch.nn.Parameter(init.to(memory_format=fmt))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        # Equalized-learning-rate gain, folded into the styles at run time.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size**2))

    def forward(self, x, w, fused_modconv=True):
        """Modulate, convolve, then add bias (optionally clamped)."""
        scaled_styles = self.affine(w) * self.weight_gain
        out = modulated_conv2d(
            x=x,
            weight=self.weight,
            styles=scaled_styles,
            demodulate=False,
            fused_modconv=fused_modconv,
        )
        return bias_act(out, self.bias.to(out.dtype), clamp=self.conv_clamp)
class SynthesisForeword(torch.nn.Module):
    """Seed block of the synthesis network: expands the global latent into a
    4x4 feature map, fuses it with the encoder's 4x4 skip features, and (for
    the 'skip' architecture) emits the first RGB image."""

    def __init__(
        self,
        z_dim,  # Output Latent (Z) dimensionality.
        resolution,  # Resolution of this block.
        in_channels,
        img_channels,  # Number of input color channels.
        architecture="skip",  # Architecture: 'orig', 'skip', 'resnet'.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
    ):
        super().__init__()
        self.in_channels = in_channels
        self.z_dim = z_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture

        # Project the latent to a (z_dim // 2)-channel 4x4 map.
        self.fc = FullyConnectedLayer(
            self.z_dim, (self.z_dim // 2) * 4 * 4, activation=activation
        )
        # w_dim = (z_dim // 2) * 3 matches cat([ws slice, x_global]) built in forward().
        self.conv = SynthesisLayer(
            self.in_channels, self.in_channels, w_dim=(z_dim // 2) * 3, resolution=4
        )

        if architecture == "skip":
            self.torgb = ToRGBLayer(
                self.in_channels,
                self.img_channels,
                kernel_size=1,
                w_dim=(z_dim // 2) * 3,
            )

    def forward(self, x, ws, feats, img, force_fp32=False):
        _ = force_fp32  # unused
        dtype = torch.float32
        memory_format = torch.contiguous_format

        # Keep the raw latent; it is concatenated into every modulation vector.
        x_global = x.clone()
        # ToRGB.
        x = self.fc(x)
        x = x.view(-1, self.z_dim // 2, 4, 4)
        x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        x_skip = feats[4].clone()  # encoder features at resolution 4
        x = x + x_skip

        mod_vector = []
        mod_vector.append(ws[:, 0])
        mod_vector.append(x_global.clone())
        mod_vector = torch.cat(mod_vector, dim=1)

        x = self.conv(x, mod_vector)

        mod_vector = []
        mod_vector.append(ws[:, 2 * 2 - 3])  # i.e. ws[:, 1]
        mod_vector.append(x_global.clone())
        mod_vector = torch.cat(mod_vector, dim=1)

        if self.architecture == "skip":
            img = self.torgb(x, mod_vector)
            img = img.to(dtype=torch.float32, memory_format=torch.contiguous_format)

        assert x.dtype == dtype
        return x, img
class SELayer(nn.Module):
    """Squeeze-and-Excitation: learns per-channel gates from globally pooled
    features and rescales the input channel-wise."""

    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        squeezed = channel // reduction
        self.fc = nn.Sequential(
            nn.Linear(channel, squeezed, bias=False),
            nn.ReLU(inplace=False),
            nn.Linear(squeezed, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Scale each channel of x by its learned gate in [0, 1]."""
        batch, channels = x.shape[0], x.shape[1]
        pooled = self.avg_pool(x).view(batch, channels)
        gates = self.fc(pooled).view(batch, channels, 1, 1)
        return x * gates.expand_as(x)
class FourierUnit(nn.Module):
    """Global receptive-field block: real FFT -> 1x1 conv over the stacked
    real/imag channels -> inverse real FFT back to the spatial domain."""

    def __init__(
        self,
        in_channels,
        out_channels,
        groups=1,
        spatial_scale_factor=None,  # optional rescale before FFT (restored after)
        spatial_scale_mode="bilinear",
        spectral_pos_encoding=False,  # prepend normalized (row, col) coordinate maps
        use_se=False,
        se_kwargs=None,
        ffc3d=False,  # FFT over (depth, height, width) instead of (height, width)
        fft_norm="ortho",
    ):
        # bn_layer not used
        super(FourierUnit, self).__init__()
        self.groups = groups

        # Works on interleaved (real, imag) channels, hence the *2; two extra
        # input channels when spectral positional encoding is enabled.
        self.conv_layer = torch.nn.Conv2d(
            in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0),
            out_channels=out_channels * 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=self.groups,
            bias=False,
        )
        self.relu = torch.nn.ReLU(inplace=False)

        # squeeze and excitation block
        self.use_se = use_se
        if use_se:
            if se_kwargs is None:
                se_kwargs = {}
            self.se = SELayer(self.conv_layer.in_channels, **se_kwargs)

        self.spatial_scale_factor = spatial_scale_factor
        self.spatial_scale_mode = spatial_scale_mode
        self.spectral_pos_encoding = spectral_pos_encoding
        self.ffc3d = ffc3d
        self.fft_norm = fft_norm

    def forward(self, x):
        batch = x.shape[0]

        if self.spatial_scale_factor is not None:
            orig_size = x.shape[-2:]
            x = F.interpolate(
                x,
                scale_factor=self.spatial_scale_factor,
                mode=self.spatial_scale_mode,
                align_corners=False,
            )

        r_size = x.size()  # NOTE(review): unused
        # (batch, c, h, w/2+1, 2)
        fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1)
        ffted = fft.rfftn(x, dim=fft_dim, norm=self.fft_norm)
        # Split the complex result into a trailing real/imag axis...
        ffted = torch.stack((ffted.real, ffted.imag), dim=-1)
        ffted = ffted.permute(0, 1, 4, 2, 3).contiguous()  # (batch, c, 2, h, w/2+1)
        # ...and fold that axis into the channel axis for the 1x1 conv.
        ffted = ffted.view(
            (
                batch,
                -1,
            )
            + ffted.size()[3:]
        )

        if self.spectral_pos_encoding:
            height, width = ffted.shape[-2:]
            coords_vert = (
                torch.linspace(0, 1, height)[None, None, :, None]
                .expand(batch, 1, height, width)
                .to(ffted)
            )
            coords_hor = (
                torch.linspace(0, 1, width)[None, None, None, :]
                .expand(batch, 1, height, width)
                .to(ffted)
            )
            ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1)

        if self.use_se:
            ffted = self.se(ffted)

        ffted = self.conv_layer(ffted)  # (batch, c*2, h, w/2+1)
        ffted = self.relu(ffted)

        # Unfold channels back into (channel, real/imag) and rebuild a complex tensor.
        ffted = (
            ffted.view(
                (
                    batch,
                    -1,
                    2,
                )
                + ffted.size()[2:]
            )
            .permute(0, 1, 3, 4, 2)
            .contiguous()
        )  # (batch,c, t, h, w/2+1, 2)
        ffted = torch.complex(ffted[..., 0], ffted[..., 1])

        # Invert the FFT to the original spatial size.
        ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:]
        output = torch.fft.irfftn(
            ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm
        )

        if self.spatial_scale_factor is not None:
            output = F.interpolate(
                output,
                size=orig_size,
                mode=self.spatial_scale_mode,
                align_corners=False,
            )

        return output
class SpectralTransform(nn.Module):
    """Spectral branch of FFC: 1x1 reduce -> FourierUnit (plus an optional
    'local' Fourier unit over 2x2 sub-patches) -> 1x1 expand, with a residual
    sum around the Fourier units."""

    def __init__(
        self,
        in_channels,
        out_channels,
        stride=1,  # 2 = average-pool downsample first
        groups=1,
        enable_lfu=True,  # enable the local Fourier unit
        **fu_kwargs,
    ):
        # bn_layer not used
        super(SpectralTransform, self).__init__()
        self.enable_lfu = enable_lfu
        if stride == 2:
            self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2)
        else:
            self.downsample = nn.Identity()

        self.stride = stride
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels, out_channels // 2, kernel_size=1, groups=groups, bias=False
            ),
            # nn.BatchNorm2d(out_channels // 2),
            nn.ReLU(inplace=True),
        )
        self.fu = FourierUnit(out_channels // 2, out_channels // 2, groups, **fu_kwargs)
        if self.enable_lfu:
            self.lfu = FourierUnit(out_channels // 2, out_channels // 2, groups)
        self.conv2 = torch.nn.Conv2d(
            out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False
        )

    def forward(self, x):
        x = self.downsample(x)
        x = self.conv1(x)
        output = self.fu(x)

        if self.enable_lfu:
            # Local Fourier unit: take the first quarter of channels, tile the
            # spatial plane into a 2x2 grid of patches stacked along channels,
            # transform once, then replicate back over the full plane.
            n, c, h, w = x.shape
            split_no = 2
            split_s = h // split_no
            xs = torch.cat(
                torch.split(x[:, : c // 4], split_s, dim=-2), dim=1
            ).contiguous()
            xs = torch.cat(torch.split(xs, split_s, dim=-1), dim=1).contiguous()
            xs = self.lfu(xs)
            xs = xs.repeat(1, 1, split_no, split_no).contiguous()
        else:
            xs = 0

        # Residual sum of the reduced input, global FU output, and LFU output.
        output = self.conv2(x + output + xs)

        return output
class FFC(nn.Module):
    """Fast Fourier Convolution: splits channels into a 'local' branch
    (ordinary convs) and a 'global' branch (SpectralTransform), with four
    cross-connections (l2l, l2g, g2l, g2g). Empty branches collapse to
    nn.Identity. Input/output are (local, global) pairs; 0 stands in for an
    absent global tensor."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        ratio_gin,  # fraction of input channels routed to the global branch
        ratio_gout,  # fraction of output channels produced by the global branch
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=False,
        enable_lfu=True,
        padding_type="reflect",
        gated=False,  # learn sigmoid gates for the cross-branch paths
        **spectral_kwargs,
    ):
        super(FFC, self).__init__()

        assert stride == 1 or stride == 2, "Stride should be 1 or 2."
        self.stride = stride

        in_cg = int(in_channels * ratio_gin)
        in_cl = in_channels - in_cg
        out_cg = int(out_channels * ratio_gout)
        out_cl = out_channels - out_cg
        # groups_g = 1 if groups == 1 else int(groups * ratio_gout)
        # groups_l = 1 if groups == 1 else groups - groups_g

        self.ratio_gin = ratio_gin
        self.ratio_gout = ratio_gout
        self.global_in_num = in_cg

        # local -> local
        module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d
        self.convl2l = module(
            in_cl,
            out_cl,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode=padding_type,
        )
        # local -> global
        module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d
        self.convl2g = module(
            in_cl,
            out_cg,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode=padding_type,
        )
        # global -> local
        module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d
        self.convg2l = module(
            in_cg,
            out_cl,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode=padding_type,
        )
        # global -> global (spectral path)
        module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform
        self.convg2g = module(
            in_cg,
            out_cg,
            stride,
            1 if groups == 1 else groups // 2,
            enable_lfu,
            **spectral_kwargs,
        )

        self.gated = gated
        module = (
            nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d
        )
        self.gate = module(in_channels, 2, 1)

    def forward(self, x, fname=None):
        x_l, x_g = x if type(x) is tuple else (x, 0)
        out_xl, out_xg = 0, 0

        if self.gated:
            total_input_parts = [x_l]
            if torch.is_tensor(x_g):
                total_input_parts.append(x_g)
            total_input = torch.cat(total_input_parts, dim=1)

            gates = torch.sigmoid(self.gate(total_input))
            g2l_gate, l2g_gate = gates.chunk(2, dim=1)
        else:
            g2l_gate, l2g_gate = 1, 1

        spec_x = self.convg2g(x_g)

        if self.ratio_gout != 1:
            out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate
        if self.ratio_gout != 0:
            out_xg = self.convl2g(x_l) * l2g_gate + spec_x

        return out_xl, out_xg
class FFC_BN_ACT(nn.Module):
    """FFC followed by per-branch activations. Batch normalization is disabled
    in this port (the BN lines are intentionally commented out in the original
    code); `norm_layer` is kept only for interface compatibility."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        ratio_gin,
        ratio_gout,
        stride=1,
        padding=0,
        dilation=1,
        groups=1,
        bias=False,
        norm_layer=nn.SyncBatchNorm,  # unused; see class docstring
        activation_layer=nn.Identity,
        padding_type="reflect",
        enable_lfu=True,
        **kwargs,
    ):
        super(FFC_BN_ACT, self).__init__()
        self.ffc = FFC(
            in_channels,
            out_channels,
            kernel_size,
            ratio_gin,
            ratio_gout,
            stride,
            padding,
            dilation,
            groups,
            bias,
            enable_lfu,
            padding_type=padding_type,
            **kwargs,
        )
        # A branch's activation collapses to Identity when that branch is empty:
        # ratio_gout == 1 means no local output, ratio_gout == 0 no global output.
        lact = nn.Identity if ratio_gout == 1 else activation_layer
        gact = nn.Identity if ratio_gout == 0 else activation_layer
        # nn.Identity ignores the inplace kwarg; real activations honor it.
        self.act_l = lact(inplace=True)
        self.act_g = gact(inplace=True)

    def forward(self, x, fname=None):
        """Run FFC and activate each branch; returns a (local, global) pair."""
        x_l, x_g = self.ffc(
            x,
            fname=fname,
        )
        x_l = self.act_l(x_l)
        x_g = self.act_g(x_g)
        return x_l, x_g
class FFCResnetBlock(nn.Module):
    """Residual block of two identical FFC_BN_ACT convolutions. Accepts either
    a (local, global) tuple or a single packed tensor (when `inline=True`)."""

    def __init__(
        self,
        dim,
        padding_type,
        norm_layer,
        activation_layer=nn.ReLU,
        dilation=1,
        spatial_transform_kwargs=None,
        inline=False,
        ratio_gin=0.75,
        ratio_gout=0.75,
    ):
        super().__init__()

        def make_conv():
            # Both convolutions share the exact same configuration.
            return FFC_BN_ACT(
                dim,
                dim,
                kernel_size=3,
                padding=dilation,
                dilation=dilation,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
                padding_type=padding_type,
                ratio_gin=ratio_gin,
                ratio_gout=ratio_gout,
            )

        self.conv1 = make_conv()
        self.conv2 = make_conv()
        self.inline = inline

    def forward(self, x, fname=None):
        """Apply both convs with a residual connection around the pair."""
        num_global = self.conv1.ffc.global_in_num
        if self.inline:
            # Packed tensor: trailing channels belong to the global branch.
            x_l, x_g = x[:, :-num_global], x[:, -num_global:]
        elif type(x) is tuple:
            x_l, x_g = x
        else:
            x_l, x_g = x, 0

        res_l, res_g = x_l, x_g
        x_l, x_g = self.conv1((x_l, x_g), fname=fname)
        x_l, x_g = self.conv2((x_l, x_g), fname=fname)

        out = res_l + x_l, res_g + x_g
        if self.inline:
            out = torch.cat(out, dim=1)
        return out
class ConcatTupleLayer(nn.Module):
    """Merge a (local, global) branch pair back into one tensor."""

    def forward(self, x):
        assert isinstance(x, tuple)
        local_part, global_part = x
        assert torch.is_tensor(local_part) or torch.is_tensor(global_part)
        if torch.is_tensor(global_part):
            # Both branches present: concatenate along the channel axis.
            return torch.cat(x, dim=1)
        # Global branch is a placeholder (e.g. 0): pass the local tensor through.
        return local_part
class FFCBlock(torch.nn.Module):
    """Wraps an FFCResnetBlock for use inside the synthesis network.

    Note the stacked skips: FFCResnetBlock already adds its own residual
    internally, forward() adds the identity a second time, and the returned
    value adds `gen_ft` once more. `mask` is accepted but not used."""

    def __init__(
        self,
        dim,  # Number of output/input channels.
        kernel_size,  # Width and height of the convolution kernel.
        padding,
        ratio_gin=0.75,
        ratio_gout=0.75,
        activation="linear",  # Activation function: 'linear' -> Identity, anything else -> ReLU.
    ):
        super().__init__()
        if activation == "linear":
            self.activation = nn.Identity
        else:
            self.activation = nn.ReLU
        self.padding = padding
        self.kernel_size = kernel_size
        self.ffc_block = FFCResnetBlock(
            dim=dim,
            padding_type="reflect",
            norm_layer=nn.SyncBatchNorm,
            activation_layer=self.activation,
            dilation=1,
            ratio_gin=ratio_gin,
            ratio_gout=ratio_gout,
        )
        self.concat_layer = ConcatTupleLayer()

    def forward(self, gen_ft, mask, fname=None):
        x = gen_ft.float()

        # Split packed channels into the (local, global) pair expected by FFC.
        x_l, x_g = (
            x[:, : -self.ffc_block.conv1.ffc.global_in_num],
            x[:, -self.ffc_block.conv1.ffc.global_in_num :],
        )
        id_l, id_g = x_l, x_g

        x_l, x_g = self.ffc_block((x_l, x_g), fname=fname)
        x_l, x_g = id_l + x_l, id_g + x_g  # second residual (see docstring)
        x = self.concat_layer((x_l, x_g))

        return x + gen_ft.float()  # third residual over the raw input
class FFCSkipLayer(torch.nn.Module):
    """Thin wrapper giving an FFCBlock 'same' padding for a given kernel size.

    Note: `activation=nn.ReLU` passes the class object where FFCBlock expects
    a string; since it is != 'linear', FFCBlock selects nn.ReLU anyway, so the
    net effect is a ReLU activation."""

    def __init__(
        self,
        dim,  # Number of input/output channels.
        kernel_size=3,  # Convolution kernel size.
        ratio_gin=0.75,
        ratio_gout=0.75,
    ):
        super().__init__()
        self.padding = kernel_size // 2

        self.ffc_act = FFCBlock(
            dim=dim,
            kernel_size=kernel_size,
            activation=nn.ReLU,
            padding=self.padding,
            ratio_gin=ratio_gin,
            ratio_gout=ratio_gout,
        )

    def forward(self, gen_ft, mask, fname=None):
        # `mask` is forwarded but unused inside FFCBlock.
        x = self.ffc_act(gen_ft, mask, fname=fname)
        return x
class SynthesisBlock(torch.nn.Module):
    """One resolution level of the synthesis network: up to two modulated
    convs, encoder skip-feature fusion (optionally refined by FFC skip
    layers), and a ToRGB head feeding the progressively upsampled image."""

    def __init__(
        self,
        in_channels,  # Number of input channels, 0 = first block.
        out_channels,  # Number of output channels.
        w_dim,  # Intermediate latent (W) dimensionality.
        resolution,  # Resolution of this block.
        img_channels,  # Number of output color channels.
        is_last,  # Is this the last block?
        architecture="skip",  # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter=[
            1,
            3,
            3,
            1,
        ],  # Low-pass filter to apply when resampling activations.
        conv_clamp=None,  # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16=False,  # Use FP16 for this block?
        fp16_channels_last=False,  # Use channels-last memory format with FP16?
        **layer_kwargs,  # Arguments for SynthesisLayer.
    ):
        assert architecture in ["orig", "skip", "resnet"]
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = use_fp16 and fp16_channels_last
        self.register_buffer("resample_filter", setup_filter(resample_filter))
        self.num_conv = 0
        self.num_torgb = 0
        # Number of FFC skip layers to apply per resolution.
        self.res_ffc = {4: 0, 8: 0, 16: 0, 32: 1, 64: 1, 128: 1, 256: 1, 512: 1}

        if in_channels != 0 and resolution >= 8:
            self.ffc_skip = nn.ModuleList()
            for _ in range(self.res_ffc[resolution]):
                self.ffc_skip.append(FFCSkipLayer(dim=out_channels))

        if in_channels == 0:
            # First block starts from a learned constant.
            self.const = torch.nn.Parameter(
                torch.randn([out_channels, resolution, resolution])
            )

        if in_channels != 0:
            self.conv0 = SynthesisLayer(
                in_channels,
                out_channels,
                w_dim=w_dim * 3,
                resolution=resolution,
                up=2,
                resample_filter=resample_filter,
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
                **layer_kwargs,
            )
            self.num_conv += 1

        self.conv1 = SynthesisLayer(
            out_channels,
            out_channels,
            w_dim=w_dim * 3,
            resolution=resolution,
            conv_clamp=conv_clamp,
            channels_last=self.channels_last,
            **layer_kwargs,
        )
        self.num_conv += 1

        if is_last or architecture == "skip":
            self.torgb = ToRGBLayer(
                out_channels,
                img_channels,
                w_dim=w_dim * 3,
                conv_clamp=conv_clamp,
                channels_last=self.channels_last,
            )
            self.num_torgb += 1

        if in_channels != 0 and architecture == "resnet":
            self.skip = Conv2dLayer(
                in_channels,
                out_channels,
                kernel_size=1,
                bias=False,
                up=2,
                resample_filter=resample_filter,
                channels_last=self.channels_last,
            )

    def forward(
        self,
        x,
        mask,
        feats,  # dict of encoder features keyed by resolution
        img,
        ws,  # (mod_vector0, mod_vector1, mod_vector_rgb) for this block
        fname=None,
        force_fp32=False,
        fused_modconv=None,
        **layer_kwargs,
    ):
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        dtype = torch.float32  # NOTE(review): fp16 path is disabled; the line above is dead
        memory_format = (
            torch.channels_last
            if self.channels_last and not force_fp32
            else torch.contiguous_format
        )
        if fused_modconv is None:
            fused_modconv = (not self.training) and (
                dtype == torch.float32 or int(x.shape[0]) == 1
            )

        x = x.to(dtype=dtype, memory_format=memory_format)
        x_skip = (
            feats[self.resolution].clone().to(dtype=dtype, memory_format=memory_format)
        )

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, ws[1], fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == "resnet":
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(
                x, ws[0].clone(), fused_modconv=fused_modconv, **layer_kwargs
            )
            # Fuse the encoder skip features, optionally refined by FFC layers.
            if len(self.ffc_skip) > 0:
                mask = F.interpolate(
                    mask,
                    size=x_skip.shape[2:],
                )
                z = x + x_skip
                for fres in self.ffc_skip:
                    z = fres(z, mask)
                x = x + z
            else:
                x = x + x_skip
            x = self.conv1(
                x,
                ws[1].clone(),
                fused_modconv=fused_modconv,
                gain=np.sqrt(0.5),
                **layer_kwargs,
            )
            x = y.add_(x)  # residual merge (in-place into the skip tensor)
        else:
            x = self.conv0(
                x, ws[0].clone(), fused_modconv=fused_modconv, **layer_kwargs
            )
            if len(self.ffc_skip) > 0:
                mask = F.interpolate(
                    mask,
                    size=x_skip.shape[2:],
                )
                z = x + x_skip
                for fres in self.ffc_skip:
                    z = fres(z, mask)
                x = x + z
            else:
                x = x + x_skip
            x = self.conv1(
                x, ws[1].clone(), fused_modconv=fused_modconv, **layer_kwargs
            )
        # ToRGB.
        if img is not None:
            # Upsample the running image to this block's resolution.
            img = upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == "skip":
            y = self.torgb(x, ws[2].clone(), fused_modconv=fused_modconv)
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        x = x.to(dtype=dtype)
        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img
class SynthesisNetwork(torch.nn.Module):
    """Full decoder: a 4x4 foreword block followed by one SynthesisBlock per
    resolution (8 ... img_resolution), each conditioned on slices of `ws`
    concatenated with the encoder's global latent."""

    def __init__(
        self,
        w_dim,  # Intermediate latent (W) dimensionality.
        z_dim,  # Output Latent (Z) dimensionality.
        img_resolution,  # Output image resolution.
        img_channels,  # Number of color channels.
        channel_base=16384,  # Overall multiplier for the number of channels.
        channel_max=512,  # Maximum number of channels in any layer.
        num_fp16_res=0,  # Use FP16 for the N highest resolutions.
        **block_kwargs,  # Arguments for SynthesisBlock.
    ):
        # Resolution must be a power of two >= 4.
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [
            2**i for i in range(3, self.img_resolution_log2 + 1)
        ]
        channels_dict = {
            res: min(channel_base // res, channel_max) for res in self.block_resolutions
        }
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.foreword = SynthesisForeword(
            img_channels=img_channels,
            in_channels=min(channel_base // 4, channel_max),
            z_dim=z_dim * 2,
            resolution=4,
        )

        self.num_ws = self.img_resolution_log2 * 2 - 2
        for res in self.block_resolutions:
            if res // 2 in channels_dict.keys():
                in_channels = channels_dict[res // 2] if res > 4 else 0
            else:
                in_channels = min(channel_base // (res // 2), channel_max)
            out_channels = channels_dict[res]
            use_fp16 = res >= fp16_resolution
            use_fp16 = False  # NOTE(review): fp16 forced off; previous line is dead
            is_last = res == self.img_resolution
            block = SynthesisBlock(
                in_channels,
                out_channels,
                w_dim=w_dim,
                resolution=res,
                img_channels=img_channels,
                is_last=is_last,
                use_fp16=use_fp16,
                **block_kwargs,
            )
            setattr(self, f"b{res}", block)

    def forward(self, x_global, mask, feats, ws, fname=None, **block_kwargs):
        img = None

        x, img = self.foreword(x_global, ws, feats, img)

        for res in self.block_resolutions:
            block = getattr(self, f"b{res}")
            # Each block consumes three consecutive ws slices (conv0, conv1, torgb),
            # each concatenated with the encoder's global latent.
            mod_vector0 = []
            mod_vector0.append(ws[:, int(np.log2(res)) * 2 - 5])
            mod_vector0.append(x_global.clone())
            mod_vector0 = torch.cat(mod_vector0, dim=1)

            mod_vector1 = []
            mod_vector1.append(ws[:, int(np.log2(res)) * 2 - 4])
            mod_vector1.append(x_global.clone())
            mod_vector1 = torch.cat(mod_vector1, dim=1)

            mod_vector_rgb = []
            mod_vector_rgb.append(ws[:, int(np.log2(res)) * 2 - 3])
            mod_vector_rgb.append(x_global.clone())
            mod_vector_rgb = torch.cat(mod_vector_rgb, dim=1)
            x, img = block(
                x,
                mask,
                feats,
                img,
                (mod_vector0, mod_vector1, mod_vector_rgb),
                fname=fname,
                **block_kwargs,
            )
        return img
class MappingNetwork(torch.nn.Module):
    """StyleGAN2 mapping network: maps (z, optional label c) to W, optionally
    broadcasts W to num_ws copies, tracks a moving average of W during
    training, and supports truncation at inference time."""

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,  # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,  # Intermediate latent (W) dimensionality.
        num_ws,  # Number of intermediate latents to output, None = do not broadcast.
        num_layers=8,  # Number of mapping layers.
        embed_features=None,  # Label embedding dimensionality, None = same as w_dim.
        layer_features=None,  # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation="lrelu",  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier=0.01,  # Learning rate multiplier for the mapping layers.
        w_avg_beta=0.995,  # Decay for tracking the moving average of W during training, None = do not track.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        features_list = (
            [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        )

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(
                in_features,
                out_features,
                activation=activation,
                lr_multiplier=lr_multiplier,
            )
            setattr(self, f"fc{idx}", layer)

        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer("w_avg", torch.zeros([w_dim]))

    def forward(
        self, z, c, truncation_psi=1, truncation_cutoff=None, skip_w_avg_update=False
    ):
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function("input"):
            if self.z_dim > 0:
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f"fc{idx}")
            x = layer(x)

        # Update moving average of W.
        if self.w_avg_beta is not None and self.training and not skip_w_avg_update:
            with torch.autograd.profiler.record_function("update_w_avg"):
                self.w_avg.copy_(
                    x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)
                )

        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function("broadcast"):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function("truncate"):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    # Truncate only the first `truncation_cutoff` ws entries.
                    x[:, :truncation_cutoff] = self.w_avg.lerp(
                        x[:, :truncation_cutoff], truncation_psi
                    )
        return x
class Generator(torch.nn.Module):
    """End-to-end inpainting generator: encoder -> mapping network -> synthesis
    network. Unlike a vanilla StyleGAN generator, the latent z comes from the
    encoder (not sampled), so forward() takes an image instead of noise."""

    def __init__(
        self,
        z_dim,  # Input latent (Z) dimensionality.
        c_dim,  # Conditioning label (C) dimensionality.
        w_dim,  # Intermediate latent (W) dimensionality.
        img_resolution,  # Output resolution.
        img_channels,  # Number of output color channels.
        encoder_kwargs={},  # Arguments for EncoderNetwork.
        mapping_kwargs={},  # Arguments for MappingNetwork.
        synthesis_kwargs={},  # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.encoder = EncoderNetwork(
            c_dim=c_dim,
            z_dim=z_dim,
            img_resolution=img_resolution,
            img_channels=img_channels,
            **encoder_kwargs,
        )
        self.synthesis = SynthesisNetwork(
            z_dim=z_dim,
            w_dim=w_dim,
            img_resolution=img_resolution,
            img_channels=img_channels,
            **synthesis_kwargs,
        )
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(
            z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs
        )

    def forward(
        self,
        img,
        c,
        fname=None,
        truncation_psi=1,
        truncation_cutoff=None,
        **synthesis_kwargs,
    ):
        # Last input channel is the mask; pull it out for the FFC skip layers.
        mask = img[:, -1].unsqueeze(1)
        x_global, z, feats = self.encoder(img, c)
        ws = self.mapping(
            z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff
        )
        img = self.synthesis(x_global, mask, feats, ws, fname=fname, **synthesis_kwargs)
        return img
# Pretrained FcF (Places 512) generator checkpoint; URL and expected MD5 can
# both be overridden via environment variables.
FCF_MODEL_URL = os.environ.get(
    "FCF_MODEL_URL",
    "https://github.com/Sanster/models/releases/download/add_fcf/places_512_G.pth",
)
FCF_MODEL_MD5 = os.environ.get("FCF_MODEL_MD5", "3323152bc01bf1c56fd8aba74435a211")
class FcF(InpaintModel):
    """FcF inpainting model wrapper. The network operates on fixed 512x512
    square crops, so large images are processed per mask-bounding-box crop."""

    name = "fcf"
    min_size = 512
    pad_mod = 512
    pad_to_square = True

    def init_model(self, device, **kwargs):
        # Fix all RNG seeds so parameter init (before checkpoint load) is deterministic.
        seed = 0
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

        # NOTE: deliberately shadows the incoming kwargs with the fixed
        # architecture config matching the pretrained checkpoint.
        kwargs = {
            "channel_base": 1 * 32768,
            "channel_max": 512,
            "num_fp16_res": 4,
            "conv_clamp": 256,
        }
        G = Generator(
            z_dim=512,
            c_dim=0,
            w_dim=512,
            img_resolution=512,
            img_channels=3,
            synthesis_kwargs=kwargs,
            encoder_kwargs=kwargs,
            mapping_kwargs={"num_layers": 2},
        )
        self.model = load_model(G, FCF_MODEL_URL, device, FCF_MODEL_MD5)
        # c_dim == 0, so the label is an empty placeholder.
        self.label = torch.zeros([1, self.model.c_dim], device=device)

    @staticmethod
    def is_downloaded() -> bool:
        return os.path.exists(get_cache_path_by_url(FCF_MODEL_URL))

    @torch.no_grad()
    def __call__(self, image, mask, config: Config):
        """
        images: [H, W, C] RGB, not normalized
        masks: [H, W]
        return: BGR IMAGE
        """
        # Exact model size: run directly without cropping.
        if image.shape[0] == 512 and image.shape[1] == 512:
            return self._pad_forward(image, mask, config)

        # Otherwise inpaint each mask bounding box at <=512px and paste back.
        boxes = boxes_from_mask(mask)
        crop_result = []
        config.hd_strategy_crop_margin = 128
        for box in boxes:
            crop_image, crop_mask, crop_box = self._crop_box(image, mask, box, config)
            origin_size = crop_image.shape[:2]
            resize_image = resize_max_size(crop_image, size_limit=512)
            resize_mask = resize_max_size(crop_mask, size_limit=512)
            inpaint_result = self._pad_forward(resize_image, resize_mask, config)

            # only paste masked area result
            inpaint_result = cv2.resize(
                inpaint_result,
                (origin_size[1], origin_size[0]),
                interpolation=cv2.INTER_CUBIC,
            )

            # Keep original pixels wherever the mask is (near) zero.
            original_pixel_indices = crop_mask < 127
            inpaint_result[original_pixel_indices] = crop_image[:, :, ::-1][
                original_pixel_indices
            ]

            crop_result.append((inpaint_result, crop_box))

        # NOTE(review): `image[:, :, ::-1]` is a numpy view, so the writes
        # below also mutate the caller's `image` array — confirm intended.
        inpaint_result = image[:, :, ::-1]
        for crop_image, crop_box in crop_result:
            x1, y1, x2, y2 = crop_box
            inpaint_result[y1:y2, x1:x2, :] = crop_image

        return inpaint_result

    def forward(self, image, mask, config: Config):
        """Input images and output images have same size
        images: [H, W, C] RGB
        masks: [H, W] mask area == 255
        return: BGR IMAGE
        """
        image = norm_img(image)  # [0, 1]
        image = image * 2 - 1  # [0, 1] -> [-1, 1]
        # Binarize the mask, then normalize to [0, 1].
        mask = (mask > 120) * 255
        mask = norm_img(mask)

        image = torch.from_numpy(image).unsqueeze(0).to(self.device)
        mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)

        # Network input = [shifted mask, image with masked area zeroed].
        erased_img = image * (1 - mask)
        input_image = torch.cat([0.5 - mask, erased_img], dim=1)

        output = self.model(
            input_image, self.label, truncation_psi=0.1, noise_mode="none"
        )
        # [-1, 1] -> uint8 [0, 255], NCHW -> NHWC.
        output = (
            (output.permute(0, 2, 3, 1) * 127.5 + 127.5)
            .round()
            .clamp(0, 255)
            .to(torch.uint8)
        )
        output = output[0].cpu().numpy()
        cur_res = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return cur_res
| 57,098 | 31.929066 | 124 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/sd.py | import gc
import PIL.Image
import cv2
import numpy as np
import torch
from loguru import logger
from lama_cleaner.model.base import DiffusionInpaintModel
from lama_cleaner.model.utils import torch_gc, get_scheduler
from lama_cleaner.schema import Config
class CPUTextEncoderWrapper:
    """Runs the Stable Diffusion text encoder on CPU in float32 while still
    reporting the pipeline's dtype to callers."""

    def __init__(self, text_encoder, torch_dtype):
        self.config = text_encoder.config
        # Move to CPU and promote to float32 for CPU-side inference.
        encoder = text_encoder.to(torch.device("cpu"), non_blocking=True)
        self.text_encoder = encoder.to(torch.float32, non_blocking=True)
        self.torch_dtype = torch_dtype
        del text_encoder
        torch_gc()

    def __call__(self, x, **kwargs):
        original_device = x.device
        moved = x.to(self.text_encoder.device)
        hidden = self.text_encoder(moved, **kwargs)[0]
        # Return on the caller's device/dtype, wrapped in a list like the HF output.
        return [hidden.to(original_device).to(self.torch_dtype)]

    @property
    def dtype(self):
        return self.torch_dtype
def load_from_local_model(local_model_path, torch_dtype, disable_nsfw=True):
    """Convert a local Stable Diffusion checkpoint (.ckpt/.safetensors) into a
    StableDiffusionInpaintPipeline, optionally stripping the NSFW checker.

    Returns the pipeline cast to `torch_dtype`.
    """
    from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
        download_from_original_stable_diffusion_ckpt,
    )
    from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline

    logger.info(f"Converting {local_model_path} to diffusers pipeline")

    # num_in_channels=9: inpainting UNet (latents + mask + masked-image latents).
    pipe = download_from_original_stable_diffusion_ckpt(
        local_model_path,
        num_in_channels=9,
        from_safetensors=local_model_path.endswith("safetensors"),
        device="cpu",
    )

    # NOTE(review): feature_extractor is set to pipe.safety_checker (not
    # pipe.feature_extractor) when NSFW filtering is enabled — confirm intended.
    inpaint_pipe = StableDiffusionInpaintPipeline(
        vae=pipe.vae,
        text_encoder=pipe.text_encoder,
        tokenizer=pipe.tokenizer,
        unet=pipe.unet,
        scheduler=pipe.scheduler,
        safety_checker=None if disable_nsfw else pipe.safety_checker,
        feature_extractor=None if disable_nsfw else pipe.safety_checker,
        requires_safety_checker=not disable_nsfw,
    )

    # Free the intermediate pipeline before the dtype cast.
    del pipe
    gc.collect()
    return inpaint_pipe.to(torch_dtype=torch_dtype)
class SD(DiffusionInpaintModel):
    """Base wrapper around diffusers' StableDiffusionInpaintPipeline.

    Subclasses only set `name` and `model_id_or_path`."""

    pad_mod = 8
    min_size = 512

    def init_model(self, device: torch.device, **kwargs):
        from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline

        fp16 = not kwargs.get("no_half", False)

        model_kwargs = {
            "local_files_only": kwargs.get("local_files_only", kwargs["sd_run_local"])
        }
        # NSFW checker must also be dropped under CPU offload (it breaks offloading).
        if kwargs["disable_nsfw"] or kwargs.get("cpu_offload", False):
            logger.info("Disable Stable Diffusion Model NSFW checker")
            model_kwargs.update(
                dict(
                    safety_checker=None,
                    feature_extractor=None,
                    requires_safety_checker=False,
                )
            )

        use_gpu = device == torch.device("cuda") and torch.cuda.is_available()
        torch_dtype = torch.float16 if use_gpu and fp16 else torch.float32

        # Either convert a local checkpoint or pull from the HF hub.
        if kwargs.get("sd_local_model_path", None):
            self.model = load_from_local_model(
                kwargs["sd_local_model_path"],
                torch_dtype=torch_dtype,
            )
        else:
            self.model = StableDiffusionInpaintPipeline.from_pretrained(
                self.model_id_or_path,
                revision="fp16" if use_gpu and fp16 else "main",
                torch_dtype=torch_dtype,
                use_auth_token=kwargs["hf_access_token"],
                **model_kwargs,
            )

        # https://huggingface.co/docs/diffusers/v0.7.0/en/api/pipelines/stable_diffusion#diffusers.StableDiffusionInpaintPipeline.enable_attention_slicing
        self.model.enable_attention_slicing()
        # https://huggingface.co/docs/diffusers/v0.7.0/en/optimization/fp16#memory-efficient-attention
        if kwargs.get("enable_xformers", False):
            self.model.enable_xformers_memory_efficient_attention()

        if kwargs.get("cpu_offload", False) and use_gpu:
            # TODO: gpu_id
            logger.info("Enable sequential cpu offload")
            self.model.enable_sequential_cpu_offload(gpu_id=0)
        else:
            self.model = self.model.to(device)
            if kwargs["sd_cpu_textencoder"]:
                logger.info("Run Stable Diffusion TextEncoder on CPU")
                self.model.text_encoder = CPUTextEncoderWrapper(
                    self.model.text_encoder, torch_dtype
                )

        # Optional per-step progress callback forwarded to the pipeline.
        self.callback = kwargs.pop("callback", None)

    def forward(self, image, mask, config: Config):
        """Input image and output image have same size
        image: [H, W, C] RGB
        mask: [H, W, 1] 255 means area to repaint
        return: BGR IMAGE
        """
        # Swap in the sampler requested by the caller.
        scheduler_config = self.model.scheduler.config
        scheduler = get_scheduler(config.sd_sampler, scheduler_config)
        self.model.scheduler = scheduler

        if config.sd_mask_blur != 0:
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)[:, :, np.newaxis]

        img_h, img_w = image.shape[:2]

        output = self.model(
            image=PIL.Image.fromarray(image),
            prompt=config.prompt,
            negative_prompt=config.negative_prompt,
            mask_image=PIL.Image.fromarray(mask[:, :, -1], mode="L"),
            num_inference_steps=config.sd_steps,
            guidance_scale=config.sd_guidance_scale,
            output_type="np.array",
            callback=self.callback,
            height=img_h,
            width=img_w,
            generator=torch.manual_seed(config.sd_seed),
        ).images[0]

        # float [0, 1] RGB -> uint8 BGR.
        output = (output * 255).round().astype("uint8")
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
        return output

    def forward_post_process(self, result, image, mask, config):
        if config.sd_match_histograms:
            result = self._match_histograms(result, image[:, :, ::-1], mask)

        if config.sd_mask_blur != 0:
            k = 2 * config.sd_mask_blur + 1
            mask = cv2.GaussianBlur(mask, (k, k), 0)
        return result, image, mask

    @staticmethod
    def is_downloaded() -> bool:
        # model will be downloaded when app start, and can't switch in frontend settings
        return True
class SD15(SD):
    """Stable Diffusion 1.5 inpainting checkpoint (runwayml)."""

    name = "sd1.5"
    model_id_or_path = "runwayml/stable-diffusion-inpainting"
class Anything4(SD):
    """Anything v4.0 inpainting checkpoint."""

    name = "anything4"
    model_id_or_path = "Sanster/anything-4.0-inpainting"
class RealisticVision14(SD):
    """Realistic Vision V1.4 inpainting checkpoint."""

    name = "realisticVision1.4"
    model_id_or_path = "Sanster/Realistic_Vision_V1.4-inpainting"
class SD2(SD):
    """Stable Diffusion 2 inpainting checkpoint (stabilityai)."""

    name = "sd2"
    model_id_or_path = "stabilityai/stable-diffusion-2-inpainting"
| 6,644 | 33.252577 | 154 | py |
lama-cleaner | lama-cleaner-main/lama_cleaner/model/pipeline/pipeline_stable_diffusion_controlnet_inpaint.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copy from https://github.com/mikonvergence/ControlNetInpaint/blob/main/src/pipeline_stable_diffusion_controlnet_inpaint.py
import torch
import PIL.Image
import numpy as np
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import *
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> # !pip install opencv-python transformers accelerate
>>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, UniPCMultistepScheduler
>>> from diffusers.utils import load_image
>>> import numpy as np
>>> import torch
>>> import cv2
>>> from PIL import Image
>>> # download an image
>>> image = load_image(
... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
... )
>>> image = np.array(image)
>>> mask_image = load_image(
... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
... )
>>> mask_image = np.array(mask_image)
>>> # get canny image
>>> canny_image = cv2.Canny(image, 100, 200)
>>> canny_image = canny_image[:, :, None]
>>> canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
>>> canny_image = Image.fromarray(canny_image)
>>> # load control net and stable diffusion v1-5
>>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
>>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
... "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
... )
>>> # speed up diffusion process with faster scheduler and memory optimization
>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
>>> # remove following line if xformers is not installed
>>> pipe.enable_xformers_memory_efficient_attention()
>>> pipe.enable_model_cpu_offload()
>>> # generate image
>>> generator = torch.manual_seed(0)
>>> image = pipe(
... "futuristic-looking doggo",
... num_inference_steps=20,
... generator=generator,
... image=image,
... control_image=canny_image,
... mask_image=mask_image
... ).images[0]
```
"""
def prepare_mask_and_masked_image(image, mask):
    """
    Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
    converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
    ``image`` and ``1`` for the ``mask``.

    The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
    binarized (``mask > 0.5``) and cast to ``torch.float32`` too.

    Args:
        image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
            It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
            ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
        mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
            It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
            ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.

    Raises:
        ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
        should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
        TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
        (ot the other way around).

    Returns:
        tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
        dimensions: ``batch x channels x height x width``.
    """
    image_is_tensor = isinstance(image, torch.Tensor)
    mask_is_tensor = isinstance(mask, torch.Tensor)

    if image_is_tensor:
        if not mask_is_tensor:
            raise TypeError(
                f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not"
            )

        # Promote a single (3, H, W) image to a batch of one.
        if image.ndim == 3:
            assert (
                image.shape[0] == 3
            ), "Image outside a batch should be of shape (3, H, W)"
            image = image.unsqueeze(0)

        # Promote the mask to (B, 1, H, W) from any of the accepted ranks.
        if mask.ndim == 2:
            mask = mask.unsqueeze(0).unsqueeze(0)
        if mask.ndim == 3:
            # (1, H, W) gets a batch dim; (B, H, W) gets a channel dim.
            mask = mask.unsqueeze(0) if mask.shape[0] == 1 else mask.unsqueeze(1)

        assert (
            image.ndim == 4 and mask.ndim == 4
        ), "Image and Mask must have 4 dimensions"
        assert (
            image.shape[-2:] == mask.shape[-2:]
        ), "Image and Mask must have the same spatial dimensions"
        assert (
            image.shape[0] == mask.shape[0]
        ), "Image and Mask must have the same batch size"

        if image.min() < -1 or image.max() > 1:
            raise ValueError("Image should be in [-1, 1] range")
        if mask.min() < 0 or mask.max() > 1:
            raise ValueError("Mask should be in [0, 1] range")

        # Binarize the mask in place (the caller's tensor is mutated, as before).
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1

        image = image.to(dtype=torch.float32)
    elif mask_is_tensor:
        raise TypeError(
            f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not"
        )
    else:
        # --- image: PIL / ndarray / list thereof -> float tensor in [-1, 1] ---
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]
        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            image = np.concatenate(
                [np.array(i.convert("RGB"))[None, :] for i in image], axis=0
            )
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in image], axis=0)
        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

        # --- mask: PIL / ndarray / list thereof -> binarized (B, 1, H, W) ---
        if isinstance(mask, (PIL.Image.Image, np.ndarray)):
            mask = [mask]
        if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
            mask = np.concatenate(
                [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0
            )
            mask = mask.astype(np.float32) / 255.0
        elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
            # NOTE: ndarray masks are expected to already be in [0, 1].
            mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)

    # Keep only the pixels the mask does NOT cover.
    masked_image = image * (mask < 0.5)
    return mask, masked_image
class StableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetPipeline):
    r"""
    Pipeline for text-guided image inpainting using Stable Diffusion with ControlNet guidance.

    This model inherits from [`StableDiffusionControlNetPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        controlnet ([`ControlNetModel`]):
            Provides additional conditioning to the unet during the denoising process
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def prepare_mask_latents(
        self,
        mask,
        masked_image,
        batch_size,
        height,
        width,
        dtype,
        device,
        generator,
        do_classifier_free_guidance,
    ):
        """Downsample the mask to latent resolution and VAE-encode the masked image.

        Both outputs are duplicated up to ``batch_size`` and, under
        classifier-free guidance, doubled along the batch dimension so they
        line up with the concatenated [uncond, cond] latent input.

        Returns:
            tuple (mask, masked_image_latents), both on ``device`` with ``dtype``.
        """
        # resize the mask to latents shape as we concatenate the mask to the latents
        # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
        # and half precision
        mask = torch.nn.functional.interpolate(
            mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
        )
        mask = mask.to(device=device, dtype=dtype)

        masked_image = masked_image.to(device=device, dtype=dtype)

        # encode the mask image into latents space so we can concatenate it to the latents
        if isinstance(generator, list):
            # one generator per batch element -> encode element-wise
            masked_image_latents = [
                self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(
                    generator=generator[i]
                )
                for i in range(batch_size)
            ]
            masked_image_latents = torch.cat(masked_image_latents, dim=0)
        else:
            masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(
                generator=generator
            )
        masked_image_latents = self.vae.config.scaling_factor * masked_image_latents

        # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
        if mask.shape[0] < batch_size:
            if not batch_size % mask.shape[0] == 0:
                raise ValueError(
                    "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
                    f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
                    " of masks that you pass is divisible by the total requested batch size."
                )
            mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
        if masked_image_latents.shape[0] < batch_size:
            if not batch_size % masked_image_latents.shape[0] == 0:
                raise ValueError(
                    "The passed images and the required batch size don't match. Images are supposed to be duplicated"
                    f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
                    " Make sure the number of images that you pass is divisible by the total requested batch size."
                )
            masked_image_latents = masked_image_latents.repeat(
                batch_size // masked_image_latents.shape[0], 1, 1, 1
            )

        mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
        masked_image_latents = (
            torch.cat([masked_image_latents] * 2)
            if do_classifier_free_guidance
            else masked_image_latents
        )

        # aligning device to prevent device errors when concating it with the latent model input
        masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
        return mask, masked_image_latents

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        control_image: Union[
            torch.FloatTensor,
            PIL.Image.Image,
            List[torch.FloatTensor],
            List[PIL.Image.Image],
        ] = None,
        mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: float = 1.0,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
                instead.
            image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
                be masked out with `mask_image` and repainted according to `prompt`.
            control_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
                The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
                the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can
                also be accepted as an image. The control image is automatically resized to fit the output image.
            mask_image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
                to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
                instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds`. instead. If not defined, one has to pass `negative_prompt_embeds`. instead.
                Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will ge generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generate image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
            controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
                The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
                to the residual in the original unet.

        Examples:

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
            When returning a tuple, the first element is a list with the generated images, and the second element is a
            list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
            (nsfw) content, according to the `safety_checker`.
        """
        # 0. Default height and width to unet
        height, width = self._default_height_width(height, width, control_image)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            control_image,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Prepare image
        control_image = self.prepare_image(
            control_image,
            width,
            height,
            batch_size * num_images_per_prompt,
            num_images_per_prompt,
            device,
            self.controlnet.dtype,
        )

        if do_classifier_free_guidance:
            control_image = torch.cat([control_image] * 2)

        # 5. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 6. Prepare latent variables
        # Note: latent channel count comes from the controlnet config; the
        # inpainting UNet's extra input channels are supplied by the mask and
        # masked-image latents concatenated inside the denoising loop below.
        num_channels_latents = self.controlnet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # EXTRA: prepare mask latents
        mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
        mask, masked_image_latents = self.prepare_mask_latents(
            mask,
            masked_image,
            batch_size * num_images_per_prompt,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            do_classifier_free_guidance,
        )

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = (
                    torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                )
                latent_model_input = self.scheduler.scale_model_input(
                    latent_model_input, t
                )

                # ControlNet runs on the plain latents; the inpainting channels
                # are appended afterwards for the UNet only.
                down_block_res_samples, mid_block_res_sample = self.controlnet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    controlnet_cond=control_image,
                    return_dict=False,
                )

                down_block_res_samples = [
                    down_block_res_sample * controlnet_conditioning_scale
                    for down_block_res_sample in down_block_res_samples
                ]
                mid_block_res_sample *= controlnet_conditioning_scale

                # predict the noise residual
                latent_model_input = torch.cat(
                    [latent_model_input, mask, masked_image_latents], dim=1
                )
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    down_block_additional_residuals=down_block_res_samples,
                    mid_block_additional_residual=mid_block_res_sample,
                ).sample

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (
                        noise_pred_text - noise_pred_uncond
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(
                    noise_pred, t, latents, **extra_step_kwargs
                ).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or (
                    (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
                ):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # If we do sequential model offloading, let's offload unet and controlnet
        # manually for max memory savings
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.unet.to("cpu")
            self.controlnet.to("cpu")
            torch.cuda.empty_cache()

        if output_type == "latent":
            image = latents
            has_nsfw_concept = None
        elif output_type == "pil":
            # 8. Post-processing
            image = self.decode_latents(latents)

            # 9. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, prompt_embeds.dtype
            )

            # 10. Convert to PIL
            image = self.numpy_to_pil(image)
        else:
            # 8. Post-processing
            image = self.decode_latents(latents)

            # 9. Run safety checker
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, prompt_embeds.dtype
            )

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(
            images=image, nsfw_content_detected=has_nsfw_concept
        )
| 28,155 | 47.047782 | 146 | py |
pygcn | pygcn-master/setup.py | from setuptools import setup
from setuptools import find_packages
# Minimal setuptools configuration for the pygcn package; ships the README
# alongside the package and pulls in the runtime dependencies.
setup(name='pygcn',
      version='0.1',
      description='Graph Convolutional Networks in PyTorch',
      author='Thomas Kipf',
      author_email='thomas.kipf@gmail.com',
      url='https://tkipf.github.io',
      download_url='https://github.com/tkipf/pygcn',
      license='MIT',
      install_requires=['numpy',
                        'torch',
                        'scipy'
                        ],
      package_data={'pygcn': ['README.md']},
      packages=find_packages())
packages=find_packages()) | 553 | 31.588235 | 60 | py |
pygcn | pygcn-master/pygcn/utils.py | import numpy as np
import scipy.sparse as sp
import torch
def encode_onehot(labels):
    """Encode a 1-D sequence of class labels as a one-hot matrix.

    The distinct labels are sorted before columns are assigned so the
    encoding is deterministic across interpreter runs; iterating a raw
    ``set`` of strings yields a different column order every run because
    of hash randomization, making saved models/label maps irreproducible.

    Args:
        labels: iterable of hashable, orderable class labels (e.g. strings).

    Returns:
        np.ndarray of shape (len(labels), n_classes), dtype int32, where
        each row is the one-hot encoding of the corresponding label.
    """
    # sorted() makes the label -> column mapping stable across runs.
    classes = sorted(set(labels))
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                    enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    return labels_onehot
def load_data(path="../data/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now)

    Reads ``<path><dataset>.content`` (node id, sparse features, label) and
    ``<path><dataset>.cites`` (citing id, cited id), builds a symmetric,
    row-normalized adjacency matrix with self-loops, and returns torch
    tensors plus fixed train/val/test index splits.
    """
    print('Loading {} dataset...'.format(dataset))

    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    labels = encode_onehot(idx_features_labels[:, -1])

    # build graph
    # Paper ids in the raw files are arbitrary; remap them to 0..N-1 row indices.
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)

    # build symmetric adjacency matrix
    # Elementwise max of adj and adj.T: keeps an edge if it appears in either
    # direction without double-counting edges present in both.
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)

    features = normalize(features)
    # Self-loops (identity) are added before row-normalizing the adjacency.
    adj = normalize(adj + sp.eye(adj.shape[0]))

    # Fixed split sizes used by the original GCN paper setup for Cora.
    idx_train = range(140)
    idx_val = range(200, 500)
    idx_test = range(500, 1500)

    features = torch.FloatTensor(np.array(features.todense()))
    # One-hot labels are collapsed back to integer class indices.
    labels = torch.LongTensor(np.where(labels)[1])
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    return adj, features, labels, idx_train, idx_val, idx_test
def normalize(mx):
    """Row-normalize sparse matrix so that each non-empty row sums to 1.

    Rows that sum to zero are left as all-zero rows instead of producing
    inf/nan entries.

    Args:
        mx: a scipy sparse matrix.

    Returns:
        The row-normalized sparse matrix, i.e. D^-1 @ mx where D is the
        diagonal matrix of row sums.
    """
    rowsum = np.array(mx.sum(1))
    # Suppress the expected divide-by-zero RuntimeWarning for empty rows;
    # the resulting inf entries are zeroed out immediately below.
    with np.errstate(divide='ignore'):
        r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx
def accuracy(output, labels):
    """Fraction of rows in ``output`` whose argmax equals ``labels``.

    Args:
        output: (N, C) tensor of per-class scores (e.g. log-probabilities).
        labels: (N,) tensor of integer class indices.

    Returns:
        0-dim double tensor holding correct / N.
    """
    predicted = output.argmax(dim=1).type_as(labels)
    hits = (predicted == labels).double().sum()
    return hits / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse COO tensor.

    Uses ``torch.sparse_coo_tensor`` instead of the deprecated
    ``torch.sparse.FloatTensor`` constructor; the result is an fp32 sparse
    tensor with the same shape and entries.

    Args:
        sparse_mx: any scipy sparse matrix (converted to COO internally).

    Returns:
        torch sparse float32 tensor with the same shape and values.
    """
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    # Sparse COO indices must be a 2 x nnz int64 tensor.
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse_coo_tensor(indices, values, shape)
| 2,848 | 34.17284 | 78 | py |
pygcn | pygcn-master/pygcn/layers.py | import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class GraphConvolution(Module):
    """A single graph-convolution layer: out = adj @ (input @ W) (+ b).

    Follows the propagation rule of Kipf & Welling,
    https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Register an explicit None so ``self.bias`` always exists.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weight (and bias) uniformly in [-1/sqrt(fan_out), 1/sqrt(fan_out)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Densely transform the features, then propagate them over the (sparse) graph."""
        transformed = torch.mm(input, self.weight)
        propagated = torch.spmm(adj, transformed)
        if self.bias is None:
            return propagated
        return propagated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(
            self.__class__.__name__, self.in_features, self.out_features)
| 1,297 | 29.186047 | 77 | py |
pygcn | pygcn-master/pygcn/models.py | import torch.nn as nn
import torch.nn.functional as F
from pygcn.layers import GraphConvolution
class GCN(nn.Module):
    """Two-layer graph convolutional network.

    Architecture: GraphConvolution -> ReLU -> dropout -> GraphConvolution
    -> log-softmax over classes.
    """

    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, adj):
        """Return per-node log class probabilities of shape (N, nclass)."""
        hidden = F.relu(self.gc1(x, adj))
        # Dropout is only active in training mode (self.training).
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        logits = self.gc2(hidden, adj)
        return F.log_softmax(logits, dim=1)
| 541 | 27.526316 | 62 | py |
pygcn | pygcn-master/pygcn/train.py | from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from pygcn.utils import load_data, accuracy
from pygcn.models import GCN
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
                    help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')

args = parser.parse_args()
# CUDA is used only when available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Seed NumPy and PyTorch RNGs for reproducible runs.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

# Move the model and all data tensors to the GPU when enabled.
if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
def train(epoch):
    """Run one full-batch optimization step and print train/val metrics.

    Uses the module-level model, optimizer, and data tensors. When
    --fastmode is off, validation metrics come from a second forward pass
    with the model in eval mode (dropout disabled); in fastmode the
    training-pass output is reused for validation.
    """
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    # Loss/accuracy are computed on the training indices only.
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))
def test():
    """Evaluate the trained model on the held-out test split and print metrics."""
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
# Train model
t_total = time.time()
for epoch in range(args.epochs):
    train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))

# Testing
# Final evaluation on the test split after all epochs complete.
test()
| 3,427 | 31.037383 | 72 | py |
r_em | r_em-master/training/nn_fcns_local.py | #-*- coding: utf-8 -*-
"""
Created on Sun Feb 17 22:30:18 2019
__author__ = "Ivan Lobato"
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append('E:/Neural_network/nt_tf_lib')
sys.path.append('/media/hdd_1/nt_tf_lib')
sys.path.append('/ceph/users/gbz64553/data/Neural_network/nt_tf_lib')
#########################################################################################
import tensorflow as tf
#########################################################################################
import nn_var_glob as nvgb
import nn_fcns_gen as nfge
import nn_fcns_lays as nfly
import nn_fcns_nets as nnet
import nn_fcns_losses as nfls
import nn_fcns_callbacks as nfcb
import nn_fcns_optimizers as nfop
#########################################################################################
# dtypes for (de)serialization: raw record storage (mat/rtf) vs. compute (tf) dtypes
x_typ_mat = [np.uint16]
y_typ_mat = [np.uint16]
x_typ_rtf = [tf.uint16]
y_typ_rtf = [tf.uint16]
x_typ_tf = [tf.float32]
y_typ_tf = [tf.float32]
x_ndim = [4]
y_ndim = [4]
#########################################################################################
# numerical floors used when dividing by a standard deviation
EE_STD_X = 0.1
EE_W_Y = 0.1
EE_STD_LCN = 0.1
GRAD_MAX = 100.00
#########################################################################################
# multi-local contrast normalization loss: I found out a proportion between mean(MLN(2+4+8+16))/L1 = 2.62 for EE_STD = 0.1
MLWT_KSZ = [2, 4, 8, 16]
MLWT_WGTL = np.array([1.0, 1.33, 1.66, 2.0], np.float32)
MLWT_WGTL /= MLWT_WGTL.sum()
#########################################################################################
# FFT loss: compression exponent and scale factor applied before the rfft2d
FFT_N = 0.1250
FFT_FR = 2.0*np.power(256.0, -FFT_N)
#########################################################################################
# DOWNSCALING factor
DS_FTR = 1
RS_0 = 64
RS_E = 192
V_MIN = 1.0e-9
R_FTR_OPT = 0 # blend option for fcn_x below: 0: deactivated, 1: fixed value, 2: increasing with training, 3: random
R_FTR_VAL = 1.0
#########################################################################################
# Loss normalization (per-sample 1/std weighting of the pixel-wise losses)
LOSS_NORM = True
MATLAB_EXPORT = False
#########################################################################################
GAN_TYP = 0 # 0: deactivated (plain generator), 1: adversarial training active
# slot indices into LOSS_TYP / LOSS_WGT_TYP:
# 0: gan - gen
# 1: gan - disc
# 2: L1
# 3: L2
# 4: L1 - MLWT
# 5: L1 - FFT
# 6: mean
# 7: std
#########################################################################################
#################################################### 0, 1, 2, 3 , 4, 5, 6, 7
LOSS_TYP, LOSS_WGT_TYP = nfge.fcn_loss_type_weight([5.0e-4, 1.0000, 10.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], 1.0)
#########################################################################################
###################################### database #########################################
#########################################################################################
def fcn_g_mean_std(y, axis, std_min):
    """Mean and std of `y` over `axis` (dims kept), with the std floored at `std_min`."""
    mu, var = tf.nn.moments(y, axes=axis, keepdims=True)
    sigma = tf.math.maximum(tf.sqrt(var), std_min)
    return mu, sigma
def fcn_wt_x(x, axis, std_min):
    """Whitening transform: subtract the mean of `x` and divide by its (floored) std."""
    mu, sigma = fcn_g_mean_std(x, axis, std_min)
    return (x - mu)/sigma
def fcn_wt_xy(x, y, axis, std_min):
    """Whiten both `x` and `y` using the statistics of `x` only."""
    mu, sigma = fcn_g_mean_std(x, axis, std_min)
    return (x - mu)/sigma, (y - mu)/sigma
#########################################################################################
def fcn_scale_inputs_x_y(x, y, axis):
    """Normalize the (x, y) pair with the mean/std of `x` (std floored at EE_STD_X)."""
    mu, sigma = fcn_g_mean_std(x, axis, EE_STD_X)
    return (x - mu)/sigma, (y - mu)/sigma
#########################################################################################
@tf.function
def fcn_ds_map(record_string, bb_norm, bb_aug):
    """Parse one serialized example into (x, y) float tensors.

    When `bb_norm` is true both tensors are normalized with the statistics of x.
    `bb_aug` is accepted for interface compatibility but not used here.
    """
    parsed = nfge.fcn_ds_parse_sgl_ex_string_string(record_string)
    x = nfge.fcn_x_reshape(nfge.fcn_string_dc(parsed['x'], x_typ_rtf[0], x_typ_tf[0]))
    y = nfge.fcn_y_reshape(nfge.fcn_string_dc(parsed['y'], y_typ_rtf[0], y_typ_tf[0]))
    if bb_norm:
        x, y = fcn_scale_inputs_x_y(x, y, [0, 1])
    return x, y
#########################################################################################
# choose the input-mixing strategy for the generator input once, at import time
if R_FTR_OPT==0:
    def fcn_x(x, y, ibg, ibg_m):
        # mixing deactivated: input passes through unchanged
        return x
elif R_FTR_OPT==1:
    def fcn_x(x, y, ibg, ibg_m):
        # fixed blend factor R_FTR_VAL between the target y and the input x
        r = R_FTR_VAL
        x_op = tf.expand_dims(x, axis=-1)
        x_op = y + r*(x_op - y)
        return x_op
elif R_FTR_OPT==2:
    def fcn_x(x, y, ibg, ibg_m):
        # blend factor ramps with training progress ibg/ibg_m, clipped to [0, 1]
        r = tf.minimum(1.0, tf.maximum(0.0, ibg/ibg_m))
        x_op = tf.expand_dims(x, axis=-1)
        x_op = y + r*(x_op - y)
        return x_op
else:
    def fcn_x(x, y, ibg, ibg_m):
        # random blend factor drawn per call
        r = tf.random.uniform((), dtype=tf.float32)
        # NOTE(review): this branch slices x[..., 0] before expand_dims while the
        # other branches expand the full x — confirm the asymmetry is intended
        x_op = tf.expand_dims(x[..., 0], axis=-1)
        x_op = y + r*(x_op - y)
        return x_op
def fcn_gn(x, sigma, ftr_min, ftr_max):
    """Add zero-mean Gaussian noise to `x`, with std drawn uniformly in [ftr_min, ftr_max]*sigma."""
    ftr = tf.random.uniform(tf.shape(sigma), minval=ftr_min, maxval=ftr_max, dtype=x_typ_tf[0], seed=2000)
    noise = tf.random.normal(tf.shape(x), 0.0, ftr*sigma, dtype=x_typ_tf[0])
    return x + noise
def fcn_input_disc(x, y, y_p):
    """Build the (real, fake) discriminator inputs by channel-concatenating x with
    the target y and the prediction y_p; half of the time both targets receive
    matched Gaussian noise as an instance-noise regularizer."""
    add_noise = tf.less(tf.random.uniform((), dtype=tf.float32, seed=2001), 0.50)
    if add_noise:
        sigma = tf.maximum(0.01, tf.math.reduce_std(y, axis=[1, 2], keepdims=True))
        y = fcn_gn(y, sigma, 0.001, 0.10)
        y_p = fcn_gn(y_p, sigma, 0.001, 0.10)
    x_d = tf.concat([x, y], axis=-1)
    x_d_p = tf.concat([x, y_p], axis=-1)
    return x_d, x_d_p
# specialize the power-compression applied to the FFT magnitude, chosen once from FFT_N
if nvgb.isone(FFT_N):
    def fcn_pow_fft_n(y):
        # FFT_N == 1: identity
        return y
elif nvgb.iszero(FFT_N-0.5):
    def fcn_pow_fft_n(y):
        # FFT_N == 0.5: square root
        return tf.sqrt(y)
else:
    def fcn_pow_fft_n(y):
        # general case: y**FFT_N, clamped below at V_MIN for numerical safety
        return tf.pow(tf.math.maximum(V_MIN, y), FFT_N)
def fcn_sft_std(x, kn_sz):
    """Local shift (windowed mean) and scale (windowed std, floored at EE_STD_LCN)
    over a `kn_sz` neighborhood — the statistics for local contrast normalization."""
    shift = tf.nn.avg_pool2d(x, kn_sz, strides=(1, 1), padding='SAME')
    var = tf.nn.avg_pool2d(tf.math.squared_difference(x, shift), kn_sz, strides=(1, 1), padding='SAME')
    scale = tf.math.maximum(tf.sqrt(var), EE_STD_LCN)
    return shift, scale
def fcn_mlwt(y_t, y_p, kn_sz):
    """Local contrast normalization of both tensors with the statistics of the
    target `y_t`, followed by a global whitening transform over the spatial axes."""
    shift, scale = fcn_sft_std(y_t, kn_sz)
    y_t = (y_t - shift)/scale
    y_p = (y_p - shift)/scale
    return fcn_wt_xy(y_t, y_p, [1, 2], EE_W_Y)
if LOSS_NORM:
    def fcn_g_weight(y):
        """Per-sample weight: reciprocal of the spatial std of `y`, floored at EE_W_Y."""
        sigma = tf.math.reduce_std(y, axis=[1, 2], keepdims=True)
        return 1.0/tf.math.maximum(sigma, EE_W_Y)
    def fcn_g_apply_weight(y_t, y_p):
        """Scale both tensors by the weight derived from the target `y_t`."""
        w = fcn_g_weight(y_t)
        return w*y_t, w*y_p
else:
    def fcn_g_apply_weight(y_t, y_p):
        """Loss normalization disabled: pass both tensors through unchanged."""
        return y_t, y_p
#########################################################################################
def fmet_l1(y_true, y_pred):
    """L1 metric (mean absolute error), delegated to the shared loss library."""
    return nfls.fls_mae(y_true, y_pred)
#########################################################################################
def fls_l1(y_true, y_pred):
    """L1 loss (mean absolute error), delegated to the shared loss library."""
    return nfls.fls_mae(y_true, y_pred)
#########################################################################################
def fls_l2(y_true, y_pred):
    """L2 loss (mean squared error); the real part is taken so complex inputs are supported."""
    diff_sq = tf.math.squared_difference(y_true, y_pred)
    return tf.reduce_mean(tf.math.real(diff_sq))
#########################################################################################
def fls_l1_mlwt(y_true, y_pred):
    """Weighted sum of L1 losses over multi-scale locally-whitened versions of
    the (globally whitened) pair; scales/weights come from MLWT_KSZ/MLWT_WGTL."""
    # global whitening transform driven by the target statistics
    y_t, y_p = fcn_wt_xy(y_true, y_pred, [1, 2], EE_W_Y)
    loss = tf.constant(0.0, tf.float32)
    for wgt, ksz in zip(MLWT_WGTL, MLWT_KSZ):
        y_t_k, y_p_k = fcn_mlwt(y_t, y_p, (ksz, ksz))
        loss += wgt*nfls.fls_mae(y_t_k, y_p_k)
    return loss
#########################################################################################
def fls_l1_fft(y_true, y_pred):
    """Mean of the power-compressed rfft2d magnitude of the last-channel residual."""
    # the residual is pre-scaled by FFT_FR before the FFT to avoid numerical overflow
    resid = FFT_FR*(y_true[..., -1] - y_pred[..., -1])
    mag = tf.abs(tf.signal.rfft2d(resid))
    return tf.reduce_mean(fcn_pow_fft_n(mag))
#########################################################################################
def fls_l1_mean(y_true, y_pred):
    """L1 distance between the per-sample spatial means of target and prediction."""
    mu_t = tf.reduce_mean(y_true, axis=[1, 2])
    mu_p = tf.reduce_mean(y_pred, axis=[1, 2])
    return tf.reduce_mean(tf.abs(mu_t - mu_p))
#########################################################################################
def fls_l1_std(y_true, y_pred):
    """L1 distance between the per-sample spatial stds of target and prediction."""
    sd_t = tf.math.reduce_std(y_true, axis=[1, 2])
    sd_p = tf.math.reduce_std(y_pred, axis=[1, 2])
    return tf.reduce_mean(tf.abs(sd_t - sd_p))
#########################################################################################
def _fcn_make_weighted_loss(idx, fls_fcn):
    """Build the weighted wrapper for loss slot `idx` of LOSS_TYP/LOSS_WGT_TYP.

    Returns a function mapping (y_true, y_pred) -> (weighted_loss, loss).
    When LOSS_TYP[idx] is disabled the wrapper returns (0, 0) without ever
    evaluating `fls_fcn` — identical to the original unrolled if/else blocks,
    which duplicated this pattern six times.
    """
    if LOSS_TYP[idx]:
        def fls_w(y_true, y_pred):
            loss = fls_fcn(y_true, y_pred)
            loss_w = LOSS_WGT_TYP[idx]*loss
            return loss_w, loss
    else:
        def fls_w(y_true, y_pred):
            return tf.constant(0, tf.float32), tf.constant(0, tf.float32)
    return fls_w
#########################################################################################
# weighted wrappers for each pixel-wise loss term (slot indices match the LOSS_TYP table)
fls_l1_w = _fcn_make_weighted_loss(2, fls_l1)
fls_l2_w = _fcn_make_weighted_loss(3, fls_l2)
fls_l1_mlwt_w = _fcn_make_weighted_loss(4, fls_l1_mlwt)
fls_l1_fft_w = _fcn_make_weighted_loss(5, fls_l1_fft)
fls_l1_mean_w = _fcn_make_weighted_loss(6, fls_l1_mean)
fls_l1_std_w = _fcn_make_weighted_loss(7, fls_l1_std)
#########################################################################################
def fls_pw_w(y_t_i, y_p_i):
    """Evaluate every pixel-wise loss term on the (optionally weighted) pair and
    return a dict with each weighted/unweighted value, their weighted total
    ('loss_pw_w'), and the unweighted L1 metric ('met_l1')."""
    y_t, y_p = fcn_g_apply_weight(y_t_i, y_p_i)
    l1_w, l1 = fls_l1_w(y_t, y_p)
    l2_w, l2 = fls_l2_w(y_t, y_p)
    mlwt_w, mlwt = fls_l1_mlwt_w(y_t, y_p)
    fft_w, fft = fls_l1_fft_w(y_t, y_p)
    mean_w, mean_l = fls_l1_mean_w(y_t, y_p)
    std_w, std_l = fls_l1_std_w(y_t, y_p)
    total_w = l1_w + l2_w + mlwt_w + fft_w + mean_w + std_w
    # the metric is computed on the raw (unweighted) pair
    return {
        'loss_pw_w': total_w,
        'loss_l1_w': l1_w, 'loss_l1': l1,
        'loss_l2_w': l2_w, 'loss_l2': l2,
        'loss_l1_mlwt_w': mlwt_w, 'loss_l1_mlwt': mlwt,
        'loss_l1_fft_w': fft_w, 'loss_l1_fft': fft,
        'loss_l1_mean_w': mean_w, 'loss_l1_mean': mean_l,
        'loss_l1_std_w': std_w, 'loss_l1_std': std_l,
        'met_l1': fmet_l1(y_t_i, y_p_i),
    }
#########################################################################################
def fls_adv(y_d_real, y_d_gen):
    """Relativistic-average adversarial loss for the generator (logit inputs)."""
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    rel_real = y_d_real - tf.reduce_mean(y_d_gen)
    rel_gen = y_d_gen - tf.reduce_mean(y_d_real)
    # generator wants real samples judged "less real" (0) and its own "more real" (1)
    return bce(tf.zeros_like(rel_real), rel_real) + bce(tf.ones_like(rel_gen), rel_gen)
def fls_adv_w(y_d_real, y_d_gen):
    """Generator adversarial loss and its weighted version (LOSS_WGT_TYP slot 0)."""
    loss = fls_adv(y_d_real, y_d_gen)
    return LOSS_WGT_TYP[0]*loss, loss
#########################################################################################
def fls_disc(y_d_real, y_d_gen):
    """Relativistic-average discriminator loss with one-sided label smoothing."""
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    rel_real = y_d_real - tf.reduce_mean(y_d_gen)
    rel_gen = y_d_gen - tf.reduce_mean(y_d_real)
    # real targets are smoothed from 1.0 down to 0.95
    d = 0.05
    real_labels = tf.ones_like(rel_real)-d
    return bce(real_labels, rel_real) + bce(tf.zeros_like(rel_gen), rel_gen)
def fls_disc_w(y_d_real, y_d_gen):
    """Discriminator adversarial loss and its weighted version (LOSS_WGT_TYP slot 1)."""
    loss = fls_disc(y_d_real, y_d_gen)
    return LOSS_WGT_TYP[1]*loss, loss
#########################################################################################
##################################### generator #########################################
#########################################################################################
# CGRDN global residual connection between input and output
def fcn_net_gen_v1(x, ftrs_i, ftrs_o, ftrs_g, ftrs_btn, kn_sz_dus, stri_dus, kn_sz, act_str, n_lays, n_rdb, n_grdb, name, parm_init=None, parm_reg=None, parm_cstr=None):
    """Generator trunk (CGRDN): first conv -> strided downsampling -> n_grdb GRDN
    groups with a concat+fusion skip connection -> transposed-conv upsampling ->
    last conv, with a global residual add from the input to the output.

    Dropout/normalization settings come from the global nvgb config; layer names
    are prefixed with `name`.
    """
    dilt_rt = 1
    parm_dp = nvgb.fcn_read_dropout_parm_dict()
    parm_norm = nvgb.fcn_read_normalization_parm_dict()
    dsn_typ = nvgb.DENSENET_TYPE
    use_bi = nfly.fcn_use_bias_norm_parm(parm_norm)
    # first/down/up/last stages run without an activation; only the GRDN core uses act_str
    act_str_fst = None
    act_str_down = None
    act_str_up = None
    act_str_last = None
    dpr_tra_0, dpr_tra_dn, dpr_tra_e, dpr_dn_1, dpr_dn_2 = nfly.fcn_set_dn_dropout(parm_dp['dp_opt'], parm_dp['dp_rt'])[0:5]
    # global residual
    x_i = x
    # first
    x = nfly.fcn_conv_2d(x, ftrs_i, kn_sz, (1, 1), act_str_fst, dilt_rt, True, 'same', dpr_tra_0, name + 'fst_0', parm_norm, parm_init, parm_reg, parm_cstr)
    # downsampling
    x = nfly.fcn_conv_2d_gen(x, ftrs_i, kn_sz_dus, stri_dus, act_str_down, dilt_rt, use_bi, 'same', dpr_tra_0, name + 'down_spl_0', parm_norm, parm_init, parm_reg, parm_cstr)
    # middle: stack of grouped residual dense blocks; outputs of every group are
    # concatenated, fused back to ftrs_i channels and added to the pre-stack skip
    x_skc = x
    x_cc = []
    for ik in range(n_grdb):
        name_grdb = name + 'g_' + str(ik+1)
        x = nnet.fcn_grdn(x, ftrs_g, ftrs_btn, kn_sz, act_str, n_lays, n_rdb, dilt_rt, (dpr_dn_1, dpr_dn_2), dsn_typ, name_grdb, parm_norm, parm_init, parm_reg, parm_cstr)
        x_cc.append(x)
    x_cc = tf.keras.layers.Concatenate(axis=3, name=name + 'g_concat')(x_cc)
    x = nfly.fcn_conv_2d(x_cc, ftrs_i, (1, 1), (1, 1), nvgb.DENSENET_FSL_ACT, 1, True, 'same', 0.0, name + 'g_fsl', parm_norm, parm_init, parm_reg, parm_cstr)
    x = tf.keras.layers.Add(name=name + 'g_add')([x, x_skc])
    # upsampling
    x = nfly.fcn_dconv_2d_gen(x, ftrs_i, kn_sz_dus, stri_dus, act_str_up, dilt_rt, use_bi, 'same', dpr_tra_e, name + 'up_spl_0', parm_norm, parm_init, parm_reg, parm_cstr)
    # last: no regularization/constraint on the output projection
    x = nfly.fcn_conv_2d(x, ftrs_o, kn_sz, (1, 1), act_str_last, dilt_rt, True, 'same', 0.0, name + 'last_0', parm_norm, parm_init, parm_reg={'reg_wtyp': 0, 'reg_atyp': 0}, parm_cstr={'cstr_typ': 0})
    # global residual
    x = tf.keras.layers.Add(name=name + 'add_0')([x, x_i])
    return x
def nn_model_gen(input_shape, input_name, prefix_layer, model_name):
    """Build the generator keras model for the architecture variant nvgb.MODV % 10.

    The original if/elif chain had no else branch, so an unsupported variant
    (e.g. modv == 0) only failed later with a NameError on undefined locals.
    The explicit table below fails fast with a KeyError at the lookup instead.
    """
    modv = nvgb.MODV % 10
    # variant table: modv -> (n_lays, n_rdb, n_grdb, ftrs_g); values unchanged
    # from the original chain (variants 3 and 5 are intentionally identical)
    _arch = {
        1: (4, 4, 4, 16),
        2: (4, 4, 4, 24),
        3: (4, 4, 4, 32),
        4: (9, 4, 4, 32),
        5: (4, 4, 4, 32),
        6: (5, 4, 4, 32),
        7: (6, 4, 4, 32),
        8: (7, 4, 4, 32),
        9: (8, 4, 4, 32),
    }
    n_lays, n_rdb, n_grdb, ftrs_g = _arch[modv]
    ftrs_i = 64
    ftrs_btn = nvgb.DENSENET_BOTTLENECK_FR*ftrs_g
    ftrs_o = nvgb.Y_SHAPE[2]
    kn_sz_dus = (4, 4)
    stri_dus = (2, 2)
    kn_sz = (3, 3)
    act_str = nvgb.ACTIVATION_STR
    x_i = tf.keras.layers.Input(shape=input_shape, name=input_name, dtype='float32')
    x = fcn_net_gen_v1(x_i, ftrs_i, ftrs_o, ftrs_g, ftrs_btn, kn_sz_dus, stri_dus, kn_sz, act_str, n_lays, n_rdb, n_grdb, prefix_layer)
    return tf.keras.models.Model(inputs=x_i, outputs=x, name=model_name)
################################### discriminator #######################################
def fcn_net_disc_v1(x, ftrs_o, act_str, name, parm_norm=None, parm_init=None, parm_reg=None, parm_cstr=None):
    """Discriminator trunk: three stride-2 downsampling convolutions, a stride-1
    middle block, and a linear 1-channel output map of patch logits.

    NOTE(review): the parm_norm/parm_init/parm_reg/parm_cstr arguments are
    immediately overwritten with hard-coded values below; they are kept only so
    the signature stays compatible with the other net builders.
    """
    parm_norm = {'norm_typ': 1, 'norm_pos': 2, 'norm_m': 0.95, 'norm_eps': 1e-3}
    parm_init = {'init_typ': 7, 'init_sfr': 0.02}
    # bug fix: the original literal repeated the 'reg_wkn' key
    # ({'reg_wtyp': 0, 'reg_wkn': 2e-6, 'reg_wkn': 2e-5}); Python keeps only the
    # last duplicate, so the dead 2e-6 entry is dropped here (behavior unchanged).
    # TODO confirm whether the first entry was meant to be a different key (e.g. 'reg_atyp').
    parm_reg = {'reg_wtyp': 0, 'reg_wkn': 2e-5}
    parm_cstr = None
    ftrs_i = 64
    kn_sz = (4, 4)
    dp_rt = 0.0
    dilt_rt = 1
    x = nfly.fcn_conv_2d(x, ftrs_i, kn_sz, (2, 2), act_str, dilt_rt, False, 'same', 0.0, name + 'downspl_1', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 128, 128, 1*ftrs_i)
    x = nfly.fcn_conv_2d_bna(x, 2*ftrs_i, kn_sz, (2, 2), act_str, dilt_rt, False, 'same', dp_rt, name + 'downspl_2', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 64, 64, 2*ftrs_i)
    x = nfly.fcn_conv_2d_bna(x, 4*ftrs_i, kn_sz, (2, 2), act_str, dilt_rt, False, 'same', dp_rt, name + 'downspl_3', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 32, 32, 4*ftrs_i)
    x = nfly.fcn_conv_2d_bna(x, 8*ftrs_i, kn_sz, (1, 1), act_str, dilt_rt, False, 'same', dp_rt, name + 'middle', parm_norm, parm_init, parm_reg, parm_cstr) # (bs, 32, 32, 8*ftrs_i)
    x = nfly.fcn_conv_2d(x, ftrs_o, kn_sz, (1, 1), None, dilt_rt, True, 'same', 0.0, name + 'last', parm_norm, parm_init, parm_reg={'reg_wtyp': 0, 'reg_atyp': 0}, parm_cstr={'cstr_typ': 0}) # (bs, 32, 32, 1)
    return x
def nn_model_disc(input_shape, input_name, prefix_layer, model_name):
    """Wrap the discriminator trunk into a keras model with a single logit channel."""
    n_out_ftrs = 1
    activation = 'leaky_relu'
    x_in = tf.keras.layers.Input(shape=input_shape, name=input_name, dtype='float32')
    x_out = fcn_net_disc_v1(x_in, n_out_ftrs, activation, prefix_layer)
    return tf.keras.models.Model(inputs=x_in, outputs=x_out, name=model_name)
#########################################################################################
######################################## model ##########################################
#########################################################################################
if GAN_TYP == 1:
    class My_model(tf.keras.Model):
        """GAN training wrapper (GAN_TYP == 1).

        Holds a generator and a discriminator, each with its own optimizer and
        learning-rate schedule, and implements custom train/test steps combining
        a relativistic-average adversarial loss with the enabled pixel-wise terms.
        """
        def __init__(self, input_shape, *args, **kwargs):
            # train/val metric attributes come in pairs; the val_* attributes reuse
            # the same display name so keras prefixes them with "val_" in the logs
            super(My_model, self).__init__(*args, **kwargs)
            self.gen = nn_model_gen(input_shape, 'input_gen', 'gen_', 'nEM_model_rest')
            self.disc = nn_model_disc((input_shape[0], input_shape[1], 2), 'input_disc', 'disc_', 'nEM_model_disc')
            self.met_l1 = tf.keras.metrics.Mean(name="met_l1")
            self.val_met_l1= tf.keras.metrics.Mean(name="met_l1")
            self.loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
            self.val_loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
            self.loss_gen_w = tf.keras.metrics.Mean(name="loss_gen_w")
            self.val_loss_gen_w = tf.keras.metrics.Mean(name="loss_gen_w")
            self.loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
            self.val_loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
            self.loss_disc_adv_reg = tf.keras.metrics.Mean(name="loss_disc_adv_reg")
            self.val_loss_disc_adv_reg = tf.keras.metrics.Mean(name="loss_disc_adv_reg")
            if LOSS_TYP[0]:
                self.loss_gen_adv_w = tf.keras.metrics.Mean(name="loss_gen_adv_w")
                self.loss_gen_adv = tf.keras.metrics.Mean(name="loss_gen_adv")
                self.val_loss_gen_adv_w = tf.keras.metrics.Mean(name="loss_gen_adv_w")
                # NOTE(review): "val_loss_gen_adv" breaks the naming pattern of the
                # other val metrics (which reuse the train-side name) — confirm intended
                self.val_loss_gen_adv = tf.keras.metrics.Mean(name="val_loss_gen_adv")
            if LOSS_TYP[1]:
                self.loss_disc_adv_w = tf.keras.metrics.Mean(name="loss_disc_adv_w")
                self.loss_disc_adv = tf.keras.metrics.Mean(name="loss_disc_adv")
                self.val_loss_disc_adv_w = tf.keras.metrics.Mean(name="loss_disc_adv_w")
                self.val_loss_disc_adv = tf.keras.metrics.Mean(name="loss_disc_adv")
            if LOSS_TYP[2]:
                self.loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
                self.loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
                self.val_loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
                self.val_loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
            if LOSS_TYP[3]:
                self.loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
                self.loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
                self.val_loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
                self.val_loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
            if LOSS_TYP[4]:
                self.loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
                self.loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
                self.val_loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
                self.val_loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
            if LOSS_TYP[5]:
                self.loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
                self.loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
                self.val_loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
                self.val_loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
            if LOSS_TYP[6]:
                self.loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
                self.loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
                self.val_loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
                self.val_loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
            if LOSS_TYP[7]:
                self.loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
                self.loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
                self.val_loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
                self.val_loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
            # ibg counts processed batches; ibg_m is its cap, used by fcn_x option 2
            self.ibg = tf.Variable(0.0, dtype=tf.float32, trainable=False)
            self.ibg_m = 2*nvgb.TRAIN_N_DATA//nvgb.BATCH_SIZE
        def compile(self, parm):
            """Create both optimizers and their lr/momentum schedules from `parm`."""
            super(My_model, self).compile()
            self.gen_opt = nfop.fcn_get_optimizer_from_vgb()
            self.lr_schedule_gen = nfcb.Cb_Lr_schedule_base(
                lr_min=parm['lr_min'],
                lr_max=parm['opt_lr'],
                m_min=parm['m_0'],
                m_max=parm['opt_m'],
                decay_steps=parm['decay_steps'],
                decay_rate=parm['decay_rate'],
                steps_per_cycle=parm['decay_steps'],
                warmup_steps=parm['warmup_steps'],
                cooldown_steps=parm['cooldown_steps'],
                lr_0=parm['lr_0'],
                warmup_steps_0=parm['warmup_steps_0'],
                cooldown_steps_0=parm['cooldown_steps_0'],
                decay_steps_0=parm['decay_steps_0'],
                lrs_m_pow=parm['lrs_m_pow'],
                lrs_lr_pow=parm['lrs_lr_pow'])
#########################################################################################
            # the discriminator uses its own optimizer settings, independent of parm
            opt_typ, opt_lr, opt_m = 3, nvgb.OPTIMIZER_LR, 0.5
            self.disc_opt = nfop.fcn_get_optimizer(opt_typ=opt_typ, opt_lr=opt_lr,
                opt_m=opt_m, opt_nesterov=nvgb.OPTIMIZER_NESTEROV,
                opt_beta_2=nvgb.OPTIMIZER_BETA_2, opt_eps=nvgb.OPTIMIZER_EPSILON,
                opt_ctyp=0, opt_cval=nvgb.OPTIMIZER_CLIP_VALUE)
            self.lr_schedule_disc = nfcb.Cb_Lr_schedule_base(
                lr_min=1e-4*opt_lr,
                lr_max=opt_lr,
                m_min=0.0,
                m_max=opt_m,
                decay_steps=parm['decay_steps'],
                decay_rate=parm['decay_rate'],
                steps_per_cycle=parm['decay_steps'],
                warmup_steps=parm['warmup_steps'],
                cooldown_steps=parm['cooldown_steps'],
                lr_0=1e-4*opt_lr,
                warmup_steps_0=parm['warmup_steps_0'],
                cooldown_steps_0=parm['cooldown_steps_0'],
                decay_steps_0=parm['decay_steps_0'],
                lrs_m_pow=parm['lrs_m_pow'],
                lrs_lr_pow=parm['lrs_lr_pow'])
        def reset_opt_iter(self):
            """Reset both optimizers' iteration counters to zero."""
            tf.keras.backend.set_value(self.gen_opt.iterations, 0)
            tf.keras.backend.set_value(self.disc_opt.iterations, 0)
        def set_opt_lr_m(self):
            """Push the scheduled lr/momentum values into both optimizers."""
            self.lr_schedule_gen.get_set_opt_lr_m(self.gen_opt)
            self.lr_schedule_disc.get_set_opt_lr_m(self.disc_opt)
        def inc_opt_counter(self):
            """Advance both lr schedules by one step."""
            self.lr_schedule_gen.inc_counter()
            self.lr_schedule_disc.inc_counter()
        def call(self, inputs, training=None, mask=None):
            """Inference runs only the generator."""
            return self.gen(inputs, training)
        @tf.function
        def train_step(self, data):
            """One optimization step for both networks on a single batch."""
            x, y = data
            x = fcn_x(x, y, self.ibg, self.ibg_m)
            with tf.GradientTape() as tape_gen, tf.GradientTape() as tape_disc:
                # Forward pass
                y_p = self.gen(x, training=True)
                # pixelwise loss
                loss_gen_pw_dict = fls_pw_w(y, y_p)
                x_d, x_d_p = fcn_input_disc(x, y, y_p)
                y_d = self.disc(x_d, training=True)
                y_d_p = self.disc(x_d_p, training=True)
                # gan-gen loss
                loss_gen_adv_w, loss_gen_adv = fls_adv_w(y_d, y_d_p)
                # gen loss
                loss_gen_w = loss_gen_adv_w + loss_gen_pw_dict['loss_pw_w']
                # gan-disc loss
                loss_disc_adv_w, loss_disc_adv = fls_disc_w(y_d, y_d_p)
                # regularization loss
                loss_gen_reg = tf.reduce_sum(self.gen.losses)
                loss_disc_adv_reg = tf.reduce_sum(self.disc.losses)
                loss_gen_t = loss_gen_w + loss_gen_reg
                loss_disc_adv_t = loss_disc_adv_w + loss_disc_adv_reg
            # Compute gradient
            grad_gen_t = tape_gen.gradient(loss_gen_t, self.gen.trainable_variables)
            grad_disc_t = tape_disc.gradient(loss_disc_adv_t, self.disc.trainable_variables)
            #clip gradients
            grad_gen_t = nfop.fcn_optimizer_clip_gradients(self.gen_opt, grad_gen_t)
            grad_disc_t = nfop.fcn_optimizer_clip_gradients(self.disc_opt, grad_disc_t)
            # Update gradient
            self.gen_opt.apply_gradients(zip(grad_gen_t, self.gen.trainable_variables))
            self.disc_opt.apply_gradients(zip(grad_disc_t, self.disc.trainable_variables))
            # save metrics
            self.met_l1.update_state(loss_gen_pw_dict['met_l1'])
            metrics_out = {'met_l1': self.met_l1.result()}
            # save losses
            self.loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
            metrics_out.update({'loss_pw_w': self.loss_pw_w.result()})
            self.loss_gen_w.update_state(loss_gen_w)
            metrics_out.update({'loss_gen_w': self.loss_gen_w.result()})
            self.loss_gen_reg.update_state(loss_gen_reg)
            metrics_out.update({'loss_gen_reg': self.loss_gen_reg.result()})
            self.loss_disc_adv_reg.update_state(loss_disc_adv_reg)
            metrics_out.update({'loss_disc_adv_reg': self.loss_disc_adv_reg.result()})
            if LOSS_TYP[0]:
                self.loss_gen_adv_w.update_state(loss_gen_adv_w)
                self.loss_gen_adv.update_state(loss_gen_adv)
                metrics_out.update({'loss_gen_adv_w': self.loss_gen_adv_w.result(), 'loss_gen_adv': self.loss_gen_adv.result()})
            if LOSS_TYP[1]:
                self.loss_disc_adv_w.update_state(loss_disc_adv_w)
                self.loss_disc_adv.update_state(loss_disc_adv)
                metrics_out.update({'loss_disc_adv_w': self.loss_disc_adv_w.result(), 'loss_disc_adv': self.loss_disc_adv.result()})
            if LOSS_TYP[2]:
                self.loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
                self.loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
                metrics_out.update({'loss_l1_w': self.loss_l1_w.result(), 'loss_l1': self.loss_l1.result()})
            if LOSS_TYP[3]:
                self.loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
                self.loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
                metrics_out.update({'loss_l2_w': self.loss_l2_w.result(), 'loss_l2': self.loss_l2.result()})
            if LOSS_TYP[4]:
                self.loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
                self.loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
                metrics_out.update({'loss_l1_mlwt_w': self.loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.loss_l1_mlwt.result()})
            if LOSS_TYP[5]:
                self.loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
                self.loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
                metrics_out.update({'loss_l1_fft_w': self.loss_l1_fft_w.result(), 'loss_l1_fft': self.loss_l1_fft.result()})
            if LOSS_TYP[6]:
                self.loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
                self.loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
                metrics_out.update({'loss_l1_mean_w': self.loss_l1_mean_w.result(), 'loss_l1_mean': self.loss_l1_mean.result()})
            if LOSS_TYP[7]:
                self.loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
                self.loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
                metrics_out.update({'loss_l1_std_w': self.loss_l1_std_w.result(), 'loss_l1_std': self.loss_l1_std.result()})
            self.ibg.assign_add(1.0)
            return metrics_out
        @tf.function
        def test_step(self, data):
            """Evaluation step: same losses as train_step, without the fcn_x input mixing or weight updates."""
            x, y = data
            # Forward pass
            y_p = self.gen(x, training=False)
            # pixelwise loss
            loss_gen_pw_dict = fls_pw_w(y, y_p)
            x_d, x_d_p = fcn_input_disc(x, y, y_p)
            y_d = self.disc(x_d, training=False)
            y_d_p = self.disc(x_d_p, training=False)
            # gan-gen loss
            loss_gen_adv_w, loss_gen_adv = fls_adv_w(y_d, y_d_p)
            # gen loss
            loss_gen_w = loss_gen_adv_w + loss_gen_pw_dict['loss_pw_w']
            # gan-disc loss
            loss_disc_adv_w, loss_disc_adv = fls_disc_w(y_d, y_d_p)
            # regularization loss
            loss_gen_reg = tf.reduce_sum(self.gen.losses)
            loss_disc_adv_reg = tf.reduce_sum(self.disc.losses)
            # save metrics
            self.val_met_l1.update_state(loss_gen_pw_dict['met_l1'])
            metrics_out = {'met_l1': self.val_met_l1.result()}
            # save losses
            self.val_loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
            metrics_out.update({'loss_pw_w': self.val_loss_pw_w.result()})
            self.val_loss_gen_w.update_state(loss_gen_w)
            metrics_out.update({'loss_gen_w': self.val_loss_gen_w.result()})
            self.val_loss_gen_reg.update_state(loss_gen_reg)
            metrics_out.update({'loss_gen_reg': self.val_loss_gen_reg.result()})
            self.val_loss_disc_adv_reg.update_state(loss_disc_adv_reg)
            metrics_out.update({'loss_disc_adv_reg': self.val_loss_disc_adv_reg.result()})
            if LOSS_TYP[0]:
                self.val_loss_gen_adv_w.update_state(loss_gen_adv_w)
                self.val_loss_gen_adv.update_state(loss_gen_adv)
                metrics_out.update({'loss_gen_adv_w': self.val_loss_gen_adv_w.result(), 'loss_gen_adv': self.val_loss_gen_adv.result()})
            if LOSS_TYP[1]:
                self.val_loss_disc_adv_w.update_state(loss_disc_adv_w)
                self.val_loss_disc_adv.update_state(loss_disc_adv)
                metrics_out.update({'loss_disc_adv_w': self.val_loss_disc_adv_w.result(), 'loss_disc_adv': self.val_loss_disc_adv.result()})
            if LOSS_TYP[2]:
                self.val_loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
                self.val_loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
                metrics_out.update({'loss_l1_w': self.val_loss_l1_w.result(), 'loss_l1': self.val_loss_l1.result()})
            if LOSS_TYP[3]:
                self.val_loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
                self.val_loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
                metrics_out.update({'loss_l2_w': self.val_loss_l2_w.result(), 'loss_l2': self.val_loss_l2.result()})
            if LOSS_TYP[4]:
                self.val_loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
                self.val_loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
                metrics_out.update({'loss_l1_mlwt_w': self.val_loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.val_loss_l1_mlwt.result()})
            if LOSS_TYP[5]:
                self.val_loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
                self.val_loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
                metrics_out.update({'loss_l1_fft_w': self.val_loss_l1_fft_w.result(), 'loss_l1_fft': self.val_loss_l1_fft.result()})
            if LOSS_TYP[6]:
                self.val_loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
                self.val_loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
                metrics_out.update({'loss_l1_mean_w': self.val_loss_l1_mean_w.result(), 'loss_l1_mean': self.val_loss_l1_mean.result()})
            if LOSS_TYP[7]:
                self.val_loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
                self.val_loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
                metrics_out.update({'loss_l1_std_w': self.val_loss_l1_std_w.result(), 'loss_l1_std': self.val_loss_l1_std.result()})
            return metrics_out
        @property
        def metrics(self):
            """List every active metric so keras resets their state between epochs."""
            metrics_out = [self.met_l1, self.val_met_l1,
                           self.loss_pw_w, self.val_loss_pw_w,
                           self.loss_gen_w, self.val_loss_gen_w,
                           self.loss_gen_reg, self.val_loss_gen_reg,
                           self.loss_disc_adv_reg, self.val_loss_disc_adv_reg]
            if LOSS_TYP[0]:
                metrics_out.extend([self.loss_gen_adv_w, self.loss_gen_adv, self.val_loss_gen_adv_w, self.val_loss_gen_adv])
            if LOSS_TYP[1]:
                metrics_out.extend([self.loss_disc_adv_w, self.loss_disc_adv, self.val_loss_disc_adv_w, self.val_loss_disc_adv])
            if LOSS_TYP[2]:
                metrics_out.extend([self.loss_l1_w, self.loss_l1, self.val_loss_l1_w, self.val_loss_l1])
            if LOSS_TYP[3]:
                metrics_out.extend([self.loss_l2_w, self.loss_l2, self.val_loss_l2_w, self.val_loss_l2])
            if LOSS_TYP[4]:
                metrics_out.extend([self.loss_l1_mlwt_w, self.loss_l1_mlwt, self.val_loss_l1_mlwt_w, self.val_loss_l1_mlwt])
            if LOSS_TYP[5]:
                metrics_out.extend([self.loss_l1_fft_w, self.loss_l1_fft, self.val_loss_l1_fft_w, self.val_loss_l1_fft])
            if LOSS_TYP[6]:
                metrics_out.extend([self.loss_l1_mean_w, self.loss_l1_mean, self.val_loss_l1_mean_w, self.val_loss_l1_mean])
            if LOSS_TYP[7]:
                metrics_out.extend([self.loss_l1_std_w, self.loss_l1_std, self.val_loss_l1_std_w, self.val_loss_l1_std])
            return metrics_out
else:
    class My_model(tf.keras.Model):
        """Restoration model without adversarial training.

        A single generator is optimised with a weighted sum of pixelwise losses;
        which individual loss terms exist is selected by the LOSS_TYP flags.
        """
        def __init__(self, input_shape, *args, **kwargs):
            """Build the generator and one Mean tracker per enabled train/val loss."""
            super(My_model, self).__init__(*args, **kwargs)
            self.gen = nn_model_gen(input_shape, 'input_gen', 'gen_', 'nEM_model_rest')
            # train/val trackers are distinct objects that share a display name
            self.met_l1 = tf.keras.metrics.Mean(name="met_l1")
            self.val_met_l1= tf.keras.metrics.Mean(name="met_l1")
            self.loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
            self.val_loss_pw_w = tf.keras.metrics.Mean(name="loss_pw_w")
            self.loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
            self.val_loss_gen_reg = tf.keras.metrics.Mean(name="loss_gen_reg")
            # optional loss trackers; each flag is mirrored in train_step/test_step
            if LOSS_TYP[2]:
                self.loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
                self.loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
                self.val_loss_l1_w = tf.keras.metrics.Mean(name="loss_l1_w")
                self.val_loss_l1 = tf.keras.metrics.Mean(name="loss_l1")
            if LOSS_TYP[3]:
                self.loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
                self.loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
                self.val_loss_l2_w = tf.keras.metrics.Mean(name="loss_l2_w")
                self.val_loss_l2 = tf.keras.metrics.Mean(name="loss_l2")
            if LOSS_TYP[4]:
                self.loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
                self.loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
                self.val_loss_l1_mlwt_w = tf.keras.metrics.Mean(name="loss_l1_mlwt_w")
                self.val_loss_l1_mlwt = tf.keras.metrics.Mean(name="loss_l1_mlwt")
            if LOSS_TYP[5]:
                self.loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
                self.loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
                self.val_loss_l1_fft_w = tf.keras.metrics.Mean(name="loss_l1_fft_w")
                self.val_loss_l1_fft = tf.keras.metrics.Mean(name="loss_l1_fft")
            if LOSS_TYP[6]:
                self.loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
                self.loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
                self.val_loss_l1_mean_w = tf.keras.metrics.Mean(name="loss_l1_mean_w")
                self.val_loss_l1_mean = tf.keras.metrics.Mean(name="loss_l1_mean")
            if LOSS_TYP[7]:
                self.loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
                self.loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
                self.val_loss_l1_std_w = tf.keras.metrics.Mean(name="loss_l1_std_w")
                self.val_loss_l1_std = tf.keras.metrics.Mean(name="loss_l1_std")
            # global-batch counter (and its period) consumed by fcn_x in train_step
            self.ibg = tf.Variable(0.0, dtype=tf.float32, trainable=False)
            self.ibg_m = 2*nvgb.TRAIN_N_DATA//nvgb.BATCH_SIZE
        def compile(self, parm):
            """Create the generator optimizer and its lr/momentum schedule from `parm`."""
            super(My_model, self).compile()
            self.gen_opt = nfop.fcn_get_optimizer_from_vgb()
            self.lr_schedule_gen = nfcb.Cb_Lr_schedule_base(
                lr_min=parm['lr_min'],
                lr_max=parm['opt_lr'],
                m_min=parm['m_0'],
                m_max=parm['opt_m'],
                decay_steps=parm['decay_steps'],
                decay_rate=parm['decay_rate'],
                steps_per_cycle=parm['decay_steps'],
                warmup_steps=parm['warmup_steps'],
                cooldown_steps=parm['cooldown_steps'],
                lr_0=parm['lr_0'],
                warmup_steps_0=parm['warmup_steps_0'],
                cooldown_steps_0=parm['cooldown_steps_0'],
                decay_steps_0=parm['decay_steps_0'],
                lrs_m_pow=parm['lrs_m_pow'],
                lrs_lr_pow=parm['lrs_lr_pow'])
        def reset_opt_iter(self):
            """Zero the optimizer's iteration counter (called from the train-begin callback)."""
            tf.keras.backend.set_value(self.gen_opt.iterations, 0)
        def set_opt_lr_m(self):
            """Push the scheduled learning rate/momentum into the optimizer."""
            self.lr_schedule_gen.get_set_opt_lr_m(self.gen_opt)
        def inc_opt_counter(self):
            """Advance the lr schedule's internal step counter."""
            self.lr_schedule_gen.inc_counter()
        def call(self, inputs, training=None, mask=None):
            """Forward pass: delegate directly to the generator."""
            return self.gen(inputs, training)
        @tf.function
        def train_step(self, data):
            """One optimisation step on (x, y); returns the running metric results."""
            x, y = data
            x = fcn_x(x, y, self.ibg, self.ibg_m)
            with tf.GradientTape() as tape_gen:
                # Forward pass
                y_p = self.gen(x, training=True)
                # pixelwise loss
                loss_gen_pw_dict = fls_pw_w(y, y_p)
                # regularization loss
                loss_gen_reg = tf.reduce_sum(self.gen.losses)
                loss_gen_t = loss_gen_reg + loss_gen_pw_dict['loss_pw_w']
            # Compute gradient
            grad_gen_t = tape_gen.gradient(loss_gen_t, self.gen.trainable_variables)
            # clip gradients
            grad_gen_t = nfop.fcn_optimizer_clip_gradients(self.gen_opt, grad_gen_t)
            # Update gradient
            self.gen_opt.apply_gradients(zip(grad_gen_t, self.gen.trainable_variables))
            # save metrics
            self.met_l1.update_state(loss_gen_pw_dict['met_l1'])
            metrics_out = {'met_l1': self.met_l1.result()}
            # save losses
            self.loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
            metrics_out.update({'loss_pw_w': self.loss_pw_w.result()})
            self.loss_gen_reg.update_state(loss_gen_reg)
            metrics_out.update({'loss_gen_reg': self.loss_gen_reg.result()})
            if LOSS_TYP[2]:
                self.loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
                self.loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
                metrics_out.update({'loss_l1_w': self.loss_l1_w.result(), 'loss_l1': self.loss_l1.result()})
            if LOSS_TYP[3]:
                self.loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
                self.loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
                metrics_out.update({'loss_l2_w': self.loss_l2_w.result(), 'loss_l2': self.loss_l2.result()})
            if LOSS_TYP[4]:
                self.loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
                self.loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
                metrics_out.update({'loss_l1_mlwt_w': self.loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.loss_l1_mlwt.result()})
            if LOSS_TYP[5]:
                self.loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
                self.loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
                metrics_out.update({'loss_l1_fft_w': self.loss_l1_fft_w.result(), 'loss_l1_fft': self.loss_l1_fft.result()})
            if LOSS_TYP[6]:
                self.loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
                self.loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
                metrics_out.update({'loss_l1_mean_w': self.loss_l1_mean_w.result(), 'loss_l1_mean': self.loss_l1_mean.result()})
            if LOSS_TYP[7]:
                self.loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
                self.loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
                metrics_out.update({'loss_l1_std_w': self.loss_l1_std_w.result(), 'loss_l1_std': self.loss_l1_std.result()})
            # advance the global-batch counter used by fcn_x
            self.ibg.assign_add(1.0)
            return metrics_out
        @tf.function
        def test_step(self, data):
            """Validation step: same losses as train_step, no fcn_x call and no weight updates."""
            x, y = data
            # Forward pass
            y_p = self.gen(x, training=False)
            # pixelwise loss
            loss_gen_pw_dict = fls_pw_w(y, y_p)
            # regularization loss
            loss_gen_reg = tf.reduce_sum(self.gen.losses)
            # save metrics
            self.val_met_l1.update_state(loss_gen_pw_dict['met_l1'])
            metrics_out = {'met_l1': self.val_met_l1.result()}
            # save losses
            self.val_loss_pw_w.update_state(loss_gen_pw_dict['loss_pw_w'])
            metrics_out.update({'loss_pw_w': self.val_loss_pw_w.result()})
            self.val_loss_gen_reg.update_state(loss_gen_reg)
            metrics_out.update({'loss_gen_reg': self.val_loss_gen_reg.result()})
            if LOSS_TYP[2]:
                self.val_loss_l1_w.update_state(loss_gen_pw_dict['loss_l1_w'])
                self.val_loss_l1.update_state(loss_gen_pw_dict['loss_l1'])
                metrics_out.update({'loss_l1_w': self.val_loss_l1_w.result(), 'loss_l1': self.val_loss_l1.result()})
            if LOSS_TYP[3]:
                self.val_loss_l2_w.update_state(loss_gen_pw_dict['loss_l2_w'])
                self.val_loss_l2.update_state(loss_gen_pw_dict['loss_l2'])
                metrics_out.update({'loss_l2_w': self.val_loss_l2_w.result(), 'loss_l2': self.val_loss_l2.result()})
            if LOSS_TYP[4]:
                self.val_loss_l1_mlwt_w.update_state(loss_gen_pw_dict['loss_l1_mlwt_w'])
                self.val_loss_l1_mlwt.update_state(loss_gen_pw_dict['loss_l1_mlwt'])
                metrics_out.update({'loss_l1_mlwt_w': self.val_loss_l1_mlwt_w.result(), 'loss_l1_mlwt': self.val_loss_l1_mlwt.result()})
            if LOSS_TYP[5]:
                self.val_loss_l1_fft_w.update_state(loss_gen_pw_dict['loss_l1_fft_w'])
                self.val_loss_l1_fft.update_state(loss_gen_pw_dict['loss_l1_fft'])
                metrics_out.update({'loss_l1_fft_w': self.val_loss_l1_fft_w.result(), 'loss_l1_fft': self.val_loss_l1_fft.result()})
            if LOSS_TYP[6]:
                self.val_loss_l1_mean_w.update_state(loss_gen_pw_dict['loss_l1_mean_w'])
                self.val_loss_l1_mean.update_state(loss_gen_pw_dict['loss_l1_mean'])
                metrics_out.update({'loss_l1_mean_w': self.val_loss_l1_mean_w.result(), 'loss_l1_mean': self.val_loss_l1_mean.result()})
            if LOSS_TYP[7]:
                self.val_loss_l1_std_w.update_state(loss_gen_pw_dict['loss_l1_std_w'])
                self.val_loss_l1_std.update_state(loss_gen_pw_dict['loss_l1_std'])
                metrics_out.update({'loss_l1_std_w': self.val_loss_l1_std_w.result(), 'loss_l1_std': self.val_loss_l1_std.result()})
            return metrics_out
        @property
        def metrics(self):
            """Trackers reset by `reset_metrics()`; optional entries mirror __init__'s flags."""
            metrics_out = [self.met_l1, self.val_met_l1,
                           self.loss_pw_w, self.val_loss_pw_w,
                           self.loss_gen_reg, self.val_loss_gen_reg]
            if LOSS_TYP[2]:
                metrics_out.extend([self.loss_l1_w, self.loss_l1, self.val_loss_l1_w, self.val_loss_l1])
            if LOSS_TYP[3]:
                metrics_out.extend([self.loss_l2_w, self.loss_l2, self.val_loss_l2_w, self.val_loss_l2])
            if LOSS_TYP[4]:
                metrics_out.extend([self.loss_l1_mlwt_w, self.loss_l1_mlwt, self.val_loss_l1_mlwt_w, self.val_loss_l1_mlwt])
            if LOSS_TYP[5]:
                metrics_out.extend([self.loss_l1_fft_w, self.loss_l1_fft, self.val_loss_l1_fft_w, self.val_loss_l1_fft])
            if LOSS_TYP[6]:
                metrics_out.extend([self.loss_l1_mean_w, self.loss_l1_mean, self.val_loss_l1_mean_w, self.val_loss_l1_mean])
            if LOSS_TYP[7]:
                metrics_out.extend([self.loss_l1_std_w, self.loss_l1_std, self.val_loss_l1_std_w, self.val_loss_l1_std])
            return metrics_out
#########################################################################################
####################################### write image #####################################
#########################################################################################
class Cb_Test_Plot(tf.keras.callbacks.Callback):
    """Training callback that logs lr/momentum to TensorBoard, periodically
    renders test-set predictions as an image grid, and checkpoints weights."""
    def __init__(self, parm): # add other arguments to __init__ if you need
        """Load the test batch, precompute its normalisation and open the writer."""
        super(Cb_Test_Plot, self).__init__()
        self.wi_log_dir = os.path.join(parm['log_dir'], 'test')
        self.n_spl = parm['test_n_samples']
        self.parm = parm
        # spacing for plotting
        self.wspace = 0.025
        self.vspace = 0.010
        self.wsize_fig = 25.0
        self.file_test_writer = tf.summary.create_file_writer(logdir=self.wi_log_dir, max_queue=1)
        x, y = self.fcn_load_test_mat(self.parm['test_mat_path'])
        self.wi_ds_x = tf.data.Dataset.from_tensor_slices(x).batch(self.n_spl)
        self.wi_x = self.fcn_norm_x_image(x)
        # shift/scale reused so targets and predictions are normalised identically
        self.wi_y_sft, self.wi_y_sc = nfge.fcn_get_norm_parm_image(y, [1, 2])
        self.wi_y = y
        # global-batch and checkpoint counters
        self.wi_ibg = np.array(0).astype(np.int64)
        self.wi_ickp = np.array(0).astype(np.int64)
    def fcn_load_test_mat(self, path_mat):
        """Read the first n_spl (x, y) test pairs from a .mat file as resized, scaled tensors."""
        x_mat, y_mat = nfge.fcn_ds_read_x_y_mat(path_mat, 'x', 'y', x_typ_mat, y_typ_mat, x_ndim, y_ndim)
        x_mat = x_mat[..., 0:self.n_spl].copy()
        # move the sample axis to the front: (..., n) -> (n, ...)
        x_mat = np.moveaxis(x_mat, 3, 0)
        x = tf.convert_to_tensor(x_mat, x_typ_tf[0])
        y_mat = y_mat[..., 0:self.n_spl].copy()
        y_mat = np.moveaxis(y_mat, 3, 0)
        y = tf.convert_to_tensor(y_mat, y_typ_tf[0])
        x, y = fcn_resize_xy(x, y, False)
        x, y = fcn_scale_inputs_x_y(x, y, [1, 2])
        return x, y
    def fcn_norm_x_image(self, x, axis=[1, 2]):
        """Normalise the inputs per sample and return them as a numpy array."""
        xn = nfge.fcn_norm_image(x, axis)
        return xn.numpy()
    def fcn_norm_y_image(self, y, axis=[1, 2]):
        """Normalise targets/predictions with the stored shift/scale parameters."""
        yn = nfge.fcn_norm_image(y, axis, self.wi_y_sft, self.wi_y_sc)
        return yn.numpy()
    def fcn_xy_image_gen(self, y_t, y_p):
        """Render a 4-row grid (input / prediction / target / normalised difference)
        for each test sample and return it as an image tensor for tf.summary."""
        x = self.wi_x
        if type(x) is not np.ndarray:
            x = x.numpy()
        if type(y_t) is not np.ndarray:
            y_t = y_t.numpy()
        if type(y_p) is not np.ndarray:
            y_p = y_p.numpy()
        yn = self.fcn_norm_y_image(y_t)
        yn_p = self.fcn_norm_y_image(y_p)
        rows = 4
        hsize_fig = (self.wsize_fig-(self.n_spl-1)*self.wspace)*rows/self.n_spl
        figure = plt.figure(figsize=(self.wsize_fig, hsize_fig))
        for ik in range(self.n_spl):
            x_ik = x[ik, ...].squeeze()
            dy = y_t[ik, ...].squeeze() - y_p[ik, ...].squeeze()
            # mean absolute error shown as the column title
            ee = np.mean(np.fabs(dy))
            # min-max normalise the difference image for display
            dyn = dy - np.min(dy)
            dyn = dyn/np.max(dyn)
            y_ik = yn[ik, ...].squeeze()
            y_p_ik = yn_p[ik, ...].squeeze()
            for iy in range(rows):
                # row order: input, prediction, ground truth, difference
                im_x_ik = x_ik if iy==0 else y_p_ik if iy==1 else y_ik if iy==2 else dyn
                ax = plt.subplot(4, self.n_spl, iy*self.n_spl + ik + 1)
                ax.imshow(im_x_ik)
                ax.set_xticks([])
                ax.set_yticks([])
                ax.grid(False)
                if iy==0:
                    title = 'e = {:4.3f}'.format(ee)
                    ax.set_title(title, fontsize=14)
        figure.subplots_adjust(hspace=self.vspace, wspace=self.wspace)
        figure.tight_layout()
        return nfge.fcn_plot_to_image(figure)
    def on_train_begin(self, logs=None):
        """Reset the optimizer iteration counter when training starts."""
        self.model.reset_opt_iter()
    def on_train_batch_begin(self, batch, logs=None):
        """Apply the lr schedule, then do periodic logging / plotting / checkpointing."""
        self.model.set_opt_lr_m()
        # saving learning rate and momentum
        if self.wi_ibg % self.parm['log_update_freq'] == 0:
            with self.file_test_writer.as_default():
                lr, m = nfop.fcn_read_opt_lr(self.model.gen_opt), nfop.fcn_read_opt_m(self.model.gen_opt)
                tf.summary.scalar(name='batch_gen_lr', data=lr, step=self.wi_ibg)
                tf.summary.scalar(name='batch_gen_m', data=m, step=self.wi_ibg)
                if GAN_TYP == 1:
                    lr, m = nfop.fcn_read_opt_lr(self.model.disc_opt), nfop.fcn_read_opt_m(self.model.disc_opt)
                    tf.summary.scalar(name='batch_disc_lr', data=lr, step=self.wi_ibg)
                    tf.summary.scalar(name='batch_disc_m', data=m, step=self.wi_ibg)
        # saving test data
        if self.wi_ibg % self.parm['test_update_freq'] == 0:
            y_p = self.model.predict(self.wi_ds_x)
            with self.file_test_writer.as_default():
                tf.summary.image(name='data', data=self.fcn_xy_image_gen(self.wi_y, y_p), step=self.wi_ibg)
        # saving weights
        if self.wi_ibg % self.parm['log_checkpoint_freq'] == 0:
            self.model.gen.save_weights(self.parm['ckp_gen_path'].format(self.wi_ickp))
            if GAN_TYP == 1:
                self.model.disc.save_weights(self.parm['ckp_disc_path'].format(self.wi_ickp))
            self.wi_ickp += 1
        # reset metrics
        if (self.wi_ibg > 0) and (self.wi_ibg % self.parm['reset_metric_freq'] == 0):
            self.model.reset_metrics()
        self.wi_ibg += 1
        self.model.inc_opt_counter()
#########################################################################################
#########################################################################################
#########################################################################################
def fcn_load_weights(model, path_dir, bb_load_disc=True, by_name=True):
    """Restore the newest generator (and optionally discriminator) .h5 weights
    found in `path_dir`; returns the model unchanged when the directory is absent."""
    if not os.path.exists(path_dir):
        return model
    weight_files = nfge.fcn_read_files(path_dir, ".h5")
    # checkpoint files carry a zero-padded counter, so the lexicographically
    # largest matching name is the most recent one
    gen_files = sorted(s for s in weight_files if "gen" in s)
    if gen_files:
        model.gen = nfge.fcn_load_weights(model.gen, gen_files[-1], by_name=by_name, skip_mismatch=False)
    if bb_load_disc:
        disc_files = sorted(s for s in weight_files if "disc" in s)
        if disc_files and (GAN_TYP == 1):
            model.disc = nfge.fcn_load_weights(model.disc, disc_files[-1], by_name=by_name, skip_mismatch=False)
    return model
def fcn_compile_and_fit(parm, bb_print_model_summary=False):
    """Build My_model, restore its weights, compile it (multi-GPU aware) and train it."""
    def build_and_compile(parm, bb_print_model_summary):
        """Instantiate, weight-load and compile the model inside the active strategy scope."""
        net = My_model(parm['input_shape'])
        if parm['bb_parm_search']:
            # hyper-parameter search: start every run from the shared initial weights
            if not os.path.exists(parm['grid_search_dir']):
                os.makedirs(parm['grid_search_dir'])
            # NOTE(review): this branch calls nfge.fcn_load_weights while the other
            # branch uses the module-level fcn_load_weights — confirm this is intentional.
            net = nfge.fcn_load_weights(net, parm['weigths_load_path'], bb_load_disc=True, by_name=True)
            net.save_weights(parm['weigths_search_path'])
        else:
            net = fcn_load_weights(net, parm['weigths_load_path'], bb_load_disc=True, by_name=True)
        # restart the global-batch counter consumed in train_step
        net.ibg.assign(0.0)
        if bb_print_model_summary:
            print(net.gen.summary())
            if GAN_TYP == 1:
                print(net.disc.summary())
        # print training information
        nvgb.fcn_print_training_parm(parm)
        net.compile(parm)
        return net
    # clear session
    tf.keras.backend.clear_session()
    # generate the model, wrapping it in a mirrored strategy on multi-GPU machines
    if nvgb.N_GPUS == 1:
        model = build_and_compile(parm, bb_print_model_summary)
    else:
        strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.ReductionToOneDevice())
        with strategy.scope():
            model = build_and_compile(parm, bb_print_model_summary)
    # set datasets
    train_dataset, val_dataset = nfge.fcn_load_datasets(fcn_ds_map, parm)
    if parm['bb_parm_search']:
        # no validation pass during a parameter search
        val_dataset = None
        parm['validation_steps'] = None
        parm['validation_freq'] = 1
    # fit
    model.fit(train_dataset,
              epochs=parm['epochs'],
              steps_per_epoch=parm['train_steps'],
              callbacks=parm['callbacks'],
              validation_data=val_dataset,
              validation_steps=parm['validation_steps'],
              validation_freq=parm['validation_freq'],
              initial_epoch=parm['initial_epoch'],
              verbose=parm['verbose_fit'])
#########################################################################################
#################################### initialization #####################################
#########################################################################################
def fcn_init(bb_parm_search=False):
    """Parse command-line options, pick the dataset from modv, and build the
    training-parameter dict (shapes, dtypes and checkpoint paths included)."""
    # parse arguments
    parser = nfge.fcn_parse_gen_input_parm()
    root = '/media/ssd_1'
    # the hundreds digit of `modv` selects the dataset
    dat_id = vars(parser.parse_args())['modv'] // 100
    # (the list below documents the LOSS_TYP indices used by the model classes)
    # 0: gan - gen
    # 1: gan - disc
    # 2: L1
    # 3: L2
    # 4: L1 - MLWT
    # 5: L1 - FFT
    # 6: mean
    # 7: std
    if dat_id == 1:
        dir_db = 'dataset_hrsem'
    elif dat_id == 2:
        dir_db = 'dataset_lrsem'
    elif dat_id == 3:
        dir_db = 'dataset_hrstem'
    elif dat_id == 4:
        dir_db = 'dataset_lrstem'
    elif dat_id == 5:
        dir_db = 'dataset_hrtem'
    elif dat_id == 6:
        dir_db = 'dataset_lrtem'
    # NOTE(review): a dat_id outside 1..6 leaves dir_db unbound and raises a
    # NameError below — consider raising an explicit ValueError instead.
    parm = nfge.fcn_init(parser,
                         db_dir=dir_db,
                         opt_nesterov=False,
                         opt_beta_2=0.999,
                         opt_eps=1.0e-5,
                         opt_ctyp=0, # 0: None, 1: clip by value, 2: clip by norm
                         opt_cval=2.0, # 0: 4.0, 1: 8.0
                         norm_trainable=True,
                         norm_eps=1e-3,
                         norm_pos=1, # 1: before layer, 2: after layer
                         norm_reg_typ=0,
                         norm_reg_gamma=0,
                         norm_reg_beta=1e-8,
                         norm_cstr_typ=0,
                         norm_cstr_v0=0.01,
                         norm_cstr_ve=8.0,
                         norm_cstr_rt=0.01,
                         norm_cstr_ax=[0],
                         norm_renorm=False,
                         norm_renorm_m=0.99,
                         dp_spt=False, # use spatial dropout
                         n_classes=1,
                         res_sfr=1.0,
                         dsn_typ=3, # 1: dense, 2: dense bottleneck, 3: residual dense, 4: residual dense bottleneck, 5: down/up residual dense, 6: down/up residual dense bottleneck
                         dsn_compr=0.5,
                         dsn_in_fr=2,
                         dsn_btn_fr=4,
                         dsn_fsl_act=None,
                         bb_parm_search=bb_parm_search,
                         root=root,
                         fn_ext=None,
                         gpu_memory_limit=None)
    # network input/output shapes are the global shapes scaled down by DS_FTR
    parm['input_shape'] = (nvgb.X_SHAPE[0]//DS_FTR, nvgb.X_SHAPE[1]//DS_FTR, nvgb.X_SHAPE[2])
    parm['output_shape'] = (nvgb.Y_SHAPE[0]//DS_FTR, nvgb.Y_SHAPE[1]//DS_FTR, nvgb.Y_SHAPE[2])
    parm['x_shape'] = (nvgb.X_SHAPE[0], nvgb.X_SHAPE[1], nvgb.Y_SHAPE[2])
    parm['y_shape'] = (nvgb.Y_SHAPE[0], nvgb.Y_SHAPE[1], nvgb.Y_SHAPE[2])
    parm['x_typ_mat'] = x_typ_mat
    parm['y_typ_mat'] = y_typ_mat
    parm['x_typ_rtf'] = x_typ_rtf
    parm['y_typ_rtf'] = y_typ_rtf
    parm['x_typ_tf'] = x_typ_tf
    parm['y_typ_tf'] = y_typ_tf
    parm['ckp_default'] = False
    # numbered per-network checkpoint paths next to the default checkpoint file
    log_dir_root = os.path.split(parm['checkpoint_path'])[0]
    parm['ckp_gen_path'] = os.path.join(log_dir_root, 'gen-{:04d}.h5')
    parm['ckp_disc_path'] = os.path.join(log_dir_root, 'disc-{:04d}.h5')
return parm | 52,012 | 34.215301 | 204 | py |
r_em | r_em-master/tk_r_em/tk_r_em.py | """
r_em network suites designed to restore different modalities of electron microscopy data
Author: Ivan Lobato
Email: Ivanlh20@gmail.com
"""
import os
import pathlib
from typing import Tuple
import h5py
import numpy as np
import tensorflow as tf
def expand_dimensions(x):
    """Bring an image array into 4-D (batch, height, width, channel) form.

    A 2-D (H, W) image gains both a batch and a channel axis.  A 3-D array
    whose last axis is not 1 is treated as a stack of 2-D images and gains a
    channel axis.  Any other input is returned unchanged.
    """
    if x.ndim == 2:
        return x[np.newaxis, ..., np.newaxis]
    if x.ndim == 3 and x.shape[-1] != 1:
        return x[..., np.newaxis]
    return x
def add_extra_row_or_column(x):
    """Pad a (N, H, W, C) batch with one row/column of the per-sample mean so
    that both spatial dimensions become even; even inputs pass through untouched."""
    if x.shape[1] % 2 == 1:
        sample_mean = x.mean(axis=(1, 2), keepdims=True)
        pad_row = np.repeat(sample_mean, x.shape[2], axis=2)
        x = np.concatenate((x, pad_row), axis=1)
    if x.shape[2] % 2 == 1:
        # recompute the mean so the appended row (if any) is included
        sample_mean = x.mean(axis=(1, 2), keepdims=True)
        pad_col = np.repeat(sample_mean, x.shape[1], axis=1)
        x = np.concatenate((x, pad_col), axis=2)
    return x
def add_extra_row_or_column_patch_based(x):
    """Pad a single 2-D image with one row/column of its global mean so that
    both dimensions become even; even inputs pass through untouched."""
    if x.shape[0] % 2 == 1:
        pad_row = np.full((1, x.shape[1]), x.mean())
        x = np.concatenate((x, pad_row), axis=0)
    if x.shape[1] % 2 == 1:
        # recompute the mean so the appended row (if any) is included
        pad_col = np.full((x.shape[0], 1), x.mean())
        x = np.concatenate((x, pad_col), axis=1)
    return x
def remove_extra_row_or_column(x, x_i_sh):
    """Crop a possibly-padded (N, H, W, C) batch back to the original shape x_i_sh."""
    if x_i_sh == x.shape:
        return x
    return x[:, :x_i_sh[1], :x_i_sh[2], :]
def remove_extra_row_or_column_patch_based(x, x_i_sh):
    """Crop a possibly-padded 2-D image back to the original shape x_i_sh."""
    if x_i_sh == x.shape:
        return x
    return x[:x_i_sh[0], :x_i_sh[1]]
def adjust_output_dimensions(x, x_i_shape):
    """Squeeze a 4-D model output back to the rank of the original input shape."""
    rank = len(x_i_shape)
    if rank == 2:
        return x.squeeze()
    if rank != 3:
        return x
    # 3-D input: drop whichever axis expand_dimensions would have added
    return x.squeeze(axis=0) if x_i_shape[-1] == 1 else x.squeeze(axis=-1)
def get_centered_range(n, patch_size, stride):
    """Return the patch-centre coordinates along an axis of length n so the
    patches cover [0, n); the final centre is clamped to reach the axis end."""
    half = patch_size // 2
    if half == n - half:
        # exactly one patch fits
        return np.array([half])
    centers = np.arange(half, n - half, stride)
    if centers[-1] + half < n:
        # append one last centre so the final patch touches the axis end
        centers = np.append(centers, n - half)
    return centers
def get_range(im_shape, patch_size, strides):
    """Yield (row_slice, col_slice) pairs of patch windows covering im_shape."""
    half_h = patch_size[0] // 2
    half_w = patch_size[1] // 2
    row_centers = get_centered_range(im_shape[0], patch_size[0], strides[0])
    col_centers = get_centered_range(im_shape[1], patch_size[1], strides[1])
    for cy in row_centers:
        for cx in col_centers:
            yield slice(cy - half_h, cy + half_h), slice(cx - half_w, cx + half_w)
def process_prediction(data, x_r, count_map, window, ib, sy, sx):
    """Blend the first `ib` predicted patches into x_r (windowed) and
    accumulate the blending weights into count_map, in place."""
    for patch_idx in range(ib):
        weighted_patch = data[patch_idx, ..., 0].squeeze() * window
        x_r[sy[patch_idx], sx[patch_idx]] += weighted_patch
        count_map[sy[patch_idx], sx[patch_idx]] += window
def butterworth_window(shape, cutoff_radius_ftr, order):
    """Build a separable 2-D Butterworth window of the given (height, width).

    :param shape: (height, width) of the window.
    :param cutoff_radius_ftr: cutoff as a fraction of the axis length, in (0, 0.5].
    :param order: Butterworth order; larger values give steeper roll-off.
    :return: a 2-D numpy array with value 1 at the centre sample.
    """
    assert len(shape) == 2, "Shape must be a tuple of length 2 (height, width)"
    assert 0 < cutoff_radius_ftr <= 0.5, "Cutoff frequency must be in the range (0, 0.5]"

    def butterworth_1d(length, cutoff_radius_ftr, order):
        # Fix: the original used `np.arange(-length//2, length-length//2)`, which
        # parses as (-length)//2 and yields length+1 samples for odd lengths
        # (e.g. length 5 -> arange(-3, 3) -> 6 samples). Centre explicitly instead.
        n = np.arange(length) - length // 2
        window = 1 / (1 + (n / (cutoff_radius_ftr * length)) ** (2 * order))
        return window

    window_y = butterworth_1d(shape[0], cutoff_radius_ftr, order)
    window_x = butterworth_1d(shape[1], cutoff_radius_ftr, order)
    window = np.outer(window_y, window_x)
    return window
class Model(tf.keras.Model):
    """Thin wrapper around a saved Keras restoration model.

    Adds a shape-normalising `predict` (2-D/3-D/4-D inputs in, matching rank
    out) and a memory-friendly `predict_patch_based` that restores large 2-D
    images patch by patch, blending overlaps with a Butterworth window.
    """
    def __init__(self, model_path):
        super(Model, self).__init__()
        # compile=False: the saved training configuration is not needed for inference
        self.base_model = tf.keras.models.load_model(model_path, compile=False)
        self.base_model.compile()

    def call(self, inputs, training=None, mask=None):
        """Delegate the forward pass to the wrapped model."""
        return self.base_model(inputs, training=training, mask=mask)

    def summary(self):
        """Delegate the summary to the wrapped model."""
        return self.base_model.summary()

    def predict(self, x, batch_size=16, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False):
        """Restore `x` whole; the output rank matches the input rank."""
        x_i_sh = x.shape
        # Expanding dimensions based on the input shape
        x = expand_dimensions(x)
        # Converting to float32 if necessary
        x = x.astype(np.float32)
        x_i_sh_e = x.shape
        # Adding extra row or column if necessary (even spatial dims required)
        x = add_extra_row_or_column(x)
        batch_size = min(batch_size, x.shape[0])
        # Model prediction
        x = self.base_model.predict(x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
        # Removing extra row or column if added
        x = remove_extra_row_or_column(x, x_i_sh_e)
        # Adjusting output dimensions to match input dimensions
        return adjust_output_dimensions(x, x_i_sh)

    def predict_patch_based(self, x, patch_size=None, stride=None, batch_size=16):
        """Restore a single 2-D image by predicting overlapping patches and
        blending them with a Butterworth window; falls back to `predict`
        when no patch_size is given."""
        if patch_size is None:
            return self.predict(x, batch_size=batch_size)
        x = x.squeeze().astype(np.float32)
        x_i_sh_e = x.shape
        # Adding extra row or column if necessary
        x = add_extra_row_or_column_patch_based(x)
        patch_size = max(patch_size, 128)
        patch_size = (min(patch_size, x.shape[0]), min(patch_size, x.shape[1]))
        # Adjust the stride to have an overlap between patches
        overlap = (patch_size[0]//2, patch_size[1]//2)
        if stride is None:
            stride = overlap
        else:
            stride = (min(stride, overlap[0]), min(stride, overlap[1]))
        batch_size = max(batch_size, 4)
        data = np.zeros((batch_size, *patch_size, 1), dtype=np.float32)
        sy = [slice(0) for _ in range(batch_size)]
        sx = [slice(0) for _ in range(batch_size)]
        x_r = np.zeros(x.shape, dtype=np.float32)
        count_map = np.zeros(x.shape, dtype=np.float32)
        window = butterworth_window(patch_size, 0.33, 4)
        ib = 0
        for s_iy, s_ix in get_range(x.shape, patch_size, stride):
            if ib < batch_size:
                data[ib, ..., 0] = x[s_iy, s_ix]
                sy[ib] = s_iy
                sx[ib] = s_ix
                ib += 1
            if ib == batch_size:
                data = self.base_model.predict(data, batch_size=batch_size)
                process_prediction(data, x_r, count_map, window, ib, sy, sx)
                ib = 0
        # Fix: only flush the remainder when there is one; the original condition
        # (ib != batch_size) predicted on an empty batch whenever the patch count
        # was an exact multiple of batch_size.
        if ib > 0:
            data = self.base_model.predict(data[:ib, ...], batch_size=batch_size)
            process_prediction(data, x_r, count_map, window, ib, sy, sx)
        # Normalize the denoised image using the count_map
        x_r /= count_map
        # Fix: trim the padding from the reconstruction and return it; the original
        # trimmed the *input* array and returned the still-padded x_r.
        x_r = remove_extra_row_or_column_patch_based(x_r, x_i_sh_e)
        return x_r
def load_network(model_name: str = 'sfr_hrstem'):
    """
    Load an r_em neural network model.

    :param model_name: either a path to a model directory or the name of a
        model bundled under the package's `models` folder (case-insensitive).
    :return: a tensorflow.keras.Model object wrapping the loaded network.
    """
    if os.path.isdir(model_name):
        model_path = pathlib.Path(model_name).resolve()
    else:
        model_path = pathlib.Path(__file__).resolve().parent / 'models' / model_name.lower()
    return Model(model_path)
def load_sim_test_data(file_name: str = 'sfr_hrstem') -> Tuple[np.ndarray, np.ndarray]:
    """
    Load a simulated (input, target) test pair for an r_em neural network.

    :param file_name: either a path to an .h5 file or the name of a bundled
        test-data file under the package's `test_data` folder (case-insensitive).
    :return: a tuple of two float32 numpy arrays (x, y).
    """
    if os.path.isfile(file_name):
        data_path = pathlib.Path(file_name).resolve()
    else:
        data_path = pathlib.Path(__file__).resolve().parent / 'test_data' / f'{file_name.lower()}.h5'
    with h5py.File(data_path, 'r') as h5file:
        arrays = tuple(
            np.asarray(h5file[key][:], dtype=np.float32).transpose(0, 3, 2, 1)
            for key in ('x', 'y')
        )
    return arrays
def load_hrstem_exp_test_data(file_name: str = 'exp_hrstem') -> np.ndarray:
    """
    Load experimental HRSTEM test data for the r_em neural network.

    :param file_name: either a path to an .h5 file or the name of a bundled
        test-data file under the package's `test_data` folder (case-insensitive).
    :return: a single float32 numpy array with the input (x) data.
    """
    if os.path.isfile(file_name):
        path = pathlib.Path(file_name).resolve()
    else:
        file_name = file_name.lower()
        path = pathlib.Path(__file__).resolve().parent / 'test_data' / f'{file_name}.h5'
    with h5py.File(path, 'r') as f:
        x = f['x'][:]
        if x.ndim == 4:
            # reorder a stack of images from (n, c, w, h) storage to (n, h, w, c)
            # -- assumed layout, TODO confirm against the .h5 writer
            x = np.asarray(x, dtype=np.float32).transpose(0, 3, 2, 1)
        else:
            # single 2-D image: swap the two axes
            x = np.asarray(x, dtype=np.float32).transpose(1, 0)
return x | 8,852 | 32.534091 | 136 | py |
pyRVtest | pyRVtest-main/docs/conf.py | """Sphinx configuration."""
import ast
import copy
import json
import os
from pathlib import Path
import re
import shutil
from typing import Any, Optional, Tuple
import astunparse
import sphinx.application
# get the location of the source directory
source_path = Path(__file__).resolve().parent
# project information
language = 'en'
project = 'pyRVtest'
copyright = '2023, Marco Duarte, Lorenzo Magnolfi, Mikkel Solvsten, Christopher Sullivan, and Anya Tarascina'
author = 'Marco Duarte, Lorenzo Magnolfi, Mikkel Solvsten, Christopher Sullivan, and Anya Tarascina'
# configure locations of other configuration files
templates_path = ['templates']
exclude_patterns = ['_build', '_downloads', 'notebooks', 'templates', '**.ipynb_checkpoints']
# identify the RTD version that's being built and associated URLs
rtd_version = os.environ.get('READTHEDOCS_VERSION', 'latest')
rtd_url = f'https://{project.lower()}.readthedocs.io/{language}/{rtd_version}'
pdf_url = f'https://readthedocs.org/projects/{project.lower()}/downloads/pdf/{rtd_version}'
# configure extensions
extensions = [
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'nbsphinx'
]
# cross-references into other projects' documentation
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.6/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'patsy': ('https://patsy.readthedocs.io/en/stable/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
    'pyhdfe': ('https://pyhdfe.readthedocs.io/en/stable/', None),
}
# :rtd:`...` and :pdf:`...` shorthand roles pointing at the built docs
extlinks = {
    'rtd': (f'{rtd_url}/%s', None),
    'pdf': (f'{pdf_url}/%s', None)
}
mathjax3_config = {
    'HTML-CSS': {
        'matchFontHeight': False,
        'fonts': ['Latin-Modern', 'TeX']
    }
}
# equation numbering and autodoc/notebook behaviour
math_numfig = True
math_number_all = True
numfig_secnum_depth = 0
autosummary_generate = True
numpydoc_show_class_members = False
autosectionlabel_prefix_document = True
nbsphinx_allow_errors = True
# configure HTML information
html_theme = 'sphinx_rtd_theme'
def clean_directories() -> None:
    """Clean directories that will be generated."""
    for generated_dir in ('_api', '_downloads', '_notebooks'):
        shutil.rmtree(source_path / generated_dir, ignore_errors=True)
def process_notebooks() -> None:
    """Copy notebook files to _notebooks and _downloads, resetting executing counts and replacing domains with Markdown
    equivalents.
    """
    for notebook_path in Path(source_path / 'notebooks').glob('**/*.ipynb'):
        notebook = json.loads(notebook_path.read_text())
        # the _downloads copy gets different link targets than the _notebooks copy
        download = copy.deepcopy(notebook)
        # extract parts of the path relative to the notebooks directory and construct the directory's relative location
        relative_parts = notebook_path.relative_to(source_path).parts[1:]
        relative_location = '../' * len(relative_parts)
        # manipulate notebook cells
        for notebook_cell, download_cell in zip(notebook['cells'], download['cells']):
            # reset download execution counts
            for data in [download_cell] + download_cell.get('outputs', []):
                if 'execution_count' in data:
                    data['execution_count'] = 1
            # replace supported Sphinx domains with Markdown equivalents
            if notebook_cell['cell_type'] == 'markdown':
                for source_index, notebook_source in enumerate(notebook_cell['source']):
                    # find every :role:`content` occurrence in the Markdown line
                    for role, content in re.findall(':([a-z]+):`([^`]+)`', notebook_source):
                        domain = f':{role}:`{content}`'
                        if role == 'ref':
                            # :ref: content is expected in "document:link text" form
                            document, text = content.split(':', 1)
                            section = re.sub(r'-+', '-', re.sub('[^0-9a-zA-Z]+', '-', text)).strip('-').lower()
                        elif role in {'mod', 'func', 'class', 'meth', 'attr', 'exc'}:
                            text = f'`{content}`'
                            section = f'{project}.{content}'
                            document = f'_api/{project}.{content}'
                            if role == 'mod':
                                section = f'module-{section}'
                            elif role == 'attr':
                                # attributes are documented on their parent object's page
                                document = document.rsplit('.', 1)[0]
                        else:
                            raise NotImplementedError(f"The domain '{domain}' is not supported.")
                        # replace the domain with Markdown equivalents (reStructuredText doesn't support linked code)
                        notebook_cell['source'][source_index] = notebook_cell['source'][source_index].replace(
                            domain,
                            f'[{text.strip("`")}]({relative_location}{document}.rst#{section})'
                        )
                        download_cell['source'][source_index] = download_cell['source'][source_index].replace(
                            domain,
                            f'[{text}]({rtd_url}/{document}.html#{section})'
                        )
        # save the updated notebook files
        for updated, location in [(download, '_downloads'), (notebook, '_notebooks')]:
            updated_path = source_path / Path(location, *relative_parts)
            updated_path.parent.mkdir(parents=True, exist_ok=True)
            updated_path.write_text(json.dumps(updated, indent=1, sort_keys=True, separators=(', ', ': ')))
def process_signature(*args: Any) -> Optional[Tuple[str, str]]:
    """Strip type hints from signatures.

    Sphinx's ``autodoc-process-signature`` event passes
    (app, what, name, obj, options, signature, return_annotation); only the
    signature string (args[5]) is consumed here.  Returns the rewritten
    (signature, return_annotation) pair, or None when there is no signature.
    """
    signature = args[5]
    if signature is None:
        return None
    assert isinstance(signature, str)
    node = ast.parse(f'def f{signature}: pass').body[0]
    assert isinstance(node, ast.FunctionDef)
    node.returns = None
    # Fix: strip annotations from every parameter kind; the original only cleared
    # node.args.args, leaving keyword-only, positional-only, *args and **kwargs
    # annotations in the rendered signature.
    arguments = node.args
    for arg in [*getattr(arguments, 'posonlyargs', []), *arguments.args, *arguments.kwonlyargs]:
        arg.annotation = None
    for arg in (arguments.vararg, arguments.kwarg):
        if arg is not None:
            arg.annotation = None
    return astunparse.unparse(node).splitlines()[2][5:-1], ''
def setup(app: sphinx.application.Sphinx) -> None:
    """Clean directories, process notebooks, configure extra resources, and strip type hints."""
    # regenerate the notebook copies before the build starts
    clean_directories()
    process_notebooks()
    # rewrite autodoc signatures without type hints
    app.connect('autodoc-process-signature', process_signature)
| 6,312 | 39.729032 | 119 | py |
MCSE | MCSE-master/simcse_to_huggingface.py | """
Convert SimCSE's checkpoints to Huggingface style.
code from https://github.com/princeton-nlp/SimCSE
"""
import argparse
import torch
import os
import json
def main():
    """Convert a SimCSE checkpoint folder in place to Huggingface style.

    Rewrites pytorch_model.bin (renaming the MLP head to `pooler` and dropping
    the `bert.`/`roberta.` prefixes) and patches the `architectures` entry in
    config.json (e.g. BertForCL -> BertModel).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", type=str, help="Path of SimCSE checkpoint folder")
    args = parser.parse_args()

    print("SimCSE checkpoint -> Huggingface checkpoint for {}".format(args.path))

    weight_path = os.path.join(args.path, "pytorch_model.bin")
    state_dict = torch.load(weight_path, map_location=torch.device("cpu"))
    new_state_dict = {}
    for key, param in state_dict.items():
        # SimCSE's projection MLP becomes Huggingface's pooler
        if "mlp" in key:
            key = key.replace("mlp", "pooler")
        # Delete "bert" or "roberta" prefix
        if "bert." in key:
            key = key.replace("bert.", "")
        if "roberta." in key:
            key = key.replace("roberta.", "")
        new_state_dict[key] = param
    torch.save(new_state_dict, weight_path)

    # Change architectures in config.json; use context managers so the files are
    # closed (and the rewritten config flushed) deterministically -- the original
    # left both file objects to the garbage collector.
    config_path = os.path.join(args.path, "config.json")
    with open(config_path) as config_file:
        config = json.load(config_file)
    config["architectures"] = [arch.replace("ForCL", "Model") for arch in config["architectures"]]
    with open(config_path, "w") as config_file:
        json.dump(config, config_file, indent=2)
# Script entry point: run the in-place conversion.
if __name__ == "__main__":
    main()
| 1,340 | 29.477273 | 107 | py |
MCSE | MCSE-master/src/utils.py | import sys
import torch
# Set path to SentEval
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'
# Import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def evaluate(model, tokenizer):
    """Fast dev-mode SentEval evaluation on STS-Benchmark.

    Encodes each batch with *model*, uses the raw [CLS] token of the last
    hidden layer as the sentence embedding, and returns the dev Spearman
    correlation plus alignment/uniformity losses.

    NOTE(review): hard-codes 'cuda' for tensors, and reads align_loss /
    uniform_loss from the SentEval results — confirm the bundled SentEval
    reports those keys.
    """
    def prepare(params, samples):
        # No vocabulary building needed for a transformer encoder.
        return
    def batcher(params, batch):
        # SentEval hands over tokenised sentences; re-join for the tokenizer.
        sentences = [' '.join(s) for s in batch]
        batch = tokenizer.batch_encode_plus(
            sentences,
            return_tensors='pt',
            padding=True,
        )
        for k in batch:
            batch[k] = batch[k].to('cuda')
        with torch.no_grad():
            outputs = model(**batch, output_hidden_states=True, return_dict=True)
        return outputs.last_hidden_state[:, 0].cpu() # unpooled [CLS] output in BERT
    # Set params for SentEval (fastmode)
    params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
    params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                            'tenacity': 3, 'epoch_size': 2}
    se = senteval.engine.SE(params, batcher, prepare)
    tasks = ['STSBenchmark']
    results = se.eval(tasks)
    stsb_spearman = results['STSBenchmark']['dev']['spearman'][0]
    stsb_align = results['STSBenchmark']['dev']['align_loss']
    stsb_uniform = results['STSBenchmark']['dev']['uniform_loss']
    metrics = {"eval_stsb_spearman": stsb_spearman,
               "eval_stsb_align": stsb_align,
               "eval_stsb_uniform": stsb_uniform}
    return metrics
def inf_train_gen(trainloader):
    """Yield batches from *trainloader* forever, re-iterating it each epoch.

    Re-iterating (rather than caching) means a shuffling DataLoader produces
    a fresh order every pass.
    """
    while True:
        yield from trainloader
MCSE | MCSE-master/src/model.py | import torch
import torch.nn as nn
from transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, BertLMPredictionHead
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel, RobertaLMHead
from transformers.modeling_outputs import SequenceClassifierOutput, BaseModelOutputWithPoolingAndCrossAttentions
class MLPLayer(nn.Module):
    """
    Head for getting sentence representations over RoBERTa/BERT's CLS
    representation: a single Linear layer followed by Tanh.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.dense = nn.Linear(in_dim, out_dim)
        self.activation = nn.Tanh()

    def forward(self, features, **kwargs):
        """Project *features* and squash through tanh."""
        return self.activation(self.dense(features))
class Similarity(nn.Module):
    """
    Temperature-scaled cosine similarity along the last dimension.
    """

    def __init__(self, temp):
        super().__init__()
        self.temp = temp
        self.cos = nn.CosineSimilarity(dim=-1)

    def forward(self, x, y):
        """Return cos(x, y) / temp; broadcasting follows CosineSimilarity."""
        scaled = self.cos(x, y) / self.temp
        return scaled
class BertForCL(BertPreTrainedModel):
    """BERT encoder for contrastive learning (SimCSE-style).

    Wraps a plain ``BertModel`` plus an extra MLP head (``pooler``) applied
    to the [CLS] token. ``forward`` flattens (bs, num_sent, len) inputs to
    (bs * num_sent, len) before encoding; callers reshape afterwards.
    """
    # Don't warn when the loaded checkpoint lacks position_ids.
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.args = model_kargs['model_args']  # argparse namespace from training
        self.bert = BertModel(config)
        # SimCSE-style MLP head on top of the [CLS] representation.
        self.pooler = MLPLayer(config.hidden_size, config.hidden_size)
        self.init_weights()
    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=True,
                ):
        """Encode flattened sentences; return pooled [CLS] + hidden states."""
        # Flatten input for encoding
        input_ids = input_ids.view((-1, input_ids.size(-1))) # (bs * num_sent, len)
        attention_mask = attention_mask.view((-1, attention_mask.size(-1))) # (bs * num_sent len)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1))) # (bs * num_sent, len)
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            # Hidden states are always requested so callers can pool any layer.
            output_hidden_states=True,
            return_dict=True,
        )
        # pooler_output = MLP([CLS] of the last layer).
        pooler_output = self.pooler(outputs.last_hidden_state[:, 0])
        return BaseModelOutputWithPoolingAndCrossAttentions(
            pooler_output=pooler_output,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
        )
class RobertaForCL(RobertaPreTrainedModel):
    """RoBERTa encoder for contrastive learning; mirrors ``BertForCL``.

    Wraps a plain ``RobertaModel`` plus an extra MLP head (``pooler``) on
    the [CLS] token; ``forward`` flattens (bs, num_sent, len) inputs to
    (bs * num_sent, len) before encoding.
    """
    # Don't warn when the loaded checkpoint lacks position_ids.
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.args = model_kargs['model_args']  # argparse namespace from training
        self.roberta = RobertaModel(config)
        # SimCSE-style MLP head on top of the [CLS] representation.
        self.pooler = MLPLayer(config.hidden_size, config.hidden_size)
        self.init_weights()
    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=True,
                ):
        """Encode flattened sentences; return pooled [CLS] + hidden states."""
        # Flatten input for encoding
        input_ids = input_ids.view((-1, input_ids.size(-1))) # (bs * num_sent, len)
        attention_mask = attention_mask.view((-1, attention_mask.size(-1))) # (bs * num_sent len)
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1))) # (bs * num_sent, len)
        outputs = self.roberta(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            # Hidden states are always requested so callers can pool any layer.
            output_hidden_states=True,
            return_dict=True,
        )
        # pooler_output = MLP([CLS] of the last layer).
        pooler_output = self.pooler(outputs.last_hidden_state[:, 0])
        return BaseModelOutputWithPoolingAndCrossAttentions(
            pooler_output=pooler_output,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
        )
class ResNetVisnModel(nn.Module):
    """Maps pre-extracted ResNet features into the L2-normalised grounding space."""

    def __init__(self, feature_dim, proj_dim):
        super().__init__()
        # visual features -> grounding space
        self.mlp = MLPLayer(feature_dim, proj_dim)

    def forward(self, x):
        """Project *x* and normalise each vector to unit L2 norm."""
        projected = self.mlp(x)
        return projected / projected.norm(2, dim=-1, keepdim=True)
class MCSE(nn.Module):
    """SimCSE-style contrastive sentence-embedding model with optional
    multimodal (text-image) grounding.

    ``compute_loss`` always computes the intra-modality (dropout-augmented
    text/text) InfoNCE loss; with ``cal_inter=True`` it also grounds the
    sentence embeddings and contrasts them against image features.
    """
    def __init__(self, lang_model, visn_model, args):
        super().__init__()
        self.args = args
        self.lang_model = lang_model
        self.visn_model = visn_model
        self.grounding = MLPLayer(args.hidden_size, args.proj_dim)  # sent embeddings -> grounding space
        self.sim = Similarity(temp=self.args.temp)        # text/text similarity
        self.sim_vl = Similarity(temp=self.args.temp_vl)  # text/image similarity
        self.loss_fct = nn.CrossEntropyLoss()

    def forward(self, batch):
        """Encode the duplicated sentences; return pooled [CLS] and MLP projections."""
        # Bug fix: the original gated token_type_ids on the presence of
        # 'position_ids' in the batch, which silently dropped any provided
        # token_type_ids; gate each kwarg on its own key instead.
        lang_output = self.lang_model(input_ids=batch['input_ids'],
                                      attention_mask=batch['attention_mask'],
                                      token_type_ids=batch['token_type_ids'] if 'token_type_ids' in batch.keys() else None,
                                      position_ids=batch['position_ids'] if 'position_ids' in batch.keys() else None)
        batch_size = batch['input_ids'].size(0)
        num_sent = batch['input_ids'].size(1)
        # [bs*2, hidden] -> [bs, 2, hidden]
        lang_pooled_output = lang_output.last_hidden_state[:, 0].view((batch_size, num_sent, -1))
        lang_projection = lang_output.pooler_output.view((batch_size, num_sent, -1))  # [bs, 2, hidden], output of additional MLP layer
        return lang_pooled_output, lang_projection

    def compute_loss(self, batch, cal_inter=False):
        """Return the intra-modality loss; with *cal_inter*, also the inter-modality loss."""
        l_pool, l_proj = self.forward(batch)
        # The two "sentences" are the same text under different dropout masks.
        z1, z2 = l_proj[:, 0], l_proj[:, 1]  # (bs, hidden)
        cos_sim = self.sim(z1.unsqueeze(1), z2.unsqueeze(0))  # (bs, bs)
        labels = torch.arange(cos_sim.size(0)).long().to(self.args.device)  # [0, 1, ..., bs-1]
        loss = self.loss_fct(cos_sim, labels)  # unsup: bs-1 in-batch negatives
        if not cal_inter:
            return loss
        # Inter-modality loss: ground both text views and contrast with images.
        v = self.visn_model(batch['img'])   # [bs, proj_dim]
        l2v_proj = self.grounding(l_pool)   # [bs, 2, proj_dim], output for vision grounding
        l2v_proj = l2v_proj / l2v_proj.norm(2, dim=-1, keepdim=True)  # unit L2 norm
        p1, p2 = l2v_proj[:, 0], l2v_proj[:, 1]  # (bs, proj)
        cos_sim_p0 = self.sim_vl(p1.unsqueeze(1), v.unsqueeze(0))  # (bs, bs)
        cos_sim_p1 = self.sim_vl(p2.unsqueeze(1), v.unsqueeze(0))
        inter_loss = (self.loss_fct(cos_sim_p0, labels) + self.loss_fct(cos_sim_p1, labels)) / 2
        return loss, inter_loss
| 7,235 | 35.730964 | 137 | py |
MCSE | MCSE-master/src/data.py | import torch
from torch.utils.data import Dataset
import h5py
import numpy as np
from torchvision.datasets.folder import default_loader
class ImgSentDataset(Dataset):
    """Dataset of sentences, optionally paired with pre-extracted image features.

    When *feature_file* is None the examples are ``{'sent': str}``;
    otherwise ``{'sent': str, 'img': Tensor}`` with line i of *text_file*
    paired to row i of the HDF5 ``features`` dataset.
    """

    def __init__(self,
                 text_file,
                 feature_file=None,
                 shuffle_imgs=False,
                 random_imgs=False,
                 shot=-1):
        self.text_file = text_file
        self.feature_file = feature_file
        self.shuffle_imgs = shuffle_imgs
        self.random_imgs = random_imgs
        self.shot = shot
        self.raw_dataset = self.load_data()

    def load_data(self):
        """Read sentences (and features, when available) into a list of dicts."""
        sentonly = self.feature_file is None
        # One sentence per line.
        with open(self.text_file, 'r') as f:
            sentences = [line.strip() for line in f.readlines()]
        N = len(sentences)
        if sentonly:
            data = [{'sent': sent} for sent in sentences]
        else:
            with h5py.File(self.feature_file, "r") as f:
                imgs = torch.from_numpy(np.array(f['features']))
            if self.shuffle_imgs:
                print('Ablation study: shuffling the imgs ')
                index = np.random.choice(N, N, replace=False)
                imgs = imgs[index]
            if self.random_imgs:
                print('Ablation study: select random imgs ')
                index = np.random.choice(N, N, replace=True)
                imgs = imgs[index]
            data = [{'sent': sent, 'img': img} for sent, img in zip(sentences, imgs)]
        if self.shot > 0:
            # Few-shot setting: keep a random subset of examples.
            index = np.random.choice(N, self.shot, replace=False)
            data = np.array(data)[index].tolist()
        return data

    def __len__(self):
        return len(self.raw_dataset)

    def __getitem__(self, item: int):
        return self.raw_dataset[item]
| 1,990 | 25.905405 | 65 | py |
MCSE | MCSE-master/src/evaluation.py | import sys
import os
import logging
import argparse
from prettytable import PrettyTable
import torch
from transformers import AutoModel, AutoTokenizer
# Set PATHs
PATH_TO_SENTEVAL = './SentEval'
PATH_TO_DATA = './SentEval/data'
# Import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def print_full_table(task_names, scores, aligns, uniforms, logging):
    """Render scores + alignment + uniformity as a three-row table to stdout and the log."""
    table = PrettyTable()
    table.field_names = task_names
    for row in (scores, aligns, uniforms):
        table.add_row(row)
    print(table)
    logging.info(table)
def print_table(task_names, scores, logging):
    """Render a one-row score table to stdout and the log file."""
    table = PrettyTable()
    table.field_names = task_names
    table.add_row(scores)
    print(table)
    logging.info(table)
def main():
    """Evaluate a Huggingface checkpoint with SentEval.

    Loads the model from ``--model_name_or_path``, builds a SentEval batcher
    around the chosen ``--pooler``, runs the requested STS/transfer tasks,
    and prints/logs result tables. Logs to ``eval_results.log`` inside the
    checkpoint directory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name_or_path", type=str,
                        help="Transformers' model name or path")
    parser.add_argument("--pooler", type=str,
                        choices=['cls', 'cls_before_pooler', 'avg', 'avg_top2', 'avg_first_last'],
                        default='cls',
                        help="Which pooler to use")
    parser.add_argument("--mode", type=str,
                        choices=['dev', 'test', 'fasttest'],
                        default='test',
                        help="What evaluation mode to use (dev: fast mode, dev results; test: full mode, test results); fasttest: fast mode, test results")
    parser.add_argument("--task_set", type=str,
                        choices=['sts', 'transfer', 'full', 'na'],
                        default='sts',
                        help="What set of tasks to evaluate on. If not 'na', this will override '--tasks'")
    parser.add_argument("--tasks", type=str, nargs='+',
                        default=['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
                                 'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC',
                                 'SICKRelatedness', 'STSBenchmark'],
                        help="Tasks to evaluate on. If '--task_set' is specified, this will be overridden")
    args = parser.parse_args()
    # Set up logger
    logfile = os.path.join(args.model_name_or_path, 'eval_results.log')
    logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG, filename=logfile)
    # Load transformers' model checkpoint
    model = AutoModel.from_pretrained(args.model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    # Set up the tasks ('na' leaves --tasks untouched)
    if args.task_set == 'sts':
        args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
    elif args.task_set == 'transfer':
        args.tasks = ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']
    elif args.task_set == 'full':
        args.tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
        args.tasks += ['MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'TREC', 'MRPC']
    # Set params for SentEval
    if args.mode == 'dev' or args.mode == 'fasttest':
        # Fast mode
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
        params['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
                                'tenacity': 3, 'epoch_size': 2}
    elif args.mode == 'test':
        # Full mode
        params = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10}
        params['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
                                'tenacity': 5, 'epoch_size': 4}
    else:
        raise NotImplementedError
    # SentEval prepare and batcher
    def prepare(params, samples):
        # Nothing to precompute for a transformer encoder.
        return
    def batcher(params, batch, max_length=None):
        # Handle rare token encoding issues in the dataset
        if len(batch) >= 1 and len(batch[0]) >= 1 and isinstance(batch[0][0], bytes):
            batch = [[word.decode('utf-8') for word in s] for s in batch]
        sentences = [' '.join(s) for s in batch]
        # Tokenization
        if max_length is not None:
            batch = tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
                max_length=max_length,
                truncation=True
            )
        else:
            batch = tokenizer.batch_encode_plus(
                sentences,
                return_tensors='pt',
                padding=True,
            )
        # Move to the correct device
        for k in batch:
            batch[k] = batch[k].to(device)
        # Get raw embeddings
        with torch.no_grad():
            outputs = model(**batch, output_hidden_states=True, return_dict=True)
            last_hidden = outputs.last_hidden_state
            pooler_output = outputs.pooler_output
            hidden_states = outputs.hidden_states
        # Apply different poolers
        if args.pooler == 'cls':
            # There is a linear+activation layer after CLS representation
            return pooler_output.cpu()
        elif args.pooler == 'cls_before_pooler':
            return last_hidden[:, 0].cpu()
        elif args.pooler == "avg":
            # Attention-mask-weighted mean over tokens of the last layer.
            return ((last_hidden * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch['attention_mask'].sum(
                -1).unsqueeze(-1)).cpu()
        elif args.pooler == "avg_first_last":
            # Mean of embedding layer and last layer, then masked mean.
            first_hidden = hidden_states[0]
            last_hidden = hidden_states[-1]
            pooled_result = ((first_hidden + last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / batch[
                'attention_mask'].sum(-1).unsqueeze(-1)
            return pooled_result.cpu()
        elif args.pooler == "avg_top2":
            # Mean of the two topmost layers, then masked mean.
            second_last_hidden = hidden_states[-2]
            last_hidden = hidden_states[-1]
            pooled_result = ((last_hidden + second_last_hidden) / 2.0 * batch['attention_mask'].unsqueeze(-1)).sum(1) / \
                            batch['attention_mask'].sum(-1).unsqueeze(-1)
            return pooled_result.cpu()
        else:
            raise NotImplementedError
    # Run each task through a fresh SentEval engine.
    results = {}
    for task in args.tasks:
        se = senteval.engine.SE(params, batcher, prepare)
        result = se.eval(task)
        results[task] = result
    # Print evaluation results
    if args.mode == 'dev':
        print("------ %s ------" % (args.mode))
        logging.info("------ %s ------" % (args.mode))
        task_names = []
        scores = []
        for task in ['STSBenchmark', 'SICKRelatedness']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['dev']['spearman'][0] * 100))
            else:
                scores.append("0.00")
        print_table(task_names, scores, logging)
        task_names = []
        scores = []
        for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
            task_names.append(task)
            if task in results:
                scores.append("%.2f" % (results[task]['devacc']))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        print_table(task_names, scores, logging)
    elif args.mode == 'test' or args.mode == 'fasttest':
        print("------ %s ------" % (args.mode))
        logging.info("------ %s ------" % (args.mode))
        task_names = []
        scores = []
        aligns = []
        uniforms = []
        # NOTE(review): aligns/uniforms are only appended for tasks present in
        # results; averaging below divides by len(aligns), so running zero STS
        # tasks would raise ZeroDivisionError.
        for task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']:
            task_names.append(task)
            if task in results:
                if task in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']:
                    scores.append("%.2f" % (results[task]['all']['spearman']['all'] * 100))
                    aligns.append("%.3f" % (results[task]['all']['align_loss']['all']))
                    uniforms.append("%.3f" % (results[task]['all']['uniform_loss']['all']))
                else:
                    scores.append("%.2f" % (results[task]['test']['spearman'].correlation * 100)) # for STSB and SICK.
                    aligns.append("%.3f" % (results[task]['test']['align_loss']))
                    uniforms.append("%.3f" % (results[task]['test']['uniform_loss']))
            else:
                scores.append("0.00")
        task_names.append("Avg.")
        scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        aligns.append("%.3f" % (sum([float(score) for score in aligns]) / len(aligns)))
        uniforms.append("%.3f" % (sum([float(score) for score in uniforms]) / len(uniforms)))
        # print_table(task_names, scores, logging)
        print_full_table(task_names, scores, aligns, uniforms, logging)
        #task_names = []
        #scores = []
        #for task in ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']:
        #    task_names.append(task)
        #    if task in results:
        #        scores.append("%.2f" % (results[task]['devacc']))
        #    else:
        #        scores.append("0.00")
        #task_names.append("Avg.")
        #scores.append("%.2f" % (sum([float(score) for score in scores]) / len(scores)))
        #print_table(task_names, scores, logging)
if __name__ == "__main__":
    main()
| 9,443 | 39.706897 | 155 | py |
MCSE | MCSE-master/src/train.py | import argparse
import logging
import math
import os
import random
import datasets
from torch.utils.data.dataloader import DataLoader
import torch
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
from data import ImgSentDataset
from model import MCSE, BertForCL, RobertaForCL, ResNetVisnModel
from utils import evaluate
logger = logging.getLogger(__name__)
def parse_args():
    """Parse command-line arguments for SimCSE/MCSE training and create the
    output directory.

    Fixes over the original: help-text typos corrected
    (``--pad_to_max_lengh`` -> ``--pad_to_max_length``, "dimention" ->
    "dimension", "updates steps" -> "update steps"); no defaults or option
    names changed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--framework",
        type=str,
        default="simcse",
        help="The framework to use.",
        choices=["simcse", "mcse"]
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-uncased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--text_file",
        type=str,
        default=None,
        help="A .txt file containing the training sentences."
    )
    parser.add_argument(
        "--feature_file",
        type=str,
        default=None,
        help="A .hdf5 file containing the image features (e.g. ResNet50 features)."
    )
    parser.add_argument(
        "--shuffle_imgs",
        action="store_true",
        help="Ablation study for random imgs",
    )
    parser.add_argument(
        "--random_imgs",
        action="store_true",
        help="Ablation study for random imgs",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="result/",
        help="Where to store the final model.")
    parser.add_argument(
        "--max_seq_length",
        type=int,
        default=32,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=64,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=64,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=3e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--weight_decay",
        type=float,
        default=0.0,
        help="Weight decay to use.")
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=3,
        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--max_grad_norm",
        type=float,
        default=1.0,
        help="Maximum gradient norm for gradient clipping."
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps",
        type=int,
        default=0,
        help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=0,
        help="A seed for reproducible training. We used [0,1,2,3,4] in experiments.")
    parser.add_argument(
        "--temp",
        type=float,
        default=0.05,
        help="Temperature for softmax.")
    parser.add_argument(
        "--temp_vl",
        type=float,
        default=0.05,
        help="Temperature for cross-modality contrastive learning"
    )
    parser.add_argument(
        "--hidden_size",
        type=int,
        default=768,
        help="Text embedding dimension of pooled output (mlp)")
    parser.add_argument(
        "--proj_dim",
        type=int,
        default=256,
        help="Projection dimension in grounding space")
    parser.add_argument(
        "--lbd",
        type=float,
        default=0.01,
        help="weight for inter-modality loss")
    parser.add_argument(
        "--eval_steps",
        type=int,
        default=125,
        help="evaluation step interval")
    parser.add_argument(
        "--metric_for_best_model",
        type=str,
        default='stsb_spearman',
        help="for saving best checkpoint")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument(
        "--shot",
        type=int,
        default=-1,
        help="few-shot setting")
    args = parser.parse_args()
    # Create the output directory up front so checkpoints can be written.
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
    return args
def main():
    """Train SimCSE (text-only) or MCSE (text + image grounding).

    Loads the encoder named by ``--model_name_or_path``, builds the caption
    dataset/dataloader, trains with in-batch contrastive losses, evaluates
    on STS-B dev every ``--eval_steps`` optimizer steps, and saves the best
    and final checkpoints under ``--output_dir``.
    """
    args = parse_args()
    print(args)
    # Initialize the accelerator.
    accelerator = Accelerator()
    args.device = accelerator.device
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the screen.
    # accelerator.is_local_main_process is only True for one process per machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # If passed along, set the training seed now.
    set_seed(args.seed)
    # Load pretrained tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
    tokenizer.save_pretrained(args.output_dir)
    torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    config = AutoConfig.from_pretrained(args.model_name_or_path)
    # Pick the CL wrapper by substring match on the model name.
    # NOTE(review): 'bert' is a substring of 'roberta', so the roberta branch
    # must come first — do not reorder.
    if 'roberta' in args.model_name_or_path:
        lang_model = RobertaForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    elif 'bert' in args.model_name_or_path:
        lang_model = BertForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    else:
        raise NotImplementedError
    if args.framework.lower() == 'mcse':
        # 2048 = ResNet50 feature dimension.
        visn_model = ResNetVisnModel(2048, args.proj_dim)
    else:
        visn_model = None
    model = MCSE(lang_model, visn_model, args)
    # Define collator function
    def data_collator(batch):
        keys = batch[0].keys()
        sentences = [b['sent'] for b in batch]
        new_batch = {}
        total = len(sentences)
        # tokenization
        tokenized_sents = tokenizer(
            sentences,
            max_length=args.max_seq_length,
            truncation=True,
            padding="max_length" if args.pad_to_max_length else 'longest',
            return_tensors='pt'
        )
        if 'img' in keys:
            new_batch['img'] = torch.stack([batch[i]['img'] for i in range(total)])
        for key in ['input_ids', 'attention_mask', 'token_type_ids', 'position_ids']:
            # (bs, len) -> (bs, 2, len): duplicate each sentence so the two
            # dropout-augmented views are encoded in one forward pass.
            if key in tokenized_sents.keys():
                new_batch[key] = tokenized_sents[key].unsqueeze(1).repeat(1, 2, 1)
        return new_batch
    # dataset and dataloader
    train_dataset = ImgSentDataset(text_file=args.text_file,
                                   feature_file=args.feature_file,
                                   shuffle_imgs=args.shuffle_imgs,
                                   random_imgs=args.random_imgs,
                                   shot=args.shot)
    train_dataloader = DataLoader(train_dataset,
                                  shuffle=True,
                                  batch_size=args.per_device_train_batch_size,
                                  collate_fn=data_collator)
    # Log a few random samples from the training set:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    # Optimizer
    # Split weights in two groups, one with weight decay and the other not. Same as examples in huggingface and sentence-transformer.
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
    # Scheduler and math around the number of training steps.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    #num_update_steps_per_epoch = math.ceil(num_update_steps_per_epoch * args.percent)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )
    set_seed(args.seed) # for sake of the status change of sampler
    # Train! num_processes -> 1
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f" Num examples = {len(train_dataset)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f" Train train batch size (w.parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    best_metric = 0
    for epoch in range(args.num_train_epochs):
        for step, batch in enumerate(train_dataloader):
            model.train()
            if args.framework.lower() == 'mcse':
                intra_loss, inter_loss = model.compute_loss(batch, cal_inter=True)
                loss = intra_loss + args.lbd * inter_loss
            else:
                loss = model.compute_loss(batch, cal_inter=False)
            loss = loss / args.gradient_accumulation_steps
            accelerator.backward(loss)
            # Step the optimizer every accumulation boundary (and at epoch end).
            if (step+1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader)-1:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
            # Periodic STS-B dev evaluation; keep the best checkpoint.
            if (step+1) % args.gradient_accumulation_steps == 0 and (completed_steps % args.eval_steps == 0 or completed_steps >= args.max_train_steps):
                logger.info("***** Start evaluation *****")
                model.eval()
                metrics = evaluate(model.lang_model, tokenizer)
                logger.info(f" step {completed_steps}: eval_stsb_spearman = {metrics['eval_stsb_spearman']}")
                if metrics['eval_'+args.metric_for_best_model] > best_metric:
                    # evaluation
                    best_metric = metrics['eval_'+args.metric_for_best_model]
                    # save (1) pytorch_model.bin (2) config.json
                    logger.info("Saving best model checkpoint to %s", args.output_dir)
                    accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                    unwrapped_model = accelerator.unwrap_model(model)
                    unwrapped_model.lang_model.save_pretrained(args.output_dir, save_function=accelerator.save)
                    if args.framework.lower() == 'mcse':
                        # Also persist the visual encoder and grounding head.
                        accelerator.save(
                            {
                                'visn_model': unwrapped_model.visn_model.state_dict(),
                                'grounding': unwrapped_model.grounding.state_dict()
                            },
                            os.path.join(args.output_dir, 'mcse.pt')
                        )
            if completed_steps >= args.max_train_steps:
                # Save the last state separately from the best checkpoint.
                path = os.path.join(args.output_dir, 'final_checkpoint')
                logger.info("Saving final checkpoint to %s", path)
                tokenizer.save_pretrained(path)
                accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                unwrapped_model = accelerator.unwrap_model(model)
                unwrapped_model.lang_model.save_pretrained(path, save_function=accelerator.save)
                if args.framework.lower() == 'mcse':
                    accelerator.save(
                        {
                            'visn_model': unwrapped_model.visn_model.state_dict(),
                            'grounding': unwrapped_model.grounding.state_dict()
                        },
                        os.path.join(path, 'mcse.pt')
                    )
                break
    logger.info("Training completed.")
if __name__ == "__main__":
main() | 15,256 | 34.399072 | 155 | py |
MCSE | MCSE-master/src/train_mix.py | import argparse
import logging
import math
import os
import datasets
from torch.utils.data.dataloader import DataLoader
import torch
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
from data import ImgSentDataset
from model import MCSE, BertForCL, RobertaForCL, ResNetVisnModel
from utils import evaluate, inf_train_gen
logger = logging.getLogger(__name__)
def parse_args():
    """Parse command-line arguments for mixed (caption + wiki) training and
    create the output directory.

    Unlike ``train.py``, this variant takes both ``--caption_file`` (paired
    with image features) and ``--text_file`` (plain wiki sentences).
    Fixes over the original: help-text typos corrected
    (``--pad_to_max_lengh`` -> ``--pad_to_max_length``, "dimention" ->
    "dimension", "updates steps" -> "update steps"); no defaults or option
    names changed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--framework",
        type=str,
        default="simcse",
        help="The framework to use.",
        choices=["simcse", "mcse"]
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-uncased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "--caption_file",
        type=str,
        default=None,
        help="A .txt file containing the caption sentences."
    )
    parser.add_argument(
        "--feature_file",
        type=str,
        default=None,
        help="A .hdf5 file containing the image features (e.g. ResNet50 features)."
    )
    parser.add_argument(
        "--text_file",
        type=str,
        default=None,
        help="A .txt file of unlabelled wiki sentences."
    )
    parser.add_argument(
        "--shuffle_imgs",
        action="store_true",
        help="Ablation study for random imgs",
    )
    parser.add_argument(
        "--random_imgs",
        action="store_true",
        help="Ablation study for random imgs",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="result/",
        help="Where to store the final model.")
    parser.add_argument(
        "--max_seq_length",
        type=int,
        default=32,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=64,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=64,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=3e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument(
        "--weight_decay",
        type=float,
        default=0.0,
        help="Weight decay to use.")
    parser.add_argument(
        "--num_train_epochs",
        type=int,
        default=6,
        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--max_grad_norm",
        type=float,
        default=1.0,
        help="Maximum gradient norm for gradient clipping."
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps",
        type=int,
        default=0,
        help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=0,
        help="A seed for reproducible training. We used [0,1,2,3,4] in experiments.")
    parser.add_argument(
        "--temp",
        type=float,
        default=0.05,
        help="Temperature for softmax.")
    parser.add_argument(
        "--temp_vl",
        type=float,
        default=0.05,
        help="Temperature for cross-modality contrastive learning"
    )
    parser.add_argument(
        "--hidden_size",
        type=int,
        default=768,
        help="Text embedding dimension of pooled output (mlp)")
    parser.add_argument(
        "--proj_dim",
        type=int,
        default=256,
        help="Projection dimension in grounding space")
    parser.add_argument(
        "--lbd",
        type=float,
        default=0.05,
        help="weight for inter-modality loss")
    parser.add_argument(
        "--eval_steps",
        type=int,
        default=125,
        help="evaluation step interval")
    parser.add_argument(
        "--metric_for_best_model",
        type=str,
        default='stsb_spearman',
        help="for saving best checkpoint")
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument(
        "--shot",
        type=int,
        default=-1,
        help="few-shot setting")
    args = parser.parse_args()
    # Create the output directory up front so checkpoints can be written.
    if args.output_dir is not None:
        os.makedirs(args.output_dir, exist_ok=True)
    return args
def main():
    """Train MCSE/SimCSE sentence embeddings.

    Interleaves text-only batches with image-caption batches (one paired batch
    every `paired_sample_step` steps), evaluates on STS every `eval_steps`
    optimizer updates, keeps the best checkpoint in `output_dir`, and writes a
    final checkpoint when `max_train_steps` is reached.
    """
    args = parse_args()
    print(args)
    # Initialize the accelerator.
    accelerator = Accelerator()
    args.device = accelerator.device
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the screen.
    # accelerator.is_local_main_process is only True for one process per machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # If passed along, set the training seed now.
    set_seed(args.seed)
    # Load pretrained tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=True)
    tokenizer.save_pretrained(args.output_dir)
    torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    config = AutoConfig.from_pretrained(args.model_name_or_path)
    # Pick the contrastive-learning wrapper matching the backbone family.
    # NOTE: 'roberta' must be tested before 'bert' — "roberta" model names
    # also contain the substring "bert", so branch order is load-bearing.
    if 'roberta' in args.model_name_or_path:
        lang_model = RobertaForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    elif 'bert' in args.model_name_or_path:
        lang_model = BertForCL.from_pretrained(
            args.model_name_or_path,
            from_tf=bool(".ckpt" in args.model_name_or_path),
            config=config,
            model_args=args
        )
    else:
        raise NotImplementedError
    # The vision tower is only instantiated for the multimodal (MCSE) framework.
    if args.framework.lower() == 'mcse':
        visn_model = ResNetVisnModel(2048, args.proj_dim)
    else:
        visn_model = None
    model = MCSE(lang_model, visn_model, args)
    # Define collator function
    def data_collator(batch):
        # Tokenizes the sentences and duplicates each one into two "views"
        # ((bs, len) -> (bs, 2, len)); the two dropout-perturbed forward
        # passes of the same sentence form the positive pair (SimCSE-style).
        keys = batch[0].keys()
        sentences = [b['sent'] for b in batch]
        new_batch = {}
        total = len(sentences)
        # tokenization
        tokenized_sents = tokenizer(
            sentences,
            max_length=args.max_seq_length,
            truncation=True,
            padding="max_length" if args.pad_to_max_length else 'longest',
            return_tensors='pt'
        )
        # 'img' is only present for the paired (caption+feature) dataset —
        # presumably ImgSentDataset omits it when feature_file is None; verify.
        if 'img' in keys:
            new_batch['img'] = torch.stack([batch[i]['img'] for i in range(total)])
        for key in ['input_ids', 'attention_mask', 'token_type_ids', 'position_ids']:
            # (bs, len) -> (bs, 2, len)
            if key in tokenized_sents.keys():
                new_batch[key] = tokenized_sents[key].unsqueeze(1).repeat(1, 2, 1)
        return new_batch
    # dataset and dataloader (it's better to implement it by Sampler)
    train_dataset_textonly = ImgSentDataset(text_file = args.text_file, feature_file = None)
    train_dataloader_textonly = DataLoader(train_dataset_textonly,
                             shuffle=True,
                             batch_size=args.per_device_train_batch_size,
                             collate_fn=data_collator)
    # Wrap both loaders in infinite generators so they can be interleaved
    # freely inside the step loop below.
    textonly_loader = inf_train_gen(train_dataloader_textonly)
    train_dataset_pair = ImgSentDataset(text_file=args.caption_file, feature_file=args.feature_file)
    train_dataloader_pair = DataLoader(train_dataset_pair,
                             shuffle=True,
                             batch_size=args.per_device_train_batch_size,
                             collate_fn=data_collator)
    pair_loader = inf_train_gen(train_dataloader_pair)
    # Optimizer
    # Split weights in two groups, one with weight decay and the other not. Same as examples in huggingface and sentence-transformer.
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    # Prepare everything with our `accelerator`.
    model, optimizer = accelerator.prepare(model, optimizer)
    # Scheduler and math around the number of training steps.
    # One "epoch" below walks through the combined number of batches of both
    # dataloaders, divided by the accumulation factor.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader_textonly) + len(train_dataloader_pair)) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    else:
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps,
        num_training_steps=args.max_train_steps,
    )
    set_seed(args.seed) # for sake of the status change of sampler
    # Train! num_processes -> 1
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
    logger.info("***** Running training *****")
    logger.info(f" Num text examples = {len(train_dataset_textonly)}")
    logger.info(f" Num paired examples = {len(train_dataset_pair)}")
    logger.info(f" Num Epochs = {args.num_train_epochs}")
    logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f" Train train batch size (w.parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f" Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    best_metric = 0
    # Every `paired_sample_step`-th batch comes from the paired loader.
    # NOTE(review): if the paired loader ever has MORE batches than the
    # text-only one, this is 0 and `step % 0` below raises ZeroDivisionError —
    # confirm the datasets guarantee len(textonly) >= len(pair).
    paired_sample_step = len(train_dataloader_textonly) // len(train_dataloader_pair)
    for epoch in range(args.num_train_epochs):
        for step in range(num_update_steps_per_epoch):
            model.train()
            if step % paired_sample_step == 0:
                batch = next(pair_loader)
                # NOTE(review): device is hard-coded; args.device
                # (accelerator.device) would be the portable choice here.
                for key in batch.keys():
                    batch[key] = batch[key].to('cuda')
            else:
                batch = next(textonly_loader)
                for key in batch.keys():
                    batch[key] = batch[key].to('cuda')
            # Paired batches under MCSE add the cross-modal (inter) loss,
            # weighted by args.lbd; all other batches use the intra loss only.
            if step % paired_sample_step == 0 and args.framework.lower() == 'mcse':
                intra_loss, inter_loss = model.compute_loss(batch, cal_inter=True)
                loss = intra_loss + args.lbd * inter_loss
            else:
                loss = model.compute_loss(batch, cal_inter=False)
            # Scale down so accumulated gradients average over the micro-batches.
            loss = loss / args.gradient_accumulation_steps
            accelerator.backward(loss)
            # Optimizer update on every accumulation boundary (and at the end
            # of the epoch, even if the last accumulation window is short).
            if (step+1) % args.gradient_accumulation_steps == 0 or step == num_update_steps_per_epoch-1:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
                # Periodic STS evaluation; keep the best checkpoint so far.
                if (step+1) % args.gradient_accumulation_steps == 0 and (completed_steps % args.eval_steps == 0 or completed_steps >= args.max_train_steps):
                    logger.info("***** Start evaluation *****")
                    model.eval()
                    metrics = evaluate(model.lang_model, tokenizer)
                    logger.info(f" step {completed_steps}: eval_stsb_spearman = {metrics['eval_stsb_spearman']}")
                    if metrics['eval_'+args.metric_for_best_model] > best_metric:
                        # evaluation
                        best_metric = metrics['eval_'+args.metric_for_best_model]
                        # save (1) pytorch_model.bin (2) config.json
                        logger.info("Saving best model checkpoint to %s", args.output_dir)
                        accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                        unwrapped_model = accelerator.unwrap_model(model)
                        unwrapped_model.lang_model.save_pretrained(args.output_dir, save_function=accelerator.save)
                        if args.framework.lower() == 'mcse':
                            accelerator.save(
                                {
                                    'visn_model': unwrapped_model.visn_model.state_dict(),
                                    'grounding': unwrapped_model.grounding.state_dict()
                                },
                                os.path.join(args.output_dir, 'mcse.pt')
                            )
                if completed_steps >= args.max_train_steps:
                    path = os.path.join(args.output_dir, 'final_checkpoint')
                    logger.info("Saving final checkpoint to %s", path)
                    tokenizer.save_pretrained(path)
                    accelerator.wait_for_everyone() # wait for all processes to reach that point in the script
                    unwrapped_model = accelerator.unwrap_model(model)
                    unwrapped_model.lang_model.save_pretrained(path, save_function=accelerator.save)
                    if args.framework.lower() == 'mcse':
                        accelerator.save(
                            {
                                'visn_model': unwrapped_model.visn_model.state_dict(),
                                'grounding': unwrapped_model.grounding.state_dict()
                            },
                            os.path.join(path, 'mcse.pt')
                        )
                    # NOTE(review): this break only exits the inner step loop;
                    # the outer epoch loop resumes and keeps training past
                    # max_train_steps (re-saving each time) — confirm intended.
                    break
    logger.info("Training completed.")
if __name__ == "__main__":
main() | 16,025 | 34.852349 | 155 | py |
MCSE | MCSE-master/preprocess/extract_visn_feature.py | import os.path as osp
import h5py
import tqdm
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.models as models
from torchvision.datasets.folder import default_loader
def get_visn_arch(arch):
    """Look up a backbone constructor (e.g. ``'resnet50'``) in ``torchvision.models``.

    :param arch: name of an architecture exposed by ``torchvision.models``
    :return: the model constructor (a callable)
    :raises ValueError: if ``arch`` is not an attribute of ``torchvision.models``
    """
    try:
        return getattr(models, arch)
    except AttributeError as e:
        # Fail loudly here. The original fell through and returned None, which
        # only surfaced later as a confusing "'NoneType' object is not
        # callable" at the call site.
        print(e)
        print("There is no arch %s in torchvision." % arch)
        raise ValueError("There is no arch %s in torchvision." % arch) from e
class ResNet(nn.Module):
    """Frozen torchvision CNN used as a fixed image-feature extractor.

    The classification head is replaced by an identity mapping, so the module
    outputs the pooled backbone features (2048-d for resnet50). All weights
    are frozen; the module is inference-only.
    """
    def __init__(self, arch='resnet50', pretrained=True):
        """
        :param arch: name of a torchvision backbone architecture
        :param pretrained: whether to load pretrained weights
        """
        super().__init__()
        # Build the backbone and freeze every weight — no finetuning here.
        backbone_net = get_visn_arch(arch)(pretrained=pretrained)
        for weight in backbone_net.parameters():
            weight.requires_grad = False
        # Drop the classifier so forward() yields the penultimate features.
        backbone_net.fc = nn.Identity()
        self.backbone = backbone_net
    def forward(self, img):
        """
        :param img: batch of image tensors
        :return: feature tensor of shape [batch_size, d], detached from the graph
        """
        feats = self.backbone(img)
        return feats.detach()
class ResnetFeatureExtractor():
    """Extracts pooled ResNet-50 features for a list of images into an HDF5 file."""
    def __init__(self, image_dir, output_dir, batch_size):
        """
        :param image_dir: directory containing the .jpg images
        :param output_dir: directory where '<dataname>.hdf5' is written
        :param batch_size: number of images per forward pass
        """
        self.model = ResNet(arch='resnet50').eval().cuda()
        # Standard ImageNet preprocessing, matching the pretrained backbone.
        self.image_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        self.image_dir = image_dir
        self.output_dir = output_dir
        self.batch_size = batch_size
    def extract_vision_features(self, dataname, img_ids):
        """Compute features for `img_ids` and save them to '<output_dir>/<dataname>.hdf5'.

        Row i of the 'features' dataset corresponds to img_ids[i].

        :param dataname: dataset name; 'coco' uses the COCO val2014 file-name scheme
        :param img_ids: sequence of image ids (order is preserved in the output)
        """
        print('Start extracting resnet features...')
        if dataname == 'coco':
            # COCO val2014 file names zero-pad the numeric id to 12 digits.
            img_paths = [osp.join(self.image_dir, 'COCO_val2014_' + str(img_id).zfill(12) + '.jpg')
                         for img_id in img_ids]
        else:
            img_paths = [osp.join(self.image_dir, img_id + '.jpg') for img_id in img_ids]
        tensor_imgs = []
        img_feats = []
        last_dim = -1
        for img_path in tqdm.tqdm(img_paths):
            pil_img = default_loader(img_path)
            tensor_imgs.append(self.image_transform(pil_img))
            if len(tensor_imgs) == self.batch_size:
                visn_input = torch.stack(tensor_imgs).cuda()  # [B, 3, 224, 224]
                with torch.no_grad():
                    visn_output = self.model(visn_input)  # [B, 2048]
                last_dim = visn_output.shape[-1]
                img_feats.extend(visn_output.detach().cpu().numpy())
                tensor_imgs = []
        if len(tensor_imgs) > 0:
            # Flush the final partial batch.
            visn_input = torch.stack(tensor_imgs).cuda()
            with torch.no_grad():
                visn_output = self.model(visn_input)
            # BUG FIX: record the feature dimension here as well. Previously it
            # was only set for full batches, so with fewer than `batch_size`
            # images in total, last_dim stayed -1 and create_dataset() below
            # failed with an invalid shape.
            last_dim = visn_output.shape[-1]
            img_feats.extend(visn_output.detach().cpu().numpy())
        assert len(img_feats) == len(img_paths)
        # Save features
        h5_path = osp.join(self.output_dir, '%s.hdf5' % dataname)
        print(f"\tSave features to {h5_path} with hdf5 dataset 'features'.")
        h5_file = h5py.File(h5_path, 'w')
        dset = h5_file.create_dataset("features", (len(img_paths), last_dim))
        for i, img_feat in enumerate(img_feats):
            dset[i] = img_feat
        h5_file.close()
| 3,518 | 32.198113 | 112 | py |
MCSE | MCSE-master/SentEval/examples/infersent.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
InferSent models. See https://github.com/facebookresearch/InferSent.
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import os
import torch
import logging
# get model.py from InferSent repo
from models import InferSent
# Set PATHs
PATH_SENTEVAL = '../'
PATH_TO_DATA = '../data'
PATH_TO_W2V = 'PATH/TO/glove.840B.300d.txt' # or crawl-300d-2M.vec for V2
MODEL_PATH = 'infersent1.pkl'
V = 1 # version of InferSent
assert os.path.isfile(MODEL_PATH) and os.path.isfile(PATH_TO_W2V), \
'Set MODEL and GloVe PATHs'
# import senteval
sys.path.insert(0, PATH_SENTEVAL)
import senteval
def prepare(params, samples):
    """Build the InferSent vocabulary from the joined evaluation samples."""
    joined = [' '.join(tokens) for tokens in samples]
    params.infersent.build_vocab(joined, tokenize=False)
def batcher(params, batch):
    """Encode a batch of tokenized sentences with InferSent.

    :return: the encoder's embedding matrix for the joined sentences
    """
    text = [' '.join(tokens) for tokens in batch]
    return params.infersent.encode(text, bsize=params.batch_size, tokenize=False)
"""
Evaluation of trained model on Transfer Tasks (SentEval)
"""
# define senteval params
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
# Load InferSent model
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
model = InferSent(params_model)
model.load_state_dict(torch.load(MODEL_PATH))
model.set_w2v_path(PATH_TO_W2V)
params_senteval['infersent'] = model.cuda()
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
'Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
results = se.eval(transfer_tasks)
print(results)
| 2,462 | 30.987013 | 92 | py |
MCSE | MCSE-master/SentEval/examples/bow.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import sys
import io
import numpy as np
import logging
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# PATH_TO_VEC = 'glove/glove.840B.300d.txt'
PATH_TO_VEC = 'fasttext/crawl-300d-2M.vec'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# Create dictionary
def create_dictionary(sentences, threshold=0):
    """Build (id2word, word2id) from tokenized sentences, most frequent first.

    Words occurring fewer than `threshold` times are dropped (threshold=0
    keeps everything). The special tokens <s>, </s>, <p> are pinned to the
    front of the vocabulary via huge pseudo-counts.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1
    if threshold > 0:
        counts = {w: c for w, c in counts.items() if c >= threshold}
    # Huge pseudo-counts keep the specials at the head of the sorted vocab.
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2
    # Stable sort: ties keep first-seen order, same as the original.
    ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    id2word = [w for w, _ in ranked]
    word2id = {w: i for i, (w, _) in enumerate(ranked)}
    return id2word, word2id
# Get word vectors from vocabulary (glove, word2vec, fasttext ..)
def get_wordvec(path_to_vec, word2id):
    """Load vectors for the words in `word2id` from a GloVe/word2vec/fasttext text file.

    :param path_to_vec: path to a space-separated embedding text file
    :param word2id: mapping of words to keep (only these are loaded)
    :return: dict mapping word -> np.ndarray vector
    """
    word_vec = {}
    with io.open(path_to_vec, 'r', encoding='utf-8') as f:
        # NOTE: word2vec/fasttext files start with a "<count> <dim>" header
        # line; it is harmless here because the header token is never a key
        # of word2id.
        for line in f:
            word, vec = line.split(' ', 1)
            if word in word2id:
                # np.fromstring(vec, sep=' ') is deprecated for text parsing;
                # split-and-convert is the supported equivalent.
                word_vec[word] = np.array(vec.split(), dtype=np.float64)
    # Single-line message: the original string carried a backslash line
    # continuation whose indentation leaked into the logged text.
    logging.info('Found {0} words with word vectors, out of {1} words'.format(
        len(word_vec), len(word2id)))
    return word_vec
# SentEval prepare and batcher
def prepare(params, samples):
    """Populate `params` with the task vocabulary and its word vectors."""
    id2word, word2id = create_dictionary(samples)
    params.word2id = word2id
    params.word_vec = get_wordvec(PATH_TO_VEC, params.word2id)
    # GloVe/fasttext vectors used here are 300-dimensional.
    params.wvec_dim = 300
    return
def batcher(params, batch):
    """Average the word vectors of each sentence into one embedding.

    Sentences that are empty, or contain no in-vocabulary word, map to the
    zero vector.

    :return: np.ndarray of shape (len(batch), params.wvec_dim)
    """
    embeddings = []
    for tokens in batch:
        # SentEval may hand over empty sentences; treat them as a lone period.
        if tokens == []:
            tokens = ['.']
        vectors = [params.word_vec[t] for t in tokens if t in params.word_vec]
        if not vectors:
            vectors = [np.zeros(params.wvec_dim)]
        embeddings.append(np.mean(vectors, 0))
    return np.vstack(embeddings)
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
'Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
results = se.eval(transfer_tasks)
print(results)
| 3,423 | 29.300885 | 82 | py |
MCSE | MCSE-master/SentEval/examples/googleuse.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division
import os
import sys
import logging
import tensorflow as tf
import tensorflow_hub as hub
tf.logging.set_verbosity(0)
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# tensorflow session
session = tf.Session()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# SentEval prepare and batcher
def prepare(params, samples):
    """No corpus-level preparation is needed for the Universal Sentence Encoder;
    the hook exists only to satisfy the SentEval API."""
    return
def batcher(params, batch):
    """Encode a batch of tokenized sentences with the Universal Sentence Encoder.

    Empty sentences are replaced by a lone period before encoding.
    """
    text = [' '.join(tokens) if tokens != [] else '.' for tokens in batch]
    return params['google_use'](text)
def make_embed_fn(module):
    """Load a TF-Hub text-embedding module and return a sentences -> embeddings closure.

    The graph and session are created once here; the returned function only
    runs the (already built) embedding op.
    """
    with tf.Graph().as_default():
        text_input = tf.placeholder(tf.string)
        embeddings = hub.Module(module)(text_input)
        session = tf.train.MonitoredSession()
    def embed(sentence_list):
        return session.run(embeddings, {text_input: sentence_list})
    return embed
# Start TF session and load Google Universal Sentence Encoder
encoder = make_embed_fn("https://tfhub.dev/google/universal-sentence-encoder-large/2")
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
params_senteval['google_use'] = encoder
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
'Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
results = se.eval(transfer_tasks)
print(results)
| 2,205 | 31.441176 | 86 | py |
MCSE | MCSE-master/SentEval/examples/models.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import numpy as np
import time
import torch
import torch.nn as nn
class InferSent(nn.Module):
    """BiLSTM sentence encoder with max/mean pooling from InferSent
    (Conneau et al., 2017). Version 1 uses GloVe-style specials (<s>, </s>);
    version 2 uses fastText-style specials (<p>, </p>) and Moses tokenization."""
    def __init__(self, config):
        # config keys: bsize, word_emb_dim, enc_lstm_dim, pool_type
        # ('mean' | 'max'), dpout_model, and optionally version (1 or 2).
        super(InferSent, self).__init__()
        self.bsize = config['bsize']
        self.word_emb_dim = config['word_emb_dim']
        self.enc_lstm_dim = config['enc_lstm_dim']
        self.pool_type = config['pool_type']
        self.dpout_model = config['dpout_model']
        self.version = 1 if 'version' not in config else config['version']
        self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
                                bidirectional=True, dropout=self.dpout_model)
        assert self.version in [1, 2]
        if self.version == 1:
            self.bos = '<s>'
            self.eos = '</s>'
            self.max_pad = True
            self.moses_tok = False
        elif self.version == 2:
            self.bos = '<p>'
            self.eos = '</p>'
            self.max_pad = False
            self.moses_tok = True
    def is_cuda(self):
        """True when the encoder weights live on GPU."""
        # either all weights are on cpu or they are on gpu
        return self.enc_lstm.bias_hh_l0.data.is_cuda
    def forward(self, sent_tuple):
        """Encode a padded, length-sorted batch.

        sent_tuple = (sent, sent_len): sent is (seqlen, bsize, word_dim) and
        sent_len holds per-sentence lengths. Returns (bsize, 2*enc_lstm_dim).
        """
        # sent_len: [max_len, ..., min_len] (bsize)
        # sent: (seqlen x bsize x worddim)
        sent, sent_len = sent_tuple
        # Sort by length (keep idx) — pack_padded_sequence needs decreasing lengths.
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        sent_len_sorted = sent_len_sorted.copy()
        idx_unsort = np.argsort(idx_sort)
        idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_sort)
        sent = sent.index_select(1, idx_sort)
        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
        sent_output = self.enc_lstm(sent_packed)[0] # seqlen x batch x 2*nhid
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
        # Un-sort by length (restore the caller's batch order)
        idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_unsort)
        sent_output = sent_output.index_select(1, idx_unsort)
        # Pooling over the time dimension
        if self.pool_type == "mean":
            sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
            emb = torch.sum(sent_output, 0).squeeze(0)
            emb = emb / sent_len.expand_as(emb)
        elif self.pool_type == "max":
            # Version 2 masks exact zeros (padding) out of the max.
            if not self.max_pad:
                sent_output[sent_output == 0] = -1e9
            emb = torch.max(sent_output, 0)[0]
            if emb.ndimension() == 3:
                emb = emb.squeeze(0)
            assert emb.ndimension() == 2
        return emb
    def set_w2v_path(self, w2v_path):
        """Remember the path to the word-vector text file (GloVe/fastText)."""
        self.w2v_path = w2v_path
    def get_word_dict(self, sentences, tokenize=True):
        """Collect the set of words used in `sentences` (plus bos/eos specials)."""
        # create vocab of words
        word_dict = {}
        sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
        for sent in sentences:
            for word in sent:
                if word not in word_dict:
                    word_dict[word] = ''
        word_dict[self.bos] = ''
        word_dict[self.eos] = ''
        return word_dict
    def get_w2v(self, word_dict):
        """Load vectors for exactly the words in `word_dict` from w2v_path."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with w2v vectors
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                if word in word_dict:
                    # NOTE(review): np.fromstring(text, sep=...) is deprecated
                    # in NumPy — confirm against the pinned NumPy version.
                    word_vec[word] = np.fromstring(vec, sep=' ')
        print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
        return word_vec
    def get_w2v_k(self, K):
        """Load the first K entries of the vector file, then keep scanning
        until bos/eos vectors have been found as well."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        # create word_vec with k first w2v vectors
        k = 0
        word_vec = {}
        with open(self.w2v_path, encoding='utf-8') as f:
            for line in f:
                word, vec = line.split(' ', 1)
                # NOTE(review): `k <= K` admits K+1 words (k runs 0..K) — the
                # reported "Vocab size : K" is off by one; confirm intended.
                if k <= K:
                    word_vec[word] = np.fromstring(vec, sep=' ')
                    k += 1
                if k > K:
                    if word in [self.bos, self.eos]:
                        word_vec[word] = np.fromstring(vec, sep=' ')
                if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
                    break
        return word_vec
    def build_vocab(self, sentences, tokenize=True):
        """Build self.word_vec restricted to the words appearing in `sentences`."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        word_dict = self.get_word_dict(sentences, tokenize)
        self.word_vec = self.get_w2v(word_dict)
        print('Vocab size : %s' % (len(self.word_vec)))
    # build w2v vocab with k most frequent words
    def build_vocab_k_words(self, K):
        """Build self.word_vec from the first K entries of the vector file."""
        assert hasattr(self, 'w2v_path'), 'w2v path not set'
        self.word_vec = self.get_w2v_k(K)
        print('Vocab size : %s' % (K))
    def update_vocab(self, sentences, tokenize=True):
        """Extend an existing self.word_vec with words newly seen in `sentences`."""
        assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
        assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
        word_dict = self.get_word_dict(sentences, tokenize)
        # keep only new words
        for word in self.word_vec:
            if word in word_dict:
                del word_dict[word]
        # udpate vocabulary
        if word_dict:
            new_word_vec = self.get_w2v(word_dict)
            self.word_vec.update(new_word_vec)
        else:
            new_word_vec = []
        print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
    def get_batch(self, batch):
        """Turn a length-sorted batch of token lists into a (max_len, bsize,
        word_dim) float tensor of word vectors (zeros beyond each length)."""
        # sent in batch in decreasing order of lengths
        # batch: (bsize, max_len, word_dim)
        embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
        for i in range(len(batch)):
            for j in range(len(batch[i])):
                embed[j, i, :] = self.word_vec[batch[i][j]]
        return torch.FloatTensor(embed)
    def tokenize(self, s):
        """Tokenize one sentence; version 2 approximates Moses tokenization."""
        from nltk.tokenize import word_tokenize
        if self.moses_tok:
            s = ' '.join(word_tokenize(s))
            s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
            return s.split()
        else:
            return word_tokenize(s)
    def prepare_samples(self, sentences, bsize, tokenize, verbose):
        """Tokenize, add bos/eos, drop OOV words, and sort by decreasing length.

        Returns (sentences, lengths, idx_sort) where idx_sort lets encode()
        restore the original order afterwards.
        """
        sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
                     [self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
        n_w = np.sum([len(x) for x in sentences])
        # filters words without w2v vectors
        for i in range(len(sentences)):
            s_f = [word for word in sentences[i] if word in self.word_vec]
            if not s_f:
                import warnings
                warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
                               Replacing by "</s>"..' % (sentences[i], i))
                s_f = [self.eos]
            sentences[i] = s_f
        lengths = np.array([len(s) for s in sentences])
        n_wk = np.sum(lengths)
        if verbose:
            print('Nb words kept : %s/%s (%.1f%s)' % (
                n_wk, n_w, 100.0 * n_wk / n_w, '%'))
        # sort by decreasing length
        lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
        # NOTE(review): np.array on ragged token lists builds an object array;
        # NumPy >= 1.24 raises here unless dtype=object is passed — confirm
        # the pinned NumPy version.
        sentences = np.array(sentences)[idx_sort]
        return sentences, lengths, idx_sort
    def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
        """Encode raw sentences into a (n_sentences, 2*enc_lstm_dim) ndarray,
        batching internally and preserving the input order."""
        tic = time.time()
        sentences, lengths, idx_sort = self.prepare_samples(
            sentences, bsize, tokenize, verbose)
        embeddings = []
        for stidx in range(0, len(sentences), bsize):
            batch = self.get_batch(sentences[stidx:stidx + bsize])
            if self.is_cuda():
                batch = batch.cuda()
            with torch.no_grad():
                batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
            embeddings.append(batch)
        embeddings = np.vstack(embeddings)
        # unsort back to the caller's original sentence order
        idx_unsort = np.argsort(idx_sort)
        embeddings = embeddings[idx_unsort]
        if verbose:
            print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
                len(embeddings)/(time.time()-tic),
                'gpu' if self.is_cuda() else 'cpu', bsize))
        return embeddings
    def visualize(self, sent, tokenize=True):
        """Plot which words win the max-pooling for a single sentence;
        returns (lstm_output, argmax_indices). Imports matplotlib lazily."""
        sent = sent.split() if not tokenize else self.tokenize(sent)
        sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
        if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
            import warnings
            warnings.warn('No words in "%s" have w2v vectors. Replacing \
                           by "%s %s"..' % (sent, self.bos, self.eos))
        batch = self.get_batch(sent)
        if self.is_cuda():
            batch = batch.cuda()
        output = self.enc_lstm(batch)[0]
        output, idxs = torch.max(output, 0)
        # output, idxs = output.squeeze(), idxs.squeeze()
        idxs = idxs.data.cpu().numpy()
        # how many feature dimensions each word position wins in the max-pool
        argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
        # visualize model
        import matplotlib.pyplot as plt
        x = range(len(sent[0]))
        y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
        plt.xticks(x, sent[0], rotation=45)
        plt.bar(x, y)
        plt.ylabel('%')
        plt.title('Visualisation of words importance')
        plt.show()
        return output, idxs
| 9,875 | 36.12782 | 94 | py |
MCSE | MCSE-master/SentEval/examples/gensen.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Clone GenSen repo here: https://github.com/Maluuba/gensen.git
And follow instructions for loading the model used in batcher
"""
from __future__ import absolute_import, division, unicode_literals
import sys
import logging
# import GenSen package
from gensen import GenSen, GenSenSingle
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# import SentEval
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# SentEval prepare and batcher
def prepare(params, samples):
    """No corpus-level preparation is needed for GenSen; the hook exists only
    to satisfy the SentEval API."""
    return
def batcher(params, batch):
    """Encode a batch of tokenized sentences with GenSen.

    :param params: SentEval params; params['gensen'] holds the GenSen encoder
    :param batch: list of tokenized sentences (lists of words)
    :return: numpy array of last-hidden-state sentence embeddings
    """
    # Empty sentences are replaced by a lone period before encoding.
    sentences = [' '.join(sent) if sent != [] else '.' for sent in batch]
    # BUG FIX: the original referenced `sentences` before it was ever assigned
    # and the bare name `gensen` (only `from gensen import ...` is used, so
    # the module name is never bound) — both raised NameError on the first
    # call. Use the prepared sentence list and the encoder stored in params.
    _, reps_h_t = params['gensen'].get_representation(
        sentences, pool='last', return_numpy=True, tokenize=True
    )
    return reps_h_t
# Load GenSen model
gensen_1 = GenSenSingle(
model_folder='../data/models',
filename_prefix='nli_large_bothskip',
pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_2 = GenSenSingle(
model_folder='../data/models',
filename_prefix='nli_large_bothskip_parse',
pretrained_emb='../data/embedding/glove.840B.300d.h5'
)
gensen_encoder = GenSen(gensen_1, gensen_2)
# NOTE(review): removed a stray module-level call
#   reps_h, reps_h_t = gensen.get_representation(sentences, pool='last', ...)
# Neither `gensen` (only `from gensen import GenSen, GenSenSingle` is used,
# so the module name is never bound) nor `sentences` exists at import time,
# so this statement raised NameError before evaluation could start.
# Representations are computed inside batcher() instead.
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
params_senteval['gensen'] = gensen_encoder
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
'Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
results = se.eval(transfer_tasks)
print(results)
| 2,429 | 31.4 | 82 | py |
MCSE | MCSE-master/SentEval/examples/skipthought.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
"""
Example of file for SkipThought in SentEval
"""
import logging
import sys
sys.setdefaultencoding('utf8')
# Set PATHs
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data/senteval_data/'
PATH_TO_SKIPTHOUGHT = ''
assert PATH_TO_SKIPTHOUGHT != '', 'Download skipthought and set correct PATH'
# import skipthought and Senteval
sys.path.insert(0, PATH_TO_SKIPTHOUGHT)
import skipthoughts
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
def prepare(params, samples):
    """No corpus-level preparation is needed for SkipThought; the hook exists
    only to satisfy the SentEval API."""
    return
def batcher(params, batch):
    """Encode a batch of tokenized sentences with SkipThought vectors.

    :param params: SentEval params; params['encoder'] is the loaded model
    :param batch: list of tokenized sentences (lists of words)
    :return: numpy array of sentence embeddings
    """
    # BUG FIX: the original wrapped the join in str(..., errors="ignore"),
    # which raises TypeError — the `errors` argument is only valid when
    # decoding a bytes object, and ' '.join(sent) is already text.
    sentences = [' '.join(sent) if sent != [] else '.' for sent in batch]
    embeddings = skipthoughts.encode(params['encoder'], sentences,
                                     verbose=False, use_eos=True)
    return embeddings
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 10, 'batch_size': 512}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'adam', 'batch_size': 64,
'tenacity': 5, 'epoch_size': 4}
# Set up logger
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
# Load SkipThought model
params_senteval['encoder'] = skipthoughts.load_model()
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
'Length', 'WordContent', 'Depth', 'TopConstituents',
'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
'OddManOut', 'CoordinationInversion']
results = se.eval(transfer_tasks)
print(results)
| 2,048 | 32.048387 | 97 | py |
MCSE | MCSE-master/SentEval/senteval/engine.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Generic sentence evaluation scripts wrapper
'''
from __future__ import absolute_import, division, unicode_literals
from senteval import utils
from senteval.binary import CREval, MREval, MPQAEval, SUBJEval
from senteval.snli import SNLIEval
from senteval.trec import TRECEval
from senteval.sick import SICKEntailmentEval, SICKEval
from senteval.mrpc import MRPCEval
from senteval.sts import STS12Eval, STS13Eval, STS14Eval, STS15Eval, STS16Eval, STSBenchmarkEval, SICKRelatednessEval, STSBenchmarkFinetune
from senteval.sst import SSTEval
from senteval.rank import ImageCaptionRetrievalEval
from senteval.probing import *
class SE(object):
    """SentEval engine.

    Wires a user-supplied ``batcher(params, batch)`` callback (and an optional
    ``prepare(params, samples)`` callback, run once per task over all task
    samples) to the individual task evaluation classes.
    """
    def __init__(self, params, batcher, prepare=None):
        # Fill in engine-level defaults for any parameters the caller omitted.
        params = utils.dotdict(params)
        params.usepytorch = True if 'usepytorch' not in params else params.usepytorch
        params.seed = 1111 if 'seed' not in params else params.seed
        params.batch_size = 128 if 'batch_size' not in params else params.batch_size
        params.nhid = 0 if 'nhid' not in params else params.nhid
        params.kfold = 5 if 'kfold' not in params else params.kfold
        if 'classifier' not in params or not params['classifier']:
            params.classifier = {'nhid': 0}
        assert 'nhid' in params.classifier, 'Set number of hidden units in classifier config!!'
        self.params = params
        # batcher and prepare callbacks; prepare defaults to a no-op.
        self.batcher = batcher
        self.prepare = prepare if prepare else lambda x, y: None
        self.list_tasks = ['CR', 'MR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
                           'SICKRelatedness', 'SICKEntailment', 'STSBenchmark',
                           'SNLI', 'ImageCaptionRetrieval', 'STS12', 'STS13',
                           'STS14', 'STS15', 'STS16',
                           'Length', 'WordContent', 'Depth', 'TopConstituents',
                           'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
                           'OddManOut', 'CoordinationInversion', 'SICKRelatedness-finetune', 'STSBenchmark-finetune', 'STSBenchmark-fix']
    def eval(self, name):
        """Evaluate on task ``name`` (a task string, or a list of strings for
        a batch of tasks) and return the result dict."""
        if isinstance(name, list):
            self.results = {x: self.eval(x) for x in name}
            return self.results
        tpath = self.params.task_path
        assert name in self.list_tasks, str(name) + ' not in ' + str(self.list_tasks)
        seed = self.params.seed
        # Explicit task -> evaluator-factory registry.  This replaces the
        # original if/elif chain and, in particular, the
        # `eval(name + 'Eval')` builtin-eval dispatch used for STS12-16.
        # Lambdas defer construction until the chosen task is looked up.
        factories = {
            # Original SentEval downstream tasks
            'CR': lambda: CREval(tpath + '/downstream/CR', seed=seed),
            'MR': lambda: MREval(tpath + '/downstream/MR', seed=seed),
            'MPQA': lambda: MPQAEval(tpath + '/downstream/MPQA', seed=seed),
            'SUBJ': lambda: SUBJEval(tpath + '/downstream/SUBJ', seed=seed),
            'SST2': lambda: SSTEval(tpath + '/downstream/SST/binary', nclasses=2, seed=seed),
            'SST5': lambda: SSTEval(tpath + '/downstream/SST/fine', nclasses=5, seed=seed),
            'TREC': lambda: TRECEval(tpath + '/downstream/TREC', seed=seed),
            'MRPC': lambda: MRPCEval(tpath + '/downstream/MRPC', seed=seed),
            'SICKRelatedness': lambda: SICKRelatednessEval(tpath + '/downstream/SICK', seed=seed),
            'STSBenchmark': lambda: STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark', seed=seed),
            'STSBenchmark-fix': lambda: STSBenchmarkEval(tpath + '/downstream/STS/STSBenchmark-fix', seed=seed),
            'STSBenchmark-finetune': lambda: STSBenchmarkFinetune(tpath + '/downstream/STS/STSBenchmark', seed=seed),
            'SICKRelatedness-finetune': lambda: SICKEval(tpath + '/downstream/SICK', seed=seed),
            'SICKEntailment': lambda: SICKEntailmentEval(tpath + '/downstream/SICK', seed=seed),
            'SNLI': lambda: SNLIEval(tpath + '/downstream/SNLI', seed=seed),
            'STS12': lambda: STS12Eval(tpath + '/downstream/STS/STS12-en-test', seed=seed),
            'STS13': lambda: STS13Eval(tpath + '/downstream/STS/STS13-en-test', seed=seed),
            'STS14': lambda: STS14Eval(tpath + '/downstream/STS/STS14-en-test', seed=seed),
            'STS15': lambda: STS15Eval(tpath + '/downstream/STS/STS15-en-test', seed=seed),
            'STS16': lambda: STS16Eval(tpath + '/downstream/STS/STS16-en-test', seed=seed),
            'ImageCaptionRetrieval': lambda: ImageCaptionRetrievalEval(tpath + '/downstream/COCO', seed=seed),
            # Probing tasks
            'Length': lambda: LengthEval(tpath + '/probing', seed=seed),
            'WordContent': lambda: WordContentEval(tpath + '/probing', seed=seed),
            'Depth': lambda: DepthEval(tpath + '/probing', seed=seed),
            'TopConstituents': lambda: TopConstituentsEval(tpath + '/probing', seed=seed),
            'BigramShift': lambda: BigramShiftEval(tpath + '/probing', seed=seed),
            'Tense': lambda: TenseEval(tpath + '/probing', seed=seed),
            'SubjNumber': lambda: SubjNumberEval(tpath + '/probing', seed=seed),
            'ObjNumber': lambda: ObjNumberEval(tpath + '/probing', seed=seed),
            'OddManOut': lambda: OddManOutEval(tpath + '/probing', seed=seed),
            'CoordinationInversion': lambda: CoordinationInversionEval(tpath + '/probing', seed=seed),
        }
        self.evaluation = factories[name]()
        self.params.current_task = name
        self.evaluation.do_prepare(self.params, self.prepare)
        self.results = self.evaluation.run(self.params, self.batcher)
        return self.results
| 6,525 | 49.2 | 139 | py |
MCSE | MCSE-master/SentEval/senteval/rank.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Image-Caption Retrieval with COCO dataset
'''
from __future__ import absolute_import, division, unicode_literals
import os
import sys
import logging
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
from senteval.tools.ranking import ImageSentenceRankingPytorch
class ImageCaptionRetrievalEval(object):
    """Image-caption retrieval on COCO: embeds the captions with the user's
    batcher, then trains a ranking model against precomputed image features."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task: Image Caption Retrieval *****\n\n')
        # Get captions and image features
        self.seed = seed
        train, dev, test = self.loadFile(task_path)
        self.coco_data = {'train': train, 'dev': dev, 'test': test}
    def do_prepare(self, params, prepare):
        """Hand every caption (train+dev+test) to the user's prepare hook."""
        samples = self.coco_data['train']['sent'] + \
                  self.coco_data['dev']['sent'] + \
                  self.coco_data['test']['sent']
        prepare(params, samples)
    def loadFile(self, fpath):
        """Load the pickled COCO splits; returns (train, valid, test) dicts
        with keys 'sent' (token lists) and 'imgfeat' (float32 matrix)."""
        coco = {}
        for split in ['train', 'valid', 'test']:
            list_sent = []
            list_img_feat = []
            # Python 2 pickles need latin1 decoding under Python 3.
            if sys.version_info < (3, 0):
                with open(os.path.join(fpath, split + '.pkl')) as f:
                    cocodata = pickle.load(f)
            else:
                with open(os.path.join(fpath, split + '.pkl'), 'rb') as f:
                    cocodata = pickle.load(f, encoding='latin1')
            # Keep exactly 5 captions per image so len(list_sent) % 5 == 0.
            for imgkey in range(len(cocodata['features'])):
                assert len(cocodata['image_to_caption_ids'][imgkey]) >= 5, \
                       cocodata['image_to_caption_ids'][imgkey]
                for captkey in cocodata['image_to_caption_ids'][imgkey][0:5]:
                    sent = cocodata['captions'][captkey]['cleaned_caption']
                    sent += ' .'  # add punctuation to end of sentence in COCO
                    # NOTE(review): `.encode('utf-8').split()` yields *bytes*
                    # tokens under Python 3 — looks like a Python 2 leftover;
                    # confirm the downstream batcher expects this.
                    list_sent.append(sent.encode('utf-8').split())
                    list_img_feat.append(cocodata['features'][imgkey])
            assert len(list_sent) == len(list_img_feat) and \
                   len(list_sent) % 5 == 0
            list_img_feat = np.array(list_img_feat).astype('float32')
            coco[split] = {'sent': list_sent, 'imgfeat': list_img_feat}
        return coco['train'], coco['valid'], coco['test']
    def run(self, params, batcher):
        """Embed all captions, then train/evaluate the image-sentence ranking
        model; returns dev score and recall@{1,5,10}/median-rank both ways."""
        coco_embed = {'train': {'sentfeat': [], 'imgfeat': []},
                      'dev': {'sentfeat': [], 'imgfeat': []},
                      'test': {'sentfeat': [], 'imgfeat': []}}
        for key in self.coco_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            self.coco_data[key]['sent'] = np.array(self.coco_data[key]['sent'])
            # NOTE(review): np.sort/np.argsort over an array of token lists —
            # presumably intended to order sentences for batching; verify this
            # behaves as expected on object arrays.
            self.coco_data[key]['sent'], idx_sort = np.sort(self.coco_data[key]['sent']), np.argsort(self.coco_data[key]['sent'])
            idx_unsort = np.argsort(idx_sort)  # inverse permutation to restore order
            coco_embed[key]['X'] = []
            nsent = len(self.coco_data[key]['sent'])
            for ii in range(0, nsent, params.batch_size):
                batch = self.coco_data[key]['sent'][ii:ii + params.batch_size]
                embeddings = batcher(params, batch)
                coco_embed[key]['sentfeat'].append(embeddings)
            # Undo the sort so sentence i still pairs with image i // 5.
            coco_embed[key]['sentfeat'] = np.vstack(coco_embed[key]['sentfeat'])[idx_unsort]
            coco_embed[key]['imgfeat'] = np.array(self.coco_data[key]['imgfeat'])
            logging.info('Computed {0} embeddings'.format(key))
        config = {'seed': self.seed, 'projdim': 1000, 'margin': 0.2}
        clf = ImageSentenceRankingPytorch(train=coco_embed['train'],
                                          valid=coco_embed['dev'],
                                          test=coco_embed['test'],
                                          config=config)
        bestdevscore, r1_i2t, r5_i2t, r10_i2t, medr_i2t, \
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = clf.run()
        logging.debug("\nTest scores | Image to text: \
            {0}, {1}, {2}, {3}".format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
        logging.debug("Test scores | Text to image: \
            {0}, {1}, {2}, {3}\n".format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))
        return {'devacc': bestdevscore,
                'acc': [(r1_i2t, r5_i2t, r10_i2t, medr_i2t),
                        (r1_t2i, r5_t2i, r10_t2i, medr_t2i)],
                'ndev': len(coco_embed['dev']['sentfeat']),
                'ntest': len(coco_embed['test']['sentfeat'])}
| 4,643 | 41.605505 | 129 | py |
MCSE | MCSE-master/SentEval/senteval/snli.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SNLI - Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import codecs
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SNLIEval(object):
    """SNLI 3-way entailment task: embeds both sentences of each pair and
    trains a classifier on [u; v; u*v; |u-v|] features."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : SNLI Entailment*****\n\n')
        self.seed = seed
        train1 = self.loadFile(os.path.join(taskpath, 's1.train'))
        train2 = self.loadFile(os.path.join(taskpath, 's2.train'))
        trainlabels = io.open(os.path.join(taskpath, 'labels.train'),
                              encoding='utf-8').read().splitlines()
        valid1 = self.loadFile(os.path.join(taskpath, 's1.dev'))
        valid2 = self.loadFile(os.path.join(taskpath, 's2.dev'))
        validlabels = io.open(os.path.join(taskpath, 'labels.dev'),
                              encoding='utf-8').read().splitlines()
        test1 = self.loadFile(os.path.join(taskpath, 's1.test'))
        test2 = self.loadFile(os.path.join(taskpath, 's2.test'))
        testlabels = io.open(os.path.join(taskpath, 'labels.test'),
                             encoding='utf-8').read().splitlines()
        # sort data (by s2 first) to reduce padding
        sorted_train = sorted(zip(train2, train1, trainlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        train2, train1, trainlabels = map(list, zip(*sorted_train))
        sorted_valid = sorted(zip(valid2, valid1, validlabels),
                              key=lambda z: (len(z[0]), len(z[1]), z[2]))
        valid2, valid1, validlabels = map(list, zip(*sorted_valid))
        sorted_test = sorted(zip(test2, test1, testlabels),
                             key=lambda z: (len(z[0]), len(z[1]), z[2]))
        test2, test1, testlabels = map(list, zip(*sorted_test))
        self.samples = train1 + train2 + valid1 + valid2 + test1 + test2
        self.data = {'train': (train1, train2, trainlabels),
                     'valid': (valid1, valid2, validlabels),
                     'test': (test1, test2, testlabels)
                     }
    def do_prepare(self, params, prepare):
        """Hand every sentence of all splits to the user's prepare hook."""
        return prepare(params, self.samples)
    def loadFile(self, fpath):
        """Read one sentence file; returns a list of token lists."""
        with codecs.open(fpath, 'rb', 'latin-1') as f:
            return [line.split() for line in
                    f.read().splitlines()]
    def run(self, params, batcher):
        """Embed all pairs, build pair features, train the split classifier,
        and return dev/test accuracy."""
        self.X, self.y = {}, {}
        dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        for key in self.data:
            if key not in self.X:
                self.X[key] = []
            if key not in self.y:
                self.y[key] = []
            input1, input2, mylabels = self.data[key]
            enc_input = []
            n_labels = len(mylabels)
            for ii in range(0, n_labels, params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]
                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    # Standard InferSent-style pair features.
                    enc_input.append(np.hstack((enc1, enc2, enc1 * enc2,
                                                np.abs(enc1 - enc2))))
                if (ii*params.batch_size) % (20000*params.batch_size) == 0:
                    logging.info("PROGRESS (encoding): %.2f%%" %
                                 (100 * ii / n_labels))
            self.X[key] = np.vstack(enc_input)
            self.y[key] = [dico_label[y] for y in mylabels]
        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'cudaEfficient': True,
                  'nhid': params.nhid, 'noreg': True}
        # Task-specific overrides of the shared classifier config.
        config_classifier = copy.deepcopy(params.classifier)
        config_classifier['max_epoch'] = 15
        config_classifier['epoch_size'] = 1
        config['classifier'] = config_classifier
        clf = SplitClassifier(self.X, self.y, config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1} for SNLI\n'
                      .format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.data['valid'][0]),
                'ntest': len(self.data['test'][0])}
| 4,577 | 39.157895 | 75 | py |
MCSE | MCSE-master/SentEval/senteval/utils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import re
import inspect
from torch import optim
def create_dictionary(sentences):
    """Build vocabulary mappings from tokenized sentences.

    Returns (id2word, word2id) with words ranked by descending frequency;
    the special tokens <s>, </s>, <p> are pinned to the front.
    """
    counts = {}
    for sentence in sentences:
        for token in sentence:
            counts[token] = counts.get(token, 0) + 1
    # Give the special tokens artificially huge counts so they sort first.
    counts['<s>'] = 1e9 + 4
    counts['</s>'] = 1e9 + 3
    counts['<p>'] = 1e9 + 2
    # Stable sort: ties keep first-occurrence order.
    ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    id2word = [word for word, _ in ranked]
    word2id = {word: idx for idx, (word, _) in enumerate(ranked)}
    return id2word, word2id
def cosine(u, v):
    """Cosine similarity between two 1-D vectors u and v."""
    denom = np.linalg.norm(u) * np.linalg.norm(v)
    return np.dot(u, v) / denom
class dotdict(dict):
    """dict subclass exposing keys as attributes.

    Reads of missing attributes return None (dict.get semantics) rather than
    raising AttributeError; writes and deletes go straight to the dict.
    """
    def __getattr__(self, key):
        # Only called when normal attribute lookup fails, so real dict
        # attributes/methods still win.  Missing keys yield None, not an error.
        return self.get(key)
    def __setattr__(self, key, value):
        self[key] = value
    def __delattr__(self, key):
        del self[key]
def get_optimizer(s):
    """
    Parse optimizer parameters.
    Input should be of the form:
        - "sgd,lr=0.01"
        - "adagrad,lr=0.1,lr_decay=0.05"

    Returns:
        (optim_fn, optim_params): the torch.optim class and a dict of float
        keyword arguments for its constructor.

    Raises:
        Exception: for an unknown method name or parameters the optimizer's
        constructor does not accept.
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for x in s[s.find(',') + 1:].split(','):
            split = x.split('=')
            assert len(split) == 2
            # Values must be plain (optionally signed) decimal numbers.
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
            optim_params[split[0]] = float(split[1])
    else:
        method = s
        optim_params = {}

    known_methods = {
        'adadelta': optim.Adadelta,
        'adagrad': optim.Adagrad,
        'adam': optim.Adam,
        'adamax': optim.Adamax,
        'asgd': optim.ASGD,
        'rmsprop': optim.RMSprop,
        'rprop': optim.Rprop,
        'sgd': optim.SGD,
    }
    if method not in known_methods:
        raise Exception('Unknown optimization method: "%s"' % method)
    optim_fn = known_methods[method]
    if method == 'sgd':
        # SGD has no usable default learning rate; require it explicitly.
        assert 'lr' in optim_params

    # check that we give good parameters to the optimizer
    # Fix: inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec returns the same positional-arg list at index 0.
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))

    return optim_fn, optim_params
| 2,713 | 27.270833 | 79 | py |
MCSE | MCSE-master/SentEval/senteval/binary.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
Binary classifier and corresponding datasets : MR, CR, SUBJ, MPQA
'''
from __future__ import absolute_import, division, unicode_literals
import io
import os
import numpy as np
import logging
from senteval.tools.validation import InnerKFoldClassifier
class BinaryClassifierEval(object):
    """Shared driver for the binary sentence-classification tasks
    (MR, CR, SUBJ, MPQA).

    Embeds every sample with the user's batcher, then scores the embeddings
    with an inner k-fold classifier.  Positive samples get label 1, negative
    samples label 0.
    """
    def __init__(self, pos, neg, seed=1111):
        self.seed = seed
        self.samples = pos + neg
        self.labels = [1] * len(pos) + [0] * len(neg)
        self.n_samples = len(self.samples)
    def do_prepare(self, params, prepare):
        # `prepare` sees the full corpus once; anything it stores on `params`
        # (vocab, lookup tables, ...) is later available to `batcher`.
        return prepare(params, self.samples)
    def loadFile(self, fpath):
        """Read one file of whitespace-tokenized sentences (latin-1)."""
        with io.open(fpath, 'r', encoding='latin-1') as fin:
            return [line.split() for line in fin.read().splitlines()]
    def run(self, params, batcher):
        """Embed all samples and run the inner k-fold evaluation.

        Returns a dict with dev/test accuracy and the sample counts.
        """
        # Sort by (length, label) so each batch needs minimal padding.
        ordered = sorted(zip(self.samples, self.labels),
                         key=lambda pair: (len(pair[0]), pair[1]))
        ordered_samples = [sent for sent, _ in ordered]
        ordered_labels = [lab for _, lab in ordered]
        logging.info('Generating sentence embeddings')
        chunks = []
        for start in range(0, self.n_samples, params.batch_size):
            chunk = batcher(params, ordered_samples[start:start + params.batch_size])
            chunks.append(chunk)
        embeddings = np.vstack(chunks)
        logging.info('Generated sentence embeddings')
        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = InnerKFoldClassifier(embeddings, np.array(ordered_labels), config)
        devacc, testacc = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1}\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc, 'ndev': self.n_samples,
                'ntest': self.n_samples}
class CREval(BinaryClassifierEval):
    """Customer Reviews (CR) binary sentiment task."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : CR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'custrev.pos'))
        neg = self.loadFile(os.path.join(task_path, 'custrev.neg'))
        # Fix: `super(self.__class__, self)` recurses infinitely if this class
        # is ever subclassed; name the class explicitly.
        super(CREval, self).__init__(pos, neg, seed)
class MREval(BinaryClassifierEval):
    """Movie Reviews (MR) binary sentiment task."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MR *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'rt-polarity.pos'))
        neg = self.loadFile(os.path.join(task_path, 'rt-polarity.neg'))
        # Fix: `super(self.__class__, self)` recurses infinitely if this class
        # is ever subclassed; name the class explicitly.
        super(MREval, self).__init__(pos, neg, seed)
class SUBJEval(BinaryClassifierEval):
    """Subjectivity (SUBJ) task: objective (label 1) vs subjective (label 0),
    matching the original positional (pos, neg) = (obj, subj) ordering."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SUBJ *****\n\n')
        obj = self.loadFile(os.path.join(task_path, 'subj.objective'))
        subj = self.loadFile(os.path.join(task_path, 'subj.subjective'))
        # Fix: `super(self.__class__, self)` recurses infinitely if this class
        # is ever subclassed; name the class explicitly.
        super(SUBJEval, self).__init__(obj, subj, seed)
class MPQAEval(BinaryClassifierEval):
    """MPQA opinion-polarity binary task."""
    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : MPQA *****\n\n')
        pos = self.loadFile(os.path.join(task_path, 'mpqa.pos'))
        neg = self.loadFile(os.path.join(task_path, 'mpqa.neg'))
        # Fix: `super(self.__class__, self)` recurses infinitely if this class
        # is ever subclassed; name the class explicitly.
        super(MPQAEval, self).__init__(pos, neg, seed)
| 3,712 | 38.924731 | 79 | py |
MCSE | MCSE-master/SentEval/senteval/mrpc.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
MRPC : Microsoft Research Paraphrase (detection) Corpus
'''
from __future__ import absolute_import, division, unicode_literals
import os
import logging
import numpy as np
import io
from senteval.tools.validation import KFoldClassifier
from sklearn.metrics import f1_score
class MRPCEval(object):
    """MRPC paraphrase detection: embeds both sentences of each pair and
    trains a k-fold classifier on [|u-v|; u*v] features."""
    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : MRPC *****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path,
                              'msr_paraphrase_train.txt'))
        test = self.loadFile(os.path.join(task_path,
                             'msr_paraphrase_test.txt'))
        self.mrpc_data = {'train': train, 'test': test}
    def do_prepare(self, params, prepare):
        # TODO : Should we separate samples in "train, test"?
        samples = self.mrpc_data['train']['X_A'] + \
                  self.mrpc_data['train']['X_B'] + \
                  self.mrpc_data['test']['X_A'] + self.mrpc_data['test']['X_B']
        return prepare(params, samples)
    def loadFile(self, fpath):
        """Parse one tab-separated MRPC file.

        Column 0 is the 0/1 paraphrase label; columns 3 and 4 hold the two
        sentences.  The first row is the header and is dropped afterwards.
        """
        mrpc_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                text = line.strip().split('\t')
                mrpc_data['X_A'].append(text[3].split())
                mrpc_data['X_B'].append(text[4].split())
                mrpc_data['y'].append(text[0])
        # Drop the header row that was read like a data row above.
        mrpc_data['X_A'] = mrpc_data['X_A'][1:]
        mrpc_data['X_B'] = mrpc_data['X_B'][1:]
        mrpc_data['y'] = [int(s) for s in mrpc_data['y'][1:]]
        return mrpc_data
    def run(self, params, batcher):
        """Embed all pairs, build pair features, and return dev/test accuracy
        plus test F1."""
        mrpc_embed = {'train': {}, 'test': {}}
        for key in self.mrpc_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            text_data = {}
            sorted_corpus = sorted(zip(self.mrpc_data[key]['X_A'],
                                       self.mrpc_data[key]['X_B'],
                                       self.mrpc_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))
            text_data['A'] = [x for (x, y, z) in sorted_corpus]
            text_data['B'] = [y for (x, y, z) in sorted_corpus]
            text_data['y'] = [z for (x, y, z) in sorted_corpus]
            for txt_type in ['A', 'B']:
                mrpc_embed[key][txt_type] = []
                for ii in range(0, len(text_data['y']), params.batch_size):
                    batch = text_data[txt_type][ii:ii + params.batch_size]
                    embeddings = batcher(params, batch)
                    mrpc_embed[key][txt_type].append(embeddings)
                mrpc_embed[key][txt_type] = np.vstack(mrpc_embed[key][txt_type])
            mrpc_embed[key]['y'] = np.array(text_data['y'])
            logging.info('Computed {0} embeddings'.format(key))
        # Train
        trainA = mrpc_embed['train']['A']
        trainB = mrpc_embed['train']['B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = mrpc_embed['train']['y']
        # Test
        testA = mrpc_embed['test']['A']
        testB = mrpc_embed['test']['B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = mrpc_embed['test']['y']
        config = {'nclasses': 2, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid, 'kfold': params.kfold}
        clf = KFoldClassifier(train={'X': trainF, 'y': trainY},
                              test={'X': testF, 'y': testY}, config=config)
        devacc, testacc, yhat = clf.run()
        testf1 = round(100*f1_score(testY, yhat), 2)
        logging.debug('Dev acc : {0} Test acc {1}; Test F1 {2} for MRPC.\n'
                      .format(devacc, testacc, testf1))
        return {'devacc': devacc, 'acc': testacc, 'f1': testf1,
                'ndev': len(trainA), 'ntest': len(testA)}
| 4,202 | 39.028571 | 80 | py |
MCSE | MCSE-master/SentEval/senteval/sts.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
STS-{2012,2013,2014,2015,2016} (unsupervised) and
STS-benchmark (supervised) tasks
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import numpy as np
import logging
import torch
from torch.nn.functional import normalize
from scipy.stats import spearmanr, pearsonr
from senteval.utils import cosine
from senteval.sick import SICKEval
# Question: normalization before evaluation?
def align_loss(x, y, alpha=2):
    """Alignment loss over paired embeddings.

    L2-normalizes each row of x and y, then returns the mean of
    ||x_i - y_i||^alpha.  Returns the plain int 0 when either input is empty.
    """
    if min(x.shape) == 0 or min(y.shape) == 0:
        return 0
    x_unit = normalize(x, p=2, dim=1)
    y_unit = normalize(y, p=2, dim=1)
    pair_dist = (x_unit - y_unit).norm(p=2, dim=1)
    return pair_dist.pow(alpha).mean()
def uniform_loss(x, t=2):
    """Uniformity loss: log of the mean pairwise Gaussian potential
    exp(-t * ||u_i - u_j||^2) over all row pairs of the L2-normalized input.
    Returns the plain int 0 when the input is empty.
    """
    # pdist: Computes the p-norm distance between every pair of row vectors in the input
    if min(x.shape) == 0:
        return 0
    unit = normalize(x, p=2, dim=1)
    sq_dists = torch.pdist(unit, p=2).pow(2)
    return sq_dists.mul(-t).exp().mean().log()
class STSEval(object):
    """Base class for the unsupervised STS tasks.

    Subclasses set ``self.datasets`` and call ``loadFile``.  ``run`` scores
    each sentence pair with a similarity function (cosine by default) and
    reports Pearson/Spearman correlation against the gold scores, plus the
    alignment/uniformity losses added for this project.
    """
    def loadFile(self, fpath):
        """Load STS.input/STS.gs files for every subset in self.datasets;
        rows with an empty gold score are dropped."""
        self.data = {}
        self.samples = []
        for dataset in self.datasets:
            sent1, sent2 = zip(*[l.split("\t") for l in
                               io.open(fpath + '/STS.input.%s.txt' % dataset,
                                       encoding='utf8').read().splitlines()])
            raw_scores = np.array([x for x in
                                   io.open(fpath + '/STS.gs.%s.txt' % dataset,
                                           encoding='utf8')
                                   .read().splitlines()])
            not_empty_idx = raw_scores != ''
            gs_scores = [float(x) for x in raw_scores[not_empty_idx]]
            sent1 = np.array([s.split() for s in sent1])[not_empty_idx]
            sent2 = np.array([s.split() for s in sent2])[not_empty_idx]
            # sort data by length to minimize padding in batcher
            sorted_data = sorted(zip(sent1, sent2, gs_scores),
                                 key=lambda z: (len(z[0]), len(z[1]), z[2]))
            sent1, sent2, gs_scores = map(list, zip(*sorted_data))
            self.data[dataset] = (sent1, sent2, gs_scores)
            self.samples += sent1 + sent2
    def do_prepare(self, params, prepare):
        """Install the similarity function (params.similarity or cosine) and
        run the user's prepare hook over all samples."""
        if 'similarity' in params:
            self.similarity = params.similarity
        else:  # Default similarity is cosine
            self.similarity = lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))
        return prepare(params, self.samples)
    def run(self, params, batcher):
        """Score every subset; returns per-subset and aggregate ('all')
        correlations and align/uniform losses."""
        results = {}
        all_sys_scores = []
        all_gs_scores = []
        ################# newly added
        all_loss_align = []
        all_loss_uniform = []
        #################
        for dataset in self.datasets:
            loss_align = []
            loss_uniform = []
            sys_scores = []
            input1, input2, gs_scores = self.data[dataset]
            for ii in range(0, len(gs_scores), params.batch_size):
                batch1 = input1[ii:ii + params.batch_size]
                batch2 = input2[ii:ii + params.batch_size]
                batch_gs_scores = gs_scores[ii:ii + params.batch_size]  # newly added
                # we assume get_batch already throws out the faulty ones
                if len(batch1) == len(batch2) and len(batch1) > 0:
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    ################# newly added
                    # Pairs with gold score >= 4.0 are treated as positives
                    # for the alignment loss; uniformity uses all embeddings.
                    pos_indices = [i for i in range(len(batch_gs_scores)) if batch_gs_scores[i] >= 4.0]
                    enc1_pos = enc1[pos_indices]
                    enc2_pos = enc2[pos_indices]
                    loss1 = align_loss(enc1_pos, enc2_pos)
                    loss2 = uniform_loss(torch.cat((enc1, enc2), dim=0))
                    loss_align.append(loss1)
                    loss_uniform.append(loss2)
                    #################
                    for kk in range(enc2.shape[0]):
                        sys_score = self.similarity(enc1[kk], enc2[kk])
                        sys_scores.append(sys_score)
            all_sys_scores.extend(sys_scores)
            all_gs_scores.extend(gs_scores)
            all_loss_align.extend(loss_align)
            all_loss_uniform.extend(loss_uniform)
            results[dataset] = {'pearson': pearsonr(sys_scores, gs_scores),
                                'spearman': spearmanr(sys_scores, gs_scores),
                                'nsamples': len(sys_scores),
                                'align_loss': np.mean(loss_align, dtype='float64'),  # newly added
                                'uniform_loss': np.mean(loss_uniform, dtype='float64')}  # newly added
            logging.debug('%s : pearson = %.4f, spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f' %
                          (dataset, results[dataset]['pearson'][0],
                           results[dataset]['spearman'][0], results[dataset]['align_loss'],
                           results[dataset]['uniform_loss']))
        # Aggregate: unweighted mean, nsamples-weighted mean, and a single
        # correlation over the concatenation of all subsets ('all').
        weights = [results[dset]['nsamples'] for dset in results.keys()]
        list_prs = np.array([results[dset]['pearson'][0] for
                            dset in results.keys()])
        list_spr = np.array([results[dset]['spearman'][0] for
                            dset in results.keys()])
        list_align = np.array([results[dset]['align_loss'] for
                              dset in results.keys()])
        list_uniform = np.array([results[dset]['uniform_loss'] for
                                dset in results.keys()])
        avg_pearson = np.average(list_prs)
        avg_spearman = np.average(list_spr)
        avg_align = np.average(list_align)
        avg_uniform = np.average(list_uniform)
        wavg_pearson = np.average(list_prs, weights=weights)
        wavg_spearman = np.average(list_spr, weights=weights)
        wavg_align = np.average(list_align, weights=weights)
        wavg_uniform = np.average(list_uniform, weights=weights)
        all_pearson = pearsonr(all_sys_scores, all_gs_scores)
        all_spearman = spearmanr(all_sys_scores, all_gs_scores)
        all_align = np.mean(all_loss_align)
        all_uniform = np.mean(all_loss_uniform)
        results['all'] = {'pearson': {'all': all_pearson[0],
                                      'mean': avg_pearson,
                                      'wmean': wavg_pearson},
                          'spearman': {'all': all_spearman[0],
                                       'mean': avg_spearman,
                                       'wmean': wavg_spearman},
                          'align_loss':{'all': all_align,
                                        'mean': avg_align,
                                        'wmean': wavg_align},
                          'uniform_loss':{'all': all_uniform,
                                          'mean': avg_uniform,
                                          'wmean': wavg_uniform}}
        logging.debug('ALL : Pearson = %.4f, \
            Spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f' % (all_pearson[0], all_spearman[0], all_align, all_uniform))
        logging.debug('ALL (weighted average) : Pearson = %.4f, \
            Spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f' % (wavg_pearson, wavg_spearman, wavg_align, wavg_uniform))
        logging.debug('ALL (average) : Pearson = %.4f, \
            Spearman = %.4f, align_loss = %.4f, uniform_loss = %.4f\n' % (avg_pearson, avg_spearman, avg_align, avg_uniform))
        return results
class STS12Eval(STSEval):
    """SemEval-2012 STS task (five subsets)."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS12 *****\n\n')
        self.seed = seed
        # Subsets shipped with the STS12 release.
        self.datasets = [
            'MSRpar',
            'MSRvid',
            'SMTeuroparl',
            'surprise.OnWN',
            'surprise.SMTnews',
        ]
        self.loadFile(taskpath)
class STS13Eval(STSEval):
    """SemEval-2013 STS task.

    The "SMT" subset is excluded because of its license.
    """
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS13 (-SMT) *****\n\n')
        self.seed = seed
        self.datasets = [
            'FNWN',
            'headlines',
            'OnWN',
        ]
        self.loadFile(taskpath)
class STS14Eval(STSEval):
    """SemEval-2014 STS task (six subsets)."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS14 *****\n\n')
        self.seed = seed
        self.datasets = [
            'deft-forum',
            'deft-news',
            'headlines',
            'images',
            'OnWN',
            'tweet-news',
        ]
        self.loadFile(taskpath)
class STS15Eval(STSEval):
    """SemEval-2015 STS task (five subsets)."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS15 *****\n\n')
        self.seed = seed
        self.datasets = [
            'answers-forums',
            'answers-students',
            'belief',
            'headlines',
            'images',
        ]
        self.loadFile(taskpath)
class STS16Eval(STSEval):
    """SemEval-2016 STS task (five subsets)."""
    def __init__(self, taskpath, seed=1111):
        logging.debug('***** Transfer task : STS16 *****\n\n')
        self.seed = seed
        self.datasets = [
            'answer-answer',
            'headlines',
            'plagiarism',
            'postediting',
            'question-question',
        ]
        self.loadFile(taskpath)
class STSBenchmarkEval(STSEval):
    """STS-Benchmark evaluated in the unsupervised STS style: the train,
    dev and test CSVs are treated as three scored subsets."""
    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n')
        self.seed = seed
        self.samples = []
        train = self.loadFile(os.path.join(task_path, 'sts-train.csv'))
        dev = self.loadFile(os.path.join(task_path, 'sts-dev.csv'))
        test = self.loadFile(os.path.join(task_path, 'sts-test.csv'))
        self.datasets = ['train', 'dev', 'test']
        self.data = {'train': train, 'dev': dev, 'test': test}
    def loadFile(self, fpath):
        """Parse one STS-B tab-separated file; columns 5/6 are the sentence
        pair, column 4 the gold score.  Returns (sents_a, sents_b, scores)."""
        sents_a, sents_b, scores = [], [], []
        with io.open(fpath, 'r', encoding='utf-8') as fin:
            for line in fin:
                fields = line.strip().split('\t')
                sents_a.append(fields[5].split())
                sents_b.append(fields[6].split())
                scores.append(float(fields[4]))
        # Accumulate sentences for do_prepare across all splits.
        self.samples += sents_a + sents_b
        return (sents_a, sents_b, scores)
class STSBenchmarkFinetune(SICKEval):
    """STS-Benchmark evaluated with the supervised SICK-style regressor
    (inherits the training loop from SICKEval)."""
    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n')
        self.seed = seed
        self.sick_data = {
            'train': self.loadFile(os.path.join(task_path, 'sts-train.csv')),
            'dev': self.loadFile(os.path.join(task_path, 'sts-dev.csv')),
            'test': self.loadFile(os.path.join(task_path, 'sts-test.csv')),
        }
    def loadFile(self, fpath):
        """Parse one STS-B file into the dict layout SICKEval.run expects:
        {'X_A': ..., 'X_B': ..., 'y': ...}."""
        data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as fin:
            for line in fin:
                fields = line.strip().split('\t')
                data['X_A'].append(fields[5].split())
                data['X_B'].append(fields[6].split())
                data['y'].append(float(fields[4]))
        return data
class SICKRelatednessEval(STSEval):
    """SICK relatedness evaluated in the unsupervised STS style: each split
    is scored directly against the gold relatedness values."""
    def __init__(self, task_path, seed=1111):
        logging.debug('\n\n***** Transfer task : SICKRelatedness*****\n\n')
        self.seed = seed
        self.samples = []
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.datasets = ['train', 'dev', 'test']
        self.data = {'train': train, 'dev': dev, 'test': test}
    def loadFile(self, fpath):
        """Parse one SICK file (tab-separated, header row skipped); columns
        1/2 are the sentence pair, column 3 the relatedness score."""
        sents_a, sents_b, scores = [], [], []
        with io.open(fpath, 'r', encoding='utf-8') as fin:
            next(fin, None)  # skip the header row
            for line in fin:
                fields = line.strip().split('\t')
                sents_a.append(fields[1].split())
                sents_b.append(fields[2].split())
                scores.append(float(fields[3]))
        # Accumulate sentences for do_prepare across all splits.
        self.samples += sents_a + sents_b
        return (sents_a, sents_b, scores)
| 12,674 | 42.407534 | 129 | py |
MCSE | MCSE-master/SentEval/senteval/probing.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
probing tasks
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import copy
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class PROBINGEval(object):
    """Generic probing-task evaluator.

    Reads a tab-separated probing file (split-token, label, sentence),
    embeds the sentences with a user-supplied batcher and trains a
    SplitClassifier on the embeddings.

    Fix vs. original: removed a leftover debug ``print`` of
    ``params.classifier['nhid']`` in :meth:`run`.
    """

    def __init__(self, task, task_path, seed=1111):
        self.seed = seed
        self.task = task
        logging.debug('***** (Probing) Transfer task : %s classification *****', self.task.upper())
        self.task_data = {'train': {'X': [], 'y': []},
                          'dev': {'X': [], 'y': []},
                          'test': {'X': [], 'y': []}}
        self.loadFile(task_path)
        logging.info('Loaded %s train - %s dev - %s test for %s' %
                     (len(self.task_data['train']['y']), len(self.task_data['dev']['y']),
                      len(self.task_data['test']['y']), self.task))

    def do_prepare(self, params, prepare):
        # Expose every sentence from every split (e.g. for vocab building).
        samples = self.task_data['train']['X'] + self.task_data['dev']['X'] + \
                  self.task_data['test']['X']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Populate ``self.task_data`` from *fpath*.

        Each line is ``<tr|va|te>\\t<label>\\t<sentence>``. String labels are
        remapped to integer ids; the label inventory is taken from the
        train split.
        """
        self.tok2split = {'tr': 'train', 'va': 'dev', 'te': 'test'}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.rstrip().split('\t')
                self.task_data[self.tok2split[line[0]]]['X'].append(line[-1].split())
                self.task_data[self.tok2split[line[0]]]['y'].append(line[1])

        labels = sorted(np.unique(self.task_data['train']['y']))
        self.tok2label = dict(zip(labels, range(len(labels))))
        self.nclasses = len(self.tok2label)
        for split in self.task_data:
            for i, y in enumerate(self.task_data[split]['y']):
                self.task_data[split]['y'][i] = self.tok2label[y]

    def run(self, params, batcher):
        """Embed all splits with *batcher* and train/evaluate a classifier.

        Returns a dict with dev/test accuracy and split sizes.
        """
        task_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size
        logging.info('Computing embeddings for train/dev/test')
        for key in self.task_data:
            # Sort by length (then label) to reduce padding inside batches.
            sorted_data = sorted(zip(self.task_data[key]['X'],
                                     self.task_data[key]['y']),
                                 key=lambda z: (len(z[0]), z[1]))
            self.task_data[key]['X'], self.task_data[key]['y'] = map(list, zip(*sorted_data))

            task_embed[key]['X'] = []
            for ii in range(0, len(self.task_data[key]['y']), bsize):
                batch = self.task_data[key]['X'][ii:ii + bsize]
                embeddings = batcher(params, batch)
                task_embed[key]['X'].append(embeddings)
            task_embed[key]['X'] = np.vstack(task_embed[key]['X'])
            task_embed[key]['y'] = np.array(self.task_data[key]['y'])
        logging.info('Computed embeddings')

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        if self.task == "WordContent" and params.classifier['nhid'] > 0:
            # WordContent is always probed with a linear classifier even if
            # an MLP was configured; deep-copy so the caller's config dict
            # is left untouched. (Leftover debug print removed here.)
            config_classifier = copy.deepcopy(config_classifier)
            config_classifier['classifier']['nhid'] = 0

        clf = SplitClassifier(X={'train': task_embed['train']['X'],
                                 'valid': task_embed['dev']['X'],
                                 'test': task_embed['test']['X']},
                              y={'train': task_embed['train']['y'],
                                 'valid': task_embed['dev']['y'],
                                 'test': task_embed['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : %.1f Test acc : %.1f for %s classification\n' % (devacc, testacc, self.task.upper()))

        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(task_embed['dev']['X']),
                'ntest': len(task_embed['test']['X'])}
"""
Surface Information
"""
class LengthEval(PROBINGEval):
    """Surface probing task: predict the (binned) sentence length."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'sentence_length.txt')
        super(LengthEval, self).__init__('Length', data_file, seed)
class WordContentEval(PROBINGEval):
    """Surface probing task: which of 200 target words the sentence contains."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'word_content.txt')
        super(WordContentEval, self).__init__('WordContent', data_file, seed)
"""
Latent Structural Information
"""
class DepthEval(PROBINGEval):
    """Structural probing task: predict the (binned) parse-tree depth."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'tree_depth.txt')
        super(DepthEval, self).__init__('Depth', data_file, seed)
class TopConstituentsEval(PROBINGEval):
    """Structural probing task: top constituent sequence, e.g. 'PP_NP_VP_.' (20 classes)."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'top_constituents.txt')
        super(TopConstituentsEval, self).__init__('TopConstituents', data_file, seed)
class BigramShiftEval(PROBINGEval):
    """Structural probing task: detect a swapped adjacent-word bigram (binary, 0/1)."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'bigram_shift.txt')
        super(BigramShiftEval, self).__init__('BigramShift', data_file, seed)
# TODO: Voice?
"""
Latent Semantic Information
"""
class TenseEval(PROBINGEval):
    """Semantic probing task: main-verb tense, labels 'PRES' vs 'PAST'."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'past_present.txt')
        super(TenseEval, self).__init__('Tense', data_file, seed)
class SubjNumberEval(PROBINGEval):
    """Semantic probing task: subject number, labels 'NN' vs 'NNS'."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'subj_number.txt')
        super(SubjNumberEval, self).__init__('SubjNumber', data_file, seed)
class ObjNumberEval(PROBINGEval):
    """Semantic probing task: object number, labels 'NN' vs 'NNS'."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'obj_number.txt')
        super(ObjNumberEval, self).__init__('ObjNumber', data_file, seed)
class OddManOutEval(PROBINGEval):
    """Semantic probing task: detect a replaced word, labels 'O' vs 'C'."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'odd_man_out.txt')
        super(OddManOutEval, self).__init__('OddManOut', data_file, seed)
class CoordinationInversionEval(PROBINGEval):
    """Semantic probing task: detect inverted coordinated clauses, labels 'O' vs 'I'."""

    def __init__(self, task_path, seed=1111):
        data_file = os.path.join(task_path, 'coordination_inversion.txt')
        super(CoordinationInversionEval, self).__init__('CoordinationInversion', data_file, seed)
| 6,786 | 38.459302 | 120 | py |
MCSE | MCSE-master/SentEval/senteval/sick.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SICK Relatedness and Entailment
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from sklearn.metrics import mean_squared_error
from scipy.stats import pearsonr, spearmanr
from senteval.tools.relatedness import RelatednessPytorch
from senteval.tools.validation import SplitClassifier
class SICKEval(object):
    """SICK-Relatedness evaluation.

    Embeds the SICK sentence pairs with a user-supplied batcher and trains
    a RelatednessPytorch regressor on the pair features [|u-v|, u*v] to
    predict the human relatedness score (1..5).
    """

    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SICK-Relatedness*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}

    def do_prepare(self, params, prepare):
        # Expose every sentence from every split (e.g. for vocab building).
        samples = self.sick_data['train']['X_A'] + \
                  self.sick_data['train']['X_B'] + \
                  self.sick_data['dev']['X_A'] + \
                  self.sick_data['dev']['X_B'] + \
                  self.sick_data['test']['X_A'] + self.sick_data['test']['X_B']
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse one SICK tsv file (header row; columns 1/2 are the two
        sentences, column 3 is the relatedness score)."""
        skipFirstLine = True
        sick_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if skipFirstLine:
                    skipFirstLine = False
                else:
                    text = line.strip().split('\t')
                    sick_data['X_A'].append(text[1].split())  # sentence A tokens
                    sick_data['X_B'].append(text[2].split())  # sentence B tokens
                    sick_data['y'].append(text[3])  # relatedness score (string)

        sick_data['y'] = [float(s) for s in sick_data['y']]
        return sick_data

    def run(self, params, batcher):
        """Embed all splits, build pair features and train the regressor.

        Note: re-sorts self.sick_data in place (by sentence lengths) to
        reduce padding inside batches.
        """
        sick_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sick_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
                                       self.sick_data[key]['X_B'],
                                       self.sick_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
            self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
            self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]

            for txt_type in ['X_A', 'X_B']:
                sick_embed[key][txt_type] = []
                for ii in range(0, len(self.sick_data[key]['y']), bsize):
                    batch = self.sick_data[key][txt_type][ii:ii + bsize]
                    embeddings = batcher(params, batch)
                    sick_embed[key][txt_type].append(embeddings)
                sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
            sick_embed[key]['y'] = np.array(self.sick_data[key]['y'])
            logging.info('Computed {0} embeddings'.format(key))

        # Train: pair features are [|u-v|, u*v] concatenated.
        trainA = sick_embed['train']['X_A']
        trainB = sick_embed['train']['X_B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = self.encode_labels(self.sick_data['train']['y'])

        # Dev
        devA = sick_embed['dev']['X_A']
        devB = sick_embed['dev']['X_B']
        devF = np.c_[np.abs(devA - devB), devA * devB]
        devY = self.encode_labels(self.sick_data['dev']['y'])

        # Test
        testA = sick_embed['test']['X_A']
        testB = sick_embed['test']['X_B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = self.encode_labels(self.sick_data['test']['y'])

        config = {'seed': self.seed, 'nclasses': 5}
        clf = RelatednessPytorch(train={'X': trainF, 'y': trainY},
                                 valid={'X': devF, 'y': devY},
                                 test={'X': testF, 'y': testY},
                                 devscores=self.sick_data['dev']['y'],
                                 config=config)

        devspr, yhat = clf.run()

        pr = pearsonr(yhat, self.sick_data['test']['y'])[0]
        sr = spearmanr(yhat, self.sick_data['test']['y'])[0]
        pr = 0 if pr != pr else pr  # NaN guard (x != x iff x is NaN)
        sr = 0 if sr != sr else sr
        se = mean_squared_error(yhat, self.sick_data['test']['y'])
        logging.debug('Dev : Spearman {0}'.format(devspr))
        logging.debug('Test : Pearson {0} Spearman {1} MSE {2} \
                       for SICK Relatedness\n'.format(pr, sr, se))

        return {'devspearman': devspr, 'pearson': pr, 'spearman': sr, 'mse': se,
                'yhat': yhat, 'ndev': len(devA), 'ntest': len(testA)}

    def encode_labels(self, labels, nclass=5):
        """
        Label encoding from Tree LSTM paper (Tai, Socher, Manning)

        Turns a scalar score y in [1, nclass] into a soft distribution over
        the nclass bins, putting mass on floor(y) and floor(y)+1.
        """
        Y = np.zeros((len(labels), nclass)).astype('float32')
        for j, y in enumerate(labels):
            for i in range(nclass):
                if i+1 == np.floor(y) + 1:
                    Y[j, i] = y - np.floor(y)
                if i+1 == np.floor(y):
                    Y[j, i] = np.floor(y) - y + 1
        return Y
class SICKEntailmentEval(SICKEval):
    """SICK-Entailment: 3-way classification over sentence pairs
    (CONTRADICTION / NEUTRAL / ENTAILMENT) on the same pair features
    [|u-v|, u*v] as the relatedness task, but with a SplitClassifier."""

    def __init__(self, task_path, seed=1111):
        logging.debug('***** Transfer task : SICK-Entailment*****\n\n')
        self.seed = seed
        train = self.loadFile(os.path.join(task_path, 'SICK_train.txt'))
        dev = self.loadFile(os.path.join(task_path, 'SICK_trial.txt'))
        test = self.loadFile(os.path.join(task_path, 'SICK_test_annotated.txt'))
        self.sick_data = {'train': train, 'dev': dev, 'test': test}

    def loadFile(self, fpath):
        """Like SICKEval.loadFile but reads the entailment label (column 4)
        and maps it to an integer class id."""
        label2id = {'CONTRADICTION': 0, 'NEUTRAL': 1, 'ENTAILMENT': 2}
        skipFirstLine = True
        sick_data = {'X_A': [], 'X_B': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                if skipFirstLine:
                    skipFirstLine = False
                else:
                    text = line.strip().split('\t')
                    sick_data['X_A'].append(text[1].split())  # sentence A tokens
                    sick_data['X_B'].append(text[2].split())  # sentence B tokens
                    sick_data['y'].append(text[4])  # entailment label (string)
        sick_data['y'] = [label2id[s] for s in sick_data['y']]
        return sick_data

    def run(self, params, batcher):
        """Embed all splits, build pair features and train a classifier.

        Note: re-sorts self.sick_data in place (by sentence lengths) to
        reduce padding inside batches.
        """
        sick_embed = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for key in self.sick_data:
            logging.info('Computing embedding for {0}'.format(key))
            # Sort to reduce padding
            sorted_corpus = sorted(zip(self.sick_data[key]['X_A'],
                                       self.sick_data[key]['X_B'],
                                       self.sick_data[key]['y']),
                                   key=lambda z: (len(z[0]), len(z[1]), z[2]))

            self.sick_data[key]['X_A'] = [x for (x, y, z) in sorted_corpus]
            self.sick_data[key]['X_B'] = [y for (x, y, z) in sorted_corpus]
            self.sick_data[key]['y'] = [z for (x, y, z) in sorted_corpus]

            for txt_type in ['X_A', 'X_B']:
                sick_embed[key][txt_type] = []
                for ii in range(0, len(self.sick_data[key]['y']), bsize):
                    batch = self.sick_data[key][txt_type][ii:ii + bsize]
                    embeddings = batcher(params, batch)
                    sick_embed[key][txt_type].append(embeddings)
                sick_embed[key][txt_type] = np.vstack(sick_embed[key][txt_type])
            logging.info('Computed {0} embeddings'.format(key))

        # Train: pair features are [|u-v|, u*v] concatenated.
        trainA = sick_embed['train']['X_A']
        trainB = sick_embed['train']['X_B']
        trainF = np.c_[np.abs(trainA - trainB), trainA * trainB]
        trainY = np.array(self.sick_data['train']['y'])

        # Dev
        devA = sick_embed['dev']['X_A']
        devB = sick_embed['dev']['X_B']
        devF = np.c_[np.abs(devA - devB), devA * devB]
        devY = np.array(self.sick_data['dev']['y'])

        # Test
        testA = sick_embed['test']['X_A']
        testB = sick_embed['test']['X_B']
        testF = np.c_[np.abs(testA - testB), testA * testB]
        testY = np.array(self.sick_data['test']['y'])

        config = {'nclasses': 3, 'seed': self.seed,
                  'usepytorch': params.usepytorch,
                  'classifier': params.classifier,
                  'nhid': params.nhid}
        clf = SplitClassifier(X={'train': trainF, 'valid': devF, 'test': testF},
                              y={'train': trainY, 'valid': devY, 'test': testY},
                              config=config)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
                       SICK entailment\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(devA), 'ntest': len(testA)}
| 9,243 | 41.599078 | 80 | py |
MCSE | MCSE-master/SentEval/senteval/trec.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
TREC question-type classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import KFoldClassifier
class TRECEval(object):
    """TREC question-type classification (6 coarse classes), evaluated with
    k-fold cross-validation on the train split and a held-out test split."""

    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : TREC *****\n\n')
        self.seed = seed
        self.train = self.loadFile(os.path.join(task_path, 'train_5500.label'))
        self.test = self.loadFile(os.path.join(task_path, 'TREC_10.label'))

    def do_prepare(self, params, prepare):
        # Expose every question (train + test), e.g. for vocab building.
        return prepare(params, self.train['X'] + self.test['X'])

    def loadFile(self, fpath):
        """Read a TREC label file; each line is 'COARSE:fine question ...'."""
        label2idx = {'ABBR': 0, 'DESC': 1, 'ENTY': 2,
                     'HUM': 3, 'LOC': 4, 'NUM': 5}
        data = {'X': [], 'y': []}
        with io.open(fpath, 'r', encoding='latin-1') as fin:
            for line in fin:
                coarse, rest = line.strip().split(':', 1)
                # Drop the fine-grained label (first token after ':').
                tokens = rest.split(' ', 1)[1].split()
                assert coarse in label2idx, coarse
                data['X'].append(tokens)
                data['y'].append(label2idx[coarse])
        return data

    def run(self, params, batcher):
        """Embed both splits with *batcher* and run a KFoldClassifier."""
        # Sort both splits by (length, label) to reduce padding in batches.
        train_sorted = sorted(zip(self.train['X'], self.train['y']),
                              key=lambda z: (len(z[0]), z[1]))
        test_sorted = sorted(zip(self.test['X'], self.test['y']),
                             key=lambda z: (len(z[0]), z[1]))
        train_samples, train_labels = map(list, zip(*train_sorted))
        test_samples, test_labels = map(list, zip(*test_sorted))

        def embed(samples):
            # One batcher call per batch; stack into a single matrix.
            chunks = []
            for start in range(0, len(samples), params.batch_size):
                chunks.append(batcher(params, samples[start:start + params.batch_size]))
            return np.vstack(chunks)

        train_embeddings = embed(train_samples)
        logging.info('Computed train embeddings')
        test_embeddings = embed(test_samples)
        logging.info('Computed test embeddings')

        config_classifier = {'nclasses': 6, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier,
                             'kfold': params.kfold}
        clf = KFoldClassifier({'X': train_embeddings,
                               'y': np.array(train_labels)},
                              {'X': test_embeddings,
                               'y': np.array(test_labels)},
                              config_classifier)
        devacc, testacc, _ = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} \
            for TREC\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(self.train['X']), 'ntest': len(self.test['X'])}
| 3,565 | 38.622222 | 79 | py |
MCSE | MCSE-master/SentEval/senteval/sst.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
'''
SST - binary classification
'''
from __future__ import absolute_import, division, unicode_literals
import os
import io
import logging
import numpy as np
from senteval.tools.validation import SplitClassifier
class SSTEval(object):
    """Stanford Sentiment Treebank evaluation, binary (2-class) or
    fine-grained (5-class), using a SplitClassifier on sentence embeddings."""

    def __init__(self, task_path, nclasses=2, seed=1111):
        self.seed = seed
        assert nclasses in [2, 5]  # binary or fine-grained
        self.nclasses = nclasses
        self.task_name = 'Binary' if self.nclasses == 2 else 'Fine-Grained'
        logging.debug('***** Transfer task : SST %s classification *****\n\n', self.task_name)

        splits = {}
        for split, fname in (('train', 'sentiment-train'),
                             ('dev', 'sentiment-dev'),
                             ('test', 'sentiment-test')):
            splits[split] = self.loadFile(os.path.join(task_path, fname))
        self.sst_data = splits

    def do_prepare(self, params, prepare):
        # Expose every sentence from every split, e.g. for vocab building.
        all_samples = self.sst_data['train']['X'] + self.sst_data['dev']['X'] + \
                      self.sst_data['test']['X']
        return prepare(params, all_samples)

    def loadFile(self, fpath):
        """Read one SST split. Binary format: 'sentence\\tlabel';
        fine-grained format: 'label sentence'."""
        data = {'X': [], 'y': []}
        with io.open(fpath, 'r', encoding='utf-8') as fin:
            for line in fin:
                if self.nclasses == 2:
                    fields = line.strip().split('\t')
                    data['y'].append(int(fields[1]))
                    data['X'].append(fields[0].split())
                elif self.nclasses == 5:
                    fields = line.strip().split(' ', 1)
                    data['y'].append(int(fields[0]))
                    data['X'].append(fields[1].split())
        assert max(data['y']) == self.nclasses - 1
        return data

    def run(self, params, batcher):
        """Embed all splits with *batcher* and train/evaluate a classifier."""
        embeds = {'train': {}, 'dev': {}, 'test': {}}
        bsize = params.batch_size

        for split in self.sst_data:
            logging.info('Computing embedding for {0}'.format(split))
            # Sort by (length, label) to reduce padding inside batches.
            order = sorted(zip(self.sst_data[split]['X'],
                               self.sst_data[split]['y']),
                           key=lambda z: (len(z[0]), z[1]))
            self.sst_data[split]['X'], self.sst_data[split]['y'] = map(list, zip(*order))

            chunks = []
            for start in range(0, len(self.sst_data[split]['y']), bsize):
                chunks.append(batcher(params, self.sst_data[split]['X'][start:start + bsize]))
            embeds[split]['X'] = np.vstack(chunks)
            embeds[split]['y'] = np.array(self.sst_data[split]['y'])
            logging.info('Computed {0} embeddings'.format(split))

        config_classifier = {'nclasses': self.nclasses, 'seed': self.seed,
                             'usepytorch': params.usepytorch,
                             'classifier': params.classifier}

        clf = SplitClassifier(X={'train': embeds['train']['X'],
                                 'valid': embeds['dev']['X'],
                                 'test': embeds['test']['X']},
                              y={'train': embeds['train']['y'],
                                 'valid': embeds['dev']['y'],
                                 'test': embeds['test']['y']},
                              config=config_classifier)

        devacc, testacc = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for \
            SST {2} classification\n'.format(devacc, testacc, self.task_name))

        return {'devacc': devacc, 'acc': testacc,
                'ndev': len(embeds['dev']['X']),
                'ntest': len(embeds['test']['X'])}
| 3,946 | 39.690722 | 94 | py |
MCSE | MCSE-master/SentEval/senteval/tools/relatedness.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Semantic Relatedness (supervised) with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import copy
import numpy as np
import torch
from torch import nn
import torch.optim as optim
from scipy.stats import pearsonr, spearmanr
class RelatednessPytorch(object):
    """Supervised semantic relatedness regressor.

    Can be used for SICK-Relatedness, and STS14. Trains a linear layer +
    softmax over 5 score bins with an MSE loss against the soft label
    encoding, early-stopping on dev Spearman. Requires CUDA.

    Fixes vs. original: prepare_data converted the test labels into an
    unused local (``testY``) and returned the raw numpy input; all splits
    are now converted consistently. Stale comments corrected.
    """

    def __init__(self, train, valid, test, devscores, config):
        # fix seed
        np.random.seed(config['seed'])
        torch.manual_seed(config['seed'])
        assert torch.cuda.is_available(), 'torch.cuda required for Relatedness'
        torch.cuda.manual_seed(config['seed'])

        self.train = train
        self.valid = valid
        self.test = test
        self.devscores = devscores

        self.inputdim = train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.l2reg = 0.
        self.batch_size = 64
        self.maxepoch = 1000
        self.early_stop = True

        self.model = nn.Sequential(
            nn.Linear(self.inputdim, self.nclasses),
            nn.Softmax(dim=-1),
        )
        self.loss_fn = nn.MSELoss()

        if torch.cuda.is_available():
            self.model = self.model.cuda()
            self.loss_fn = self.loss_fn.cuda()

        self.loss_fn.size_average = False
        self.optimizer = optim.Adam(self.model.parameters(),
                                    weight_decay=self.l2reg)

    def prepare_data(self, trainX, trainy, devX, devy, testX, testy):
        """Move all six numpy arrays to the GPU as float tensors."""
        trainX = torch.from_numpy(trainX).float().cuda()
        trainy = torch.from_numpy(trainy).float().cuda()
        devX = torch.from_numpy(devX).float().cuda()
        devy = torch.from_numpy(devy).float().cuda()
        testX = torch.from_numpy(testX).float().cuda()
        # Fix: convert the test labels like every other split (the original
        # assigned the conversion to an unused `testY` and returned the raw
        # input). `testy` is currently unused downstream, so this is safe.
        testy = torch.from_numpy(testy).float().cuda()
        return trainX, trainy, devX, devy, testX, testy

    def run(self):
        """Train with early stopping on dev Spearman; return (best dev
        Spearman, test predictions)."""
        self.nepoch = 0
        bestpr = -1
        early_stop_count = 0
        r = np.arange(1, 6)  # score bins 1..5 for expected-value decoding
        stop_train = False

        # Preparing data
        trainX, trainy, devX, devy, testX, testy = self.prepare_data(
            self.train['X'], self.train['y'],
            self.valid['X'], self.valid['y'],
            self.test['X'], self.test['y'])

        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            self.trainepoch(trainX, trainy, nepoches=50)
            # Decode predicted score as the expectation over the 5 bins.
            yhat = np.dot(self.predict_proba(devX), r)
            pr = spearmanr(yhat, self.devscores)[0]
            pr = 0 if pr != pr else pr  # if NaN bc std=0
            # early stop on dev Spearman
            if pr > bestpr:
                bestpr = pr
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel

        yhat = np.dot(self.predict_proba(testX), r)

        return bestpr, yhat

    def trainepoch(self, X, y, nepoches=1):
        """Run *nepoches* passes of mini-batch SGD over (X, y)."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = np.random.permutation(len(X))
            all_costs = []
            for i in range(0, len(X), self.batch_size):
                # forward
                idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().cuda()
                Xbatch = X[idx]
                ybatch = y[idx]
                output = self.model(Xbatch)
                # loss
                loss = self.loss_fn(output, ybatch)
                all_costs.append(loss.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches

    def predict_proba(self, devX):
        """Return the model's per-bin probabilities for devX as numpy."""
        self.model.eval()
        probas = []
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                if len(probas) == 0:
                    probas = self.model(Xbatch).data.cpu().numpy()
                else:
                    probas = np.concatenate((probas, self.model(Xbatch).data.cpu().numpy()), axis=0)
        return probas
| 4,552 | 32.725926 | 100 | py |
MCSE | MCSE-master/SentEval/senteval/tools/validation.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Validation and classification
(train) : inner-kfold classifier
(train, test) : kfold classifier
(train, dev, test) : split classifier
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import numpy as np
from senteval.tools.classifier import MLP
import sklearn
assert(sklearn.__version__ >= "0.18.0"), \
"need to update sklearn to version >= 0.18.0"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
def get_classif_name(classifier_config, usepytorch):
    """Build a short human-readable name for the classifier configuration."""
    if not usepytorch:
        return 'sklearn-LogReg'
    nhid = classifier_config['nhid']
    optim = classifier_config.get('optim', 'adam')
    bs = classifier_config.get('batch_size', 64)
    return 'pytorch-MLP-nhid%s-%s-bs%s' % (nhid, optim, bs)
# Pytorch version
class InnerKFoldClassifier(object):
    """
    (train) split classifier : InnerKfold.

    Nested k-fold cross-validation: the outer loop produces train/test
    folds; for each outer fold, an inner k-fold over the train part
    selects the best regularization strength, which is then used to fit
    on the full outer-train and score on the outer-test fold.
    """
    def __init__(self, X, y, config):
        self.X = X
        self.y = y
        self.featdim = X.shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.devresults = []   # best inner-CV score per outer fold
        self.testresults = []  # outer-test accuracy per outer fold
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Return (mean dev accuracy, mean test accuracy) over outer folds."""
        logging.info('Training {0} with (inner) {1}-fold cross-validation'
                     .format(self.modelname, self.k))

        # Regularization grid: weight-decay values for the MLP, C values
        # for sklearn LogisticRegression.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True, random_state=1111)
        innerskf = StratifiedKFold(n_splits=self.k, shuffle=True,
                                   random_state=1111)
        count = 0
        for train_idx, test_idx in skf.split(self.X, self.y):
            count += 1
            X_train, X_test = self.X[train_idx], self.X[test_idx]
            y_train, y_test = self.y[train_idx], self.y[test_idx]
            scores = []
            for reg in regs:
                regscores = []
                for inner_train_idx, inner_test_idx in innerskf.split(X_train, y_train):
                    X_in_train, X_in_test = X_train[inner_train_idx], X_train[inner_test_idx]
                    y_in_train, y_in_test = y_train[inner_train_idx], y_train[inner_test_idx]
                    if self.usepytorch:
                        clf = MLP(self.classifier_config, inputdim=self.featdim,
                                  nclasses=self.nclasses, l2reg=reg,
                                  seed=self.seed)
                        clf.fit(X_in_train, y_in_train,
                                validation_data=(X_in_test, y_in_test))
                    else:
                        clf = LogisticRegression(C=reg, random_state=self.seed)
                        clf.fit(X_in_train, y_in_train)
                    regscores.append(clf.score(X_in_test, y_in_test))
                scores.append(round(100*np.mean(regscores), 2))
            optreg = regs[np.argmax(scores)]
            logging.info('Best param found at split {0}: l2reg = {1} \
                with score {2}'.format(count, optreg, np.max(scores)))
            self.devresults.append(np.max(scores))

            # Refit on the full outer-train fold with the selected reg.
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=optreg,
                          seed=self.seed)

                clf.fit(X_train, y_train, validation_split=0.05)
            else:
                clf = LogisticRegression(C=optreg, random_state=self.seed)
                clf.fit(X_train, y_train)

            self.testresults.append(round(100*clf.score(X_test, y_test), 2))

        devaccuracy = round(np.mean(self.devresults), 2)
        testaccuracy = round(np.mean(self.testresults), 2)
        return devaccuracy, testaccuracy
class KFoldClassifier(object):
    """
    (train, test) split classifier : cross-validation on train.

    Selects the regularization strength by k-fold cross-validation on the
    train split, then refits on the full train split and evaluates once on
    the held-out test split.
    """
    def __init__(self, train, test, config):
        self.train = train
        self.test = test
        self.featdim = self.train['X'].shape[1]
        self.nclasses = config['nclasses']
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)

        self.k = 5 if 'kfold' not in config else config['kfold']

    def run(self):
        """Return (best dev accuracy, test accuracy, test predictions)."""
        # cross-validation
        logging.info('Training {0} with {1}-fold cross-validation'
                     .format(self.modelname, self.k))
        # Regularization grid: weight-decay values for the MLP, C values
        # for sklearn LogisticRegression.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-1, 6, 1)]
        skf = StratifiedKFold(n_splits=self.k, shuffle=True,
                              random_state=self.seed)
        scores = []

        for reg in regs:
            scanscores = []
            for train_idx, test_idx in skf.split(self.train['X'],
                                                 self.train['y']):
                # Split data
                X_train, y_train = self.train['X'][train_idx], self.train['y'][train_idx]

                X_test, y_test = self.train['X'][test_idx], self.train['y'][test_idx]

                # Train classifier
                if self.usepytorch:
                    clf = MLP(self.classifier_config, inputdim=self.featdim,
                              nclasses=self.nclasses, l2reg=reg,
                              seed=self.seed)
                    clf.fit(X_train, y_train, validation_data=(X_test, y_test))
                else:
                    clf = LogisticRegression(C=reg, random_state=self.seed)
                    clf.fit(X_train, y_train)
                score = clf.score(X_test, y_test)
                scanscores.append(score)
            # Append mean score
            scores.append(round(100*np.mean(scanscores), 2))

        # evaluation
        logging.info([('reg:' + str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Cross-validation : best param found is reg = {0} \
            with score {1}'.format(optreg, devaccuracy))

        logging.info('Evaluating...')
        # Refit on the full train split with the selected reg.
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed)
            clf.fit(self.train['X'], self.train['y'], validation_split=0.05)
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.train['X'], self.train['y'])
        yhat = clf.predict(self.test['X'])

        testaccuracy = clf.score(self.test['X'], self.test['y'])
        testaccuracy = round(100*testaccuracy, 2)

        return devaccuracy, testaccuracy, yhat
class SplitClassifier(object):
    """
    (train, valid, test) split classifier.

    Selects the regularization strength on the valid split, then refits on
    train with the best value and evaluates once on test.

    Fix vs. original: removed a dead ``LogisticRegression`` instantiation
    before the final if/else (both branches immediately reassigned ``clf``).
    """
    def __init__(self, X, y, config):
        self.X = X
        self.y = y
        self.nclasses = config['nclasses']
        self.featdim = self.X['train'].shape[1]
        self.seed = config['seed']
        self.usepytorch = config['usepytorch']
        self.classifier_config = config['classifier']
        self.cudaEfficient = False if 'cudaEfficient' not in config else \
            config['cudaEfficient']
        self.modelname = get_classif_name(self.classifier_config, self.usepytorch)
        self.noreg = False if 'noreg' not in config else config['noreg']
        self.config = config

    def run(self):
        """Return (best valid accuracy, test accuracy)."""
        logging.info('Training {0} with standard validation..'
                     .format(self.modelname))
        # Regularization grid: weight-decay values for the MLP, C values
        # for sklearn LogisticRegression.
        regs = [10**t for t in range(-5, -1)] if self.usepytorch else \
               [2**t for t in range(-2, 4, 1)]
        if self.noreg:
            # Effectively disable regularization in either backend.
            regs = [1e-9 if self.usepytorch else 1e9]
        scores = []
        for reg in regs:
            if self.usepytorch:
                clf = MLP(self.classifier_config, inputdim=self.featdim,
                          nclasses=self.nclasses, l2reg=reg,
                          seed=self.seed, cudaEfficient=self.cudaEfficient)

                # TODO: Find a hack for reducing nb epoches in SNLI
                clf.fit(self.X['train'], self.y['train'],
                        validation_data=(self.X['valid'], self.y['valid']))
            else:
                clf = LogisticRegression(C=reg, random_state=self.seed)
                clf.fit(self.X['train'], self.y['train'])
            scores.append(round(100*clf.score(self.X['valid'],
                                self.y['valid']), 2))
        logging.info([('reg:'+str(regs[idx]), scores[idx])
                      for idx in range(len(scores))])
        optreg = regs[np.argmax(scores)]
        devaccuracy = np.max(scores)
        logging.info('Validation : best param found is reg = {0} with score \
            {1}'.format(optreg, devaccuracy))

        logging.info('Evaluating...')
        # Refit with the selected reg (dead pre-assignment removed here).
        if self.usepytorch:
            clf = MLP(self.classifier_config, inputdim=self.featdim,
                      nclasses=self.nclasses, l2reg=optreg,
                      seed=self.seed, cudaEfficient=self.cudaEfficient)

            # TODO: Find a hack for reducing nb epoches in SNLI
            clf.fit(self.X['train'], self.y['train'],
                    validation_data=(self.X['valid'], self.y['valid']))
        else:
            clf = LogisticRegression(C=optreg, random_state=self.seed)
            clf.fit(self.X['train'], self.y['train'])

        testaccuracy = clf.score(self.X['test'], self.y['test'])
        testaccuracy = round(100*testaccuracy, 2)
        return devaccuracy, testaccuracy
| 10,358 | 40.939271 | 93 | py |
MCSE | MCSE-master/SentEval/senteval/tools/classifier.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Pytorch Classifier class in the style of scikit-learn
Classifiers include Logistic Regression and MLP
"""
from __future__ import absolute_import, division, unicode_literals
import numpy as np
import copy
from senteval import utils
import torch
from torch import nn
import torch.nn.functional as F
class PyTorchClassifier(object):
def __init__(self, inputdim, nclasses, l2reg=0., batch_size=64, seed=1111,
cudaEfficient=False):
# fix seed
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.inputdim = inputdim
self.nclasses = nclasses
self.l2reg = l2reg
self.batch_size = batch_size
self.cudaEfficient = cudaEfficient
def prepare_split(self, X, y, validation_data=None, validation_split=None):
# Preparing validation data
assert validation_split or validation_data
if validation_data is not None:
trainX, trainy = X, y
devX, devy = validation_data
else:
permutation = np.random.permutation(len(X))
trainidx = permutation[int(validation_split * len(X)):]
devidx = permutation[0:int(validation_split * len(X))]
trainX, trainy = X[trainidx], y[trainidx]
devX, devy = X[devidx], y[devidx]
device = torch.device('cpu') if self.cudaEfficient else torch.device('cuda')
trainX = torch.from_numpy(trainX).to(device, dtype=torch.float32)
trainy = torch.from_numpy(trainy).to(device, dtype=torch.int64)
devX = torch.from_numpy(devX).to(device, dtype=torch.float32)
devy = torch.from_numpy(devy).to(device, dtype=torch.int64)
return trainX, trainy, devX, devy
def fit(self, X, y, validation_data=None, validation_split=None,
early_stop=True):
self.nepoch = 0
bestaccuracy = -1
stop_train = False
early_stop_count = 0
# Preparing validation data
trainX, trainy, devX, devy = self.prepare_split(X, y, validation_data,
validation_split)
# Training
while not stop_train and self.nepoch <= self.max_epoch:
self.trainepoch(trainX, trainy, epoch_size=self.epoch_size)
accuracy = self.score(devX, devy)
if accuracy > bestaccuracy:
bestaccuracy = accuracy
bestmodel = copy.deepcopy(self.model)
elif early_stop:
if early_stop_count >= self.tenacity:
stop_train = True
early_stop_count += 1
self.model = bestmodel
return bestaccuracy
def trainepoch(self, X, y, epoch_size=1):
self.model.train()
for _ in range(self.nepoch, self.nepoch + epoch_size):
permutation = np.random.permutation(len(X))
all_costs = []
for i in range(0, len(X), self.batch_size):
# forward
idx = torch.from_numpy(permutation[i:i + self.batch_size]).long().to(X.device)
Xbatch = X[idx]
ybatch = y[idx]
if self.cudaEfficient:
Xbatch = Xbatch.cuda()
ybatch = ybatch.cuda()
output = self.model(Xbatch)
# loss
loss = self.loss_fn(output, ybatch)
all_costs.append(loss.data.item())
# backward
self.optimizer.zero_grad()
loss.backward()
# Update parameters
self.optimizer.step()
self.nepoch += epoch_size
    def score(self, devX, devy):
        """Return classification accuracy of the current model on (devX, devy)."""
        self.model.eval()
        correct = 0
        if not isinstance(devX, torch.cuda.FloatTensor) or self.cudaEfficient:
            # NOTE(review): this unconditionally copies the whole dev set to
            # the GPU (CUDA required), even in cudaEfficient mode where each
            # batch is moved again below.
            devX = torch.FloatTensor(devX).cuda()
            devy = torch.LongTensor(devy).cuda()
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                ybatch = devy[i:i + self.batch_size]
                if self.cudaEfficient:
                    Xbatch = Xbatch.cuda()
                    ybatch = ybatch.cuda()
                output = self.model(Xbatch)
                # argmax over the class dimension -> predicted labels.
                pred = output.data.max(1)[1]
                correct += pred.long().eq(ybatch.data.long()).sum().item()
            accuracy = 1.0 * correct / len(devX)
        return accuracy
    def predict(self, devX):
        """Return predicted class indices for ``devX``.

        NOTE(review): the final ``np.vstack`` on a flat 1-D array yields an
        (n_samples, 1) column vector rather than a flat vector — confirm
        callers expect that shape.
        """
        self.model.eval()
        if not isinstance(devX, torch.cuda.FloatTensor):
            devX = torch.FloatTensor(devX).cuda()
        yhat = np.array([])
        with torch.no_grad():
            for i in range(0, len(devX), self.batch_size):
                Xbatch = devX[i:i + self.batch_size]
                output = self.model(Xbatch)
                # Append this batch's argmax predictions.
                yhat = np.append(yhat,
                                 output.data.max(1)[1].cpu().numpy())
        yhat = np.vstack(yhat)
        return yhat
def predict_proba(self, devX):
self.model.eval()
probas = []
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:i + self.batch_size]
vals = F.softmax(self.model(Xbatch).data.cpu().numpy())
if not probas:
probas = vals
else:
probas = np.concatenate(probas, vals, axis=0)
return probas
"""
MLP with Pytorch (nhid=0 --> Logistic Regression)
"""
class MLP(PyTorchClassifier):
def __init__(self, params, inputdim, nclasses, l2reg=0., batch_size=64,
seed=1111, cudaEfficient=False):
super(self.__class__, self).__init__(inputdim, nclasses, l2reg,
batch_size, seed, cudaEfficient)
"""
PARAMETERS:
-nhid: number of hidden units (0: Logistic Regression)
-optim: optimizer ("sgd,lr=0.1", "adam", "rmsprop" ..)
-tenacity: how many times dev acc does not increase before stopping
-epoch_size: each epoch corresponds to epoch_size pass on the train set
-max_epoch: max number of epoches
-dropout: dropout for MLP
"""
self.nhid = 0 if "nhid" not in params else params["nhid"]
self.optim = "adam" if "optim" not in params else params["optim"]
self.tenacity = 5 if "tenacity" not in params else params["tenacity"]
self.epoch_size = 4 if "epoch_size" not in params else params["epoch_size"]
self.max_epoch = 200 if "max_epoch" not in params else params["max_epoch"]
self.dropout = 0. if "dropout" not in params else params["dropout"]
self.batch_size = 64 if "batch_size" not in params else params["batch_size"]
if params["nhid"] == 0:
self.model = nn.Sequential(
nn.Linear(self.inputdim, self.nclasses),
).cuda()
else:
self.model = nn.Sequential(
nn.Linear(self.inputdim, params["nhid"]),
nn.Dropout(p=self.dropout),
nn.Sigmoid(),
nn.Linear(params["nhid"], self.nclasses),
).cuda()
self.loss_fn = nn.CrossEntropyLoss().cuda()
self.loss_fn.size_average = False
optim_fn, optim_params = utils.get_optimizer(self.optim)
self.optimizer = optim_fn(self.model.parameters(), **optim_params)
self.optimizer.param_groups[0]['weight_decay'] = self.l2reg
| 7,737 | 37.118227 | 94 | py |
MCSE | MCSE-master/SentEval/senteval/tools/ranking.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Image Annotation/Search for COCO with Pytorch
"""
from __future__ import absolute_import, division, unicode_literals
import logging
import copy
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.optim as optim
class COCOProjNet(nn.Module):
def __init__(self, config):
super(COCOProjNet, self).__init__()
self.imgdim = config['imgdim']
self.sentdim = config['sentdim']
self.projdim = config['projdim']
self.imgproj = nn.Sequential(
nn.Linear(self.imgdim, self.projdim),
)
self.sentproj = nn.Sequential(
nn.Linear(self.sentdim, self.projdim),
)
def forward(self, img, sent, imgc, sentc):
# imgc : (bsize, ncontrast, imgdim)
# sentc : (bsize, ncontrast, sentdim)
# img : (bsize, imgdim)
# sent : (bsize, sentdim)
img = img.unsqueeze(1).expand_as(imgc).contiguous()
img = img.view(-1, self.imgdim)
imgc = imgc.view(-1, self.imgdim)
sent = sent.unsqueeze(1).expand_as(sentc).contiguous()
sent = sent.view(-1, self.sentdim)
sentc = sentc.view(-1, self.sentdim)
imgproj = self.imgproj(img)
imgproj = imgproj / torch.sqrt(torch.pow(imgproj, 2).sum(1, keepdim=True)).expand_as(imgproj)
imgcproj = self.imgproj(imgc)
imgcproj = imgcproj / torch.sqrt(torch.pow(imgcproj, 2).sum(1, keepdim=True)).expand_as(imgcproj)
sentproj = self.sentproj(sent)
sentproj = sentproj / torch.sqrt(torch.pow(sentproj, 2).sum(1, keepdim=True)).expand_as(sentproj)
sentcproj = self.sentproj(sentc)
sentcproj = sentcproj / torch.sqrt(torch.pow(sentcproj, 2).sum(1, keepdim=True)).expand_as(sentcproj)
# (bsize*ncontrast, projdim)
anchor1 = torch.sum((imgproj*sentproj), 1)
anchor2 = torch.sum((sentproj*imgproj), 1)
img_sentc = torch.sum((imgproj*sentcproj), 1)
sent_imgc = torch.sum((sentproj*imgcproj), 1)
# (bsize*ncontrast)
return anchor1, anchor2, img_sentc, sent_imgc
def proj_sentence(self, sent):
output = self.sentproj(sent)
output = output / torch.sqrt(torch.pow(output, 2).sum(1, keepdim=True)).expand_as(output)
return output # (bsize, projdim)
def proj_image(self, img):
output = self.imgproj(img)
output = output / torch.sqrt(torch.pow(output, 2).sum(1, keepdim=True)).expand_as(output)
return output # (bsize, projdim)
class PairwiseRankingLoss(nn.Module):
    """
    Pairwise max-margin ranking loss: penalises contrastive pairs that score
    within `margin` of the true (anchor) pair, summed over both directions.
    """

    def __init__(self, margin):
        super(PairwiseRankingLoss, self).__init__()
        self.margin = margin

    def forward(self, anchor1, anchor2, img_sentc, sent_imgc):
        # Hinge for the image anchor vs contrastive sentences...
        sent_hinge = (self.margin - anchor1 + img_sentc).clamp(min=0.0)
        # ...and for the sentence anchor vs contrastive images.
        img_hinge = (self.margin - anchor2 + sent_imgc).clamp(min=0.0)
        return sent_hinge.sum() + img_hinge.sum()
class ImageSentenceRankingPytorch(object):
    # Image Sentence Ranking on COCO with Pytorch
    """Trainer/evaluator for COCO image-sentence ranking (CUDA required).

    Expects train/valid/test dicts with 'imgfeat' and 'sentfeat' arrays where
    every image has 5 captions (rows grouped in blocks of 5).
    """
    def __init__(self, train, valid, test, config):
        # fix seed
        self.seed = config['seed']
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed(self.seed)
        self.train = train
        self.valid = valid
        self.test = test
        self.imgdim = len(train['imgfeat'][0])
        self.sentdim = len(train['sentfeat'][0])
        self.projdim = config['projdim']
        self.margin = config['margin']
        self.batch_size = 128
        self.ncontrast = 30
        self.maxepoch = 20
        self.early_stop = True
        config_model = {'imgdim': self.imgdim,'sentdim': self.sentdim,
                        'projdim': self.projdim}
        self.model = COCOProjNet(config_model).cuda()
        self.loss_fn = PairwiseRankingLoss(margin=self.margin).cuda()
        self.optimizer = optim.Adam(self.model.parameters())
    def prepare_data(self, trainTxt, trainImg, devTxt, devImg,
                     testTxt, testImg):
        """Convert features to tensors; dev/test live on GPU, train stays on
        CPU (minibatches are moved in trainepoch)."""
        trainTxt = torch.FloatTensor(trainTxt)
        trainImg = torch.FloatTensor(trainImg)
        devTxt = torch.FloatTensor(devTxt).cuda()
        devImg = torch.FloatTensor(devImg).cuda()
        testTxt = torch.FloatTensor(testTxt).cuda()
        testImg = torch.FloatTensor(testImg).cuda()
        return trainTxt, trainImg, devTxt, devImg, testTxt, testImg
    def run(self):
        """Train with early stopping on the summed dev recalls, then report
        dev score plus test R@1/5/10 and median rank both ways, each averaged
        over five 5000-caption folds."""
        self.nepoch = 0
        bestdevscore = -1
        early_stop_count = 0
        stop_train = False
        # Preparing data
        logging.info('prepare data')
        trainTxt, trainImg, devTxt, devImg, testTxt, testImg = \
            self.prepare_data(self.train['sentfeat'], self.train['imgfeat'],
                              self.valid['sentfeat'], self.valid['imgfeat'],
                              self.test['sentfeat'], self.test['imgfeat'])
        # Training
        while not stop_train and self.nepoch <= self.maxepoch:
            logging.info('start epoch')
            self.trainepoch(trainTxt, trainImg, devTxt, devImg, nepoches=1)
            logging.info('Epoch {0} finished'.format(self.nepoch))
            results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                       'dev': bestdevscore}
            score = 0
            # Evaluate on five disjoint 5000-row dev folds and average.
            for i in range(5):
                devTxt_i = devTxt[i*5000:(i+1)*5000]
                devImg_i = devImg[i*5000:(i+1)*5000]
                # Compute dev ranks img2txt
                r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg_i,
                                                             devTxt_i)
                results['i2t']['r1'] += r1_i2t / 5
                results['i2t']['r5'] += r5_i2t / 5
                results['i2t']['r10'] += r10_i2t / 5
                results['i2t']['medr'] += medr_i2t / 5
                logging.info("Image to text: {0}, {1}, {2}, {3}"
                             .format(r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                # Compute dev ranks txt2img
                r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg_i,
                                                             devTxt_i)
                results['t2i']['r1'] += r1_t2i / 5
                results['t2i']['r5'] += r5_t2i / 5
                results['t2i']['r10'] += r10_t2i / 5
                results['t2i']['medr'] += medr_t2i / 5
                logging.info("Text to Image: {0}, {1}, {2}, {3}"
                             .format(r1_t2i, r5_t2i, r10_t2i, medr_t2i))
                score += (r1_i2t + r5_i2t + r10_i2t +
                          r1_t2i + r5_t2i + r10_t2i) / 5
            logging.info("Dev mean Text to Image: {0}, {1}, {2}, {3}".format(
                results['t2i']['r1'], results['t2i']['r5'],
                results['t2i']['r10'], results['t2i']['medr']))
            logging.info("Dev mean Image to text: {0}, {1}, {2}, {3}".format(
                results['i2t']['r1'], results['i2t']['r5'],
                results['i2t']['r10'], results['i2t']['medr']))
            # early stop on the summed dev recalls (the old comment said
            # "Pearson"; the metric here is the recall sum computed above)
            if score > bestdevscore:
                bestdevscore = score
                bestmodel = copy.deepcopy(self.model)
            elif self.early_stop:
                # Tolerate 3 non-improving epochs before stopping.
                if early_stop_count >= 3:
                    stop_train = True
                early_stop_count += 1
        self.model = bestmodel
        # Compute test for the 5 splits
        results = {'i2t': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   't2i': {'r1': 0, 'r5': 0, 'r10': 0, 'medr': 0},
                   'dev': bestdevscore}
        for i in range(5):
            testTxt_i = testTxt[i*5000:(i+1)*5000]
            testImg_i = testImg[i*5000:(i+1)*5000]
            # Compute test ranks img2txt
            r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(testImg_i, testTxt_i)
            results['i2t']['r1'] += r1_i2t / 5
            results['i2t']['r5'] += r5_i2t / 5
            results['i2t']['r10'] += r10_i2t / 5
            results['i2t']['medr'] += medr_i2t / 5
            # Compute test ranks txt2img
            r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(testImg_i, testTxt_i)
            results['t2i']['r1'] += r1_t2i / 5
            results['t2i']['r5'] += r5_t2i / 5
            results['t2i']['r10'] += r10_t2i / 5
            results['t2i']['medr'] += medr_t2i / 5
        return bestdevscore, results['i2t']['r1'], results['i2t']['r5'], \
            results['i2t']['r10'], results['i2t']['medr'], \
            results['t2i']['r1'], results['t2i']['r5'], \
            results['t2i']['r10'], results['t2i']['medr']
    def trainepoch(self, trainTxt, trainImg, devTxt, devImg, nepoches=1):
        """Run ``nepoches`` passes of contrastive minibatch training,
        logging dev ranking metrics every 500 batches."""
        self.model.train()
        for _ in range(self.nepoch, self.nepoch + nepoches):
            permutation = list(np.random.permutation(len(trainTxt)))
            all_costs = []
            for i in range(0, len(trainTxt), self.batch_size):
                # forward
                if i % (self.batch_size*500) == 0 and i > 0:
                    # Periodic progress logging with full dev evaluation.
                    logging.info('samples : {0}'.format(i))
                    r1_i2t, r5_i2t, r10_i2t, medr_i2t = self.i2t(devImg,
                                                                 devTxt)
                    logging.info("Image to text: {0}, {1}, {2}, {3}".format(
                        r1_i2t, r5_i2t, r10_i2t, medr_i2t))
                    # Compute test ranks txt2img
                    r1_t2i, r5_t2i, r10_t2i, medr_t2i = self.t2i(devImg,
                                                                 devTxt)
                    logging.info("Text to Image: {0}, {1}, {2}, {3}".format(
                        r1_t2i, r5_t2i, r10_t2i, medr_t2i))
                idx = torch.LongTensor(permutation[i:i + self.batch_size])
                imgbatch = Variable(trainImg.index_select(0, idx)).cuda()
                sentbatch = Variable(trainTxt.index_select(0, idx)).cuda()
                # Sample ncontrast negatives per example from everything
                # outside the current minibatch.
                idximgc = np.random.choice(permutation[:i] +
                                           permutation[i + self.batch_size:],
                                           self.ncontrast*idx.size(0))
                idxsentc = np.random.choice(permutation[:i] +
                                            permutation[i + self.batch_size:],
                                            self.ncontrast*idx.size(0))
                idximgc = torch.LongTensor(idximgc)
                idxsentc = torch.LongTensor(idxsentc)
                # Get indexes for contrastive images and sentences
                imgcbatch = Variable(trainImg.index_select(0, idximgc)).view(
                    -1, self.ncontrast, self.imgdim).cuda()
                sentcbatch = Variable(trainTxt.index_select(0, idxsentc)).view(
                    -1, self.ncontrast, self.sentdim).cuda()
                anchor1, anchor2, img_sentc, sent_imgc = self.model(
                    imgbatch, sentbatch, imgcbatch, sentcbatch)
                # loss
                loss = self.loss_fn(anchor1, anchor2, img_sentc, sent_imgc)
                all_costs.append(loss.data.item())
                # backward
                self.optimizer.zero_grad()
                loss.backward()
                # Update parameters
                self.optimizer.step()
        self.nepoch += nepoches
    def t2i(self, images, captions):
        """
        Text-to-image retrieval metrics.
        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions
        Returns (R@1, R@5, R@10, median rank); assumes 5 captions per image
        with image i's captions at rows 5i..5i+4.
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data
            npts = int(img_embed.size(0) / 5)
            # Keep one image embedding per group of 5 duplicated rows.
            idxs = torch.cuda.LongTensor(range(0, len(img_embed), 5))
            ims = img_embed.index_select(0, idxs)
            ranks = np.zeros(5 * npts)
            for index in range(npts):
                # Get query captions
                queries = sent_embed[5*index: 5*index + 5]
                # Compute scores
                scores = torch.mm(queries, ims.transpose(0, 1)).cpu().numpy()
                inds = np.zeros(scores.shape)
                for i in range(len(inds)):
                    # Rank of the true image in the descending score order.
                    inds[i] = np.argsort(scores[i])[::-1]
                    ranks[5 * index + i] = np.where(inds[i] == index)[0][0]
            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)
    def i2t(self, images, captions):
        """
        Image-to-text retrieval metrics.
        Images: (5N, imgdim) matrix of images
        Captions: (5N, sentdim) matrix of captions
        Returns (R@1, R@5, R@10, median rank); each image is scored by the
        best-ranked of its 5 ground-truth captions.
        """
        with torch.no_grad():
            # Project images and captions
            img_embed, sent_embed = [], []
            for i in range(0, len(images), self.batch_size):
                img_embed.append(self.model.proj_image(
                    Variable(images[i:i + self.batch_size])))
                sent_embed.append(self.model.proj_sentence(
                    Variable(captions[i:i + self.batch_size])))
            img_embed = torch.cat(img_embed, 0).data
            sent_embed = torch.cat(sent_embed, 0).data
            npts = int(img_embed.size(0) / 5)
            index_list = []
            ranks = np.zeros(npts)
            for index in range(npts):
                # Get query image
                query_img = img_embed[5 * index]
                # Compute scores
                scores = torch.mm(query_img.view(1, -1),
                                  sent_embed.transpose(0, 1)).view(-1)
                scores = scores.cpu().numpy()
                inds = np.argsort(scores)[::-1]
                index_list.append(inds[0])
                # Score
                rank = 1e20
                # Best rank among the image's 5 ground-truth captions.
                for i in range(5*index, 5*index + 5, 1):
                    tmp = np.where(inds == i)[0][0]
                    if tmp < rank:
                        rank = tmp
                ranks[index] = rank
            # Compute metrics
            r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
            r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
            r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
            medr = np.floor(np.median(ranks)) + 1
            return (r1, r5, r10, medr)
| 15,275 | 41.433333 | 109 | py |
openqasm | openqasm-main/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('_extensions'))
# -- Project information -----------------------------------------------------
from typing import List
# Build the "Live" (unversioned) spec by default; CI sets VERSION for
# versioned release builds.
version = os.getenv('VERSION','Live')
project = f'OpenQASM {version} Specification'
copyright = '2017-2023, Andrew W. Cross, Lev S. Bishop, John A. Smolin, Jay M. Gambetta'
author = 'Andrew W. Cross, Lev S. Bishop, John A. Smolin, Jay M. Gambetta'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.mathjax',
    'sphinx.ext.githubpages',
    'sphinxcontrib.bibtex',
    'reno.sphinxext',
    'multifigure'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: List[str] = [
    "openqasm/docs",
]
# Sets the default code-highlighting language. `.. code-block::` directives
# that are not OQ3 should specify the language manually. The value is
# interpreted as a Pygments lexer alias; this needs the dependency
# `openqasm_pygments`.
highlight_language = "qasm3"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Sidebar links to other built versions, driven by the comma-separated
# VERSION_LIST environment variable set by CI.
version_list_var = os.getenv('VERSION_LIST')
extra_nav_links = {'Live Version': '/index.html'} # default link to Live version
if version_list_var is not None:
    version_list = version_list_var.split(',')
    for ver in version_list:
        extra_nav_links[f'Version {ver}'] = f'/versions/{ver}/index.html'
# NOTE(review): debug echo of the chosen nav links; shows up in build logs.
print(extra_nav_links)
# Theme specific options
html_theme_options = {
    'extra_nav_links': extra_nav_links
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The URL which points to the root of the HTML documentation. It is used to
# indicate the location of document like canonical_url.
html_baseurl = os.getenv('HTML_BASEURL', '')
# Add css styles for colored text
html_css_files = ['colors.css']
# If True, figures, tables and code-blocks are automatically numbered
# if they have a caption.
numfig = True
# Necessary setting for sphinxcontrib-bibtex >= 2.0.0
bibtex_bibfiles = ['bibliography.bib']
# This is the list of local variables to export into sphinx by using the
# rst_epilogue below. Using this mechanism we can export the local 'version'
# variable, which can be defined by an environment variable, into the sphinx
# build system for changing the text to specify which specific version of the
# specification is being built
variables_to_export = [
    "version",
]
# Snapshot locals so the lambda below sees a stable mapping while building
# the substitution definitions appended to every source file.
frozen_locals = dict(locals())
rst_epilog = '\n'.join(map(lambda x: f".. |{x}| replace:: {frozen_locals[x]}", variables_to_export))
del frozen_locals
# Monkey-patch docutils 0.19.0 with a fix to `Node.previous_sibling` that is the
# root cause of incorrect HTML output for bibliograhy files (see gh-455).
# docutils is pinned in `constraints.txt` to a version that is known to work
# with this patch. If docutils releases a new version, this monkeypatching and
# the constraint may be able to be dropped.
import docutils.nodes
# This method is taken from docutils revision r9126, which is to a file
# explicitly placed in the public domain; there is no licence clause.
def previous_sibling(self):
    """Return the node immediately preceding this one in its parent, or None.

    Behaviourally identical to the upstream docutils implementation this
    replaces: None when there is no parent or the node is the first child.
    """
    parent = self.parent
    if not parent:
        return None
    idx = parent.index(self)
    if idx <= 0:
        return None
    return parent[idx - 1]
# Install the fixed implementation on every docutils node type.
docutils.nodes.Node.previous_sibling = previous_sibling
| 4,580 | 35.943548 | 100 | py |
ULR | ULR-main/dual-encoder/L2/eval_downstream_task.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger, keyed by module name.
logger = logging.getLogger(__name__)
# Flat tuple of every pretrained shortcut name known to the supported
# config classes (used for the --model_name_or_path help text).
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# model_type -> (config class, model class, tokenizer class).
# Note that "bert" and "roberta" map to the bare encoders
# (BertModel/RobertaModel), not sequence-classification heads — this script
# builds dual-encoder representations from the raw encoder outputs.
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed every RNG in use (python, numpy, torch, and CUDA) from args.seed."""
    seed_value = args.seed
    random.seed(seed_value)
    np.random.seed(seed_value)
    torch.manual_seed(seed_value)
    # Only touch the CUDA generators when GPUs are actually in use.
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed_value)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
if len(checkpoints_sorted) <= args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(args, model, tokenizer, file_path, cat_file_path, prefix=""):
    """Run the dual encoder over the eval set and dump embeddings/scores.

    Encodes each (category, text) pair, scores them by squared Euclidean
    distance, and writes three files into the output dir: per-example text
    embeddings, one embedding per category, and an (n_examples, n_cats)
    distance matrix. NOTE(review): the returned ``results`` dict is never
    populated — metrics are written to disk only.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)
    # One category label per line of cat_file_path.
    cats = []
    with open(cat_file_path) as fin:
        for line in fin:
            cats.append(line.strip())
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, file_path, cat_file_path)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = []
        text_embeddings = []
        category_embeddings = []
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # Side a = category inputs, side b = text inputs (see the
                # feature layout in load_and_cache_examples).
                inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
                inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
                labels = batch[6]
                if args.model_type != "distilbert":
                    inputs_a["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                    inputs_b["token_type_ids"] = (
                        batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
                    ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs_a = model(**inputs_a)
                outputs_b = model(**inputs_b)
                # code.interact(local=locals())
                if args.bert_representation == "pool":
                    # Pooled [CLS] representation.
                    rep1 = outputs_a[1]
                    rep2 = outputs_b[1]
                elif args.bert_representation == "avg":
                    # Mask-weighted mean over token embeddings.
                    rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
                    rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
                # Squared Euclidean distance between the two encodings
                # (smaller = better match).
                score = torch.sum((rep1 - rep2) * (rep1 - rep2), -1)
                category_embeddings.append(rep1.data.cpu().numpy())
                text_embeddings.append(rep2.data.cpu().numpy())
            nb_eval_steps += 1
            # NOTE(review): preds is initialised to [] above, so the
            # `is None` branch is dead — both arms do the same append.
            if preds is None:
                preds.append(score.detach().cpu().numpy().reshape((-1)))
            else:
                preds.append(score.detach().cpu().numpy().reshape((-1)))
        preds = np.concatenate(preds, 0)
        # Rows = examples, columns = categories (examples are expanded one
        # row per category upstream).
        preds = preds.reshape((-1, len(cats)))
        logger.info("save prediction file to eval_output_dir")
        out_file_name = ".".join(file_path.split("/")[-2:])
        text_embeddings = np.concatenate(text_embeddings, 0)
        category_embeddings = np.concatenate(category_embeddings, 0)
        # One text embedding per example (every len(cats)-th row) and one
        # embedding per category (the first len(cats) rows).
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".text.txt"), text_embeddings[::len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".category.txt"), category_embeddings[:len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".preds.txt"), preds)
    return results
def load_and_cache_examples(args, task, tokenizer, file_path, cat_file_path):
    """Read (category, text) pairs from ``file_path`` and build a TensorDataset.

    Returns a TensorDataset of
    (input_ids_a, mask_a, token_type_a, input_ids_b, mask_b, token_type_b, label).
    """
    # NOTE(review): `evaluate` here is the module-level *function* defined
    # above (always truthy), so `not evaluate` is always False and the
    # distributed barrier never fires. The upstream template had an
    # `evaluate` parameter — confirm whether that was dropped intentionally.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task](cat_file_path=cat_file_path)
    output_mode = output_modes[task]
    # cached_features_file = os.path.join(
    # args.data_dir,
    # "cached_{}_{}_{}".format(
    # file_path,
    # str(args.max_seq_length),
    # str(task),
    # ),
    # )
    # Load data features from cache or dataset file
    # if os.path.exists(cached_features_file) and not args.overwrite_cache:
    # logger.info("Loading features from cached file %s", cached_features_file)
    # features = torch.load(cached_features_file)
    # else:
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    # if args.task_name == "nyt":
    if args.multi_class:
        # Multi-class tasks carry an extra label file.
        examples = (processor.get_examples(file_path, args.label_filepath))
    else:
        examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    # if args.local_rank in [-1, 0]:
    # logger.info("Saving features into cached file %s", cached_features_file)
    # torch.save(features, cached_features_file)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )
    # Convert to Tensors and build dataset
    # Missing token_type_ids (models without segment embeddings) are
    # replaced with all-zero rows of the same length as the attention mask.
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor([f.token_type_ids_a if f.token_type_ids_a is not None else [0]*len(f.attention_mask_a) for f in features], dtype=torch.long)
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor([f.token_type_ids_b if f.token_type_ids_b is not None else [0]*len(f.attention_mask_b) for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, all_input_ids_b, all_attention_mask_b, all_token_type_ids_b, all_labels)
    return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--eval_data_dir",
default=None,
type=str,
required=True,
help="The directory containing the evaluation dataset",
)
parser.add_argument(
"--eval_data_file",
default=None,
type=str,
required=True,
help="The directory containing the evaluation dataset",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--label_filepath",
default=None,
type=str,
help="Path to the label file for the nyt dataset",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--bert_representation",
default="pool",
choices=["avg", "pool"],
type=str,
help="The BERT representation type",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--multi_class", action="store_true", help="Whether it is a multi class classfication task.")
parser.add_argument(
"--all_cats_file", default=None, type=str, help="The file containing all category names",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--save_total_limit",
type=int,
default=None,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
# set to load the latest checkpoint for training
args.model_name_or_path = args.output_dir
all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
all_model_checkpoints.sort(reverse=True)
args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
logger.info("setting to load the model from %s", args.model_name_or_path)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
args.output_mode = output_modes[args.task_name]
num_labels = 2
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
file_path = os.path.join(args.eval_data_dir, "test.csv")
# if args.task_name == "nyt":
if args.multi_class:
file_path = os.path.join(args.eval_data_dir, "test.doc.txt")
if args.all_cats_file is not None:
all_cats_file = args.all_cats_file
else:
all_cats_file = os.path.join(args.eval_data_dir, "classes.txt.acl")
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, args.eval_data_file, all_cats_file, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| 23,469 | 39.25729 | 164 | py |
ULR | ULR-main/dual-encoder/L2/train_natcat.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import torch.nn.functional as F
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger for this training script.
logger = logging.getLogger(__name__)
# Flattened tuple of every pretrained-model shortcut name across the supported
# config classes (used only to build the --model_name_or_path help text).
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# model_type string -> (config class, model class, tokenizer class).
# NOTE(review): "bert" and "roberta" intentionally map to the *bare* encoders
# (BertModel / RobertaModel) rather than classification heads — this script
# trains a dual encoder with a hinge loss, not a classifier. The other entries
# keep classification heads and would not work with train()'s pooled-output
# logic; confirm before using any model_type other than bert/roberta.
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed every RNG in play (Python, NumPy, Torch CPU, Torch CUDA).

    Args:
        args: parsed namespace carrying ``seed`` and ``n_gpu``; the CUDA RNGs
            are seeded only when at least one GPU is in use.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
    """Return checkpoint directories under ``args.output_dir``, oldest first.

    Ordering key is the integer step parsed from ``<prefix>-<step>`` names, or
    the file mtime when ``use_mtime`` is set. Paths that do not match the
    naming scheme are silently dropped (unless sorting by mtime).
    """
    candidates = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
    keyed = []
    step_pattern = ".*{}-([0-9]+)".format(checkpoint_prefix)
    for path in candidates:
        if use_mtime:
            keyed.append((os.path.getmtime(path), path))
            continue
        match = re.match(step_pattern, path)
        if match and match.groups():
            keyed.append((int(match.groups()[0]), path))
    return [path for _, path in sorted(keyed)]
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most ``args.save_total_limit`` remain.

    A missing, zero, or negative limit disables rotation entirely.
    """
    limit = args.save_total_limit
    if not limit or limit <= 0:
        return
    # Check if we should delete older checkpoint(s)
    ordered = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    surplus = len(ordered) - limit
    if surplus <= 0:
        return
    for stale in ordered[:max(0, surplus)]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=0):
    """Run one pass of dual-encoder training over ``train_dataloader``.

    Each batch carries two encoded texts per example — side *a* (batch[0..2])
    and side *b* (batch[3..5]) — plus a label in batch[6]. Both sides are
    embedded with the same ``model`` and a margin hinge loss on squared
    Euclidean distances is minimized. Rows are consumed in consecutive pairs
    (``score.view(-1, 2)``): the loss pulls the first pair of each couple
    together and pushes the second apart, i.e. the data is presumably laid out
    as alternating (positive, negative) pairs — TODO confirm against the
    feature builder.

    Returns ``(global_step, average_loss, optimizer, scheduler)`` so the
    caller can resume on the next data shard with the same optimizer state.

    NOTE(review): when ``args.fp16`` is set this function uses ``amp``, which
    is only imported inside ``main()`` under the fp16 branch — calling train()
    with fp16 from anywhere else would raise NameError.
    """
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    set_seed(args)  # reseed here for reproducibility across shards
    epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
    for step, batch in enumerate(epoch_iterator):
        # Skip past any already trained steps if resuming training
        model.train()
        batch = tuple(t.to(args.device) for t in batch)
        inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
        inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
        labels = batch[6]  # NOTE(review): read but never used — the hinge loss below relies on pair ordering, not labels
        if args.model_type != "distilbert":
            inputs_a["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
            inputs_b["token_type_ids"] = (
                batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
        outputs_a = model(**inputs_a)
        outputs_b = model(**inputs_b)
        if args.bert_representation == "pool":
            # Pooler output ([CLS] through the pooling head).
            rep1 = outputs_a[1]
            rep2 = outputs_b[1]
        elif args.bert_representation == "avg":
            # Mask-weighted mean over token embeddings; +1e-8 guards against
            # division by zero for all-padding rows.
            rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
            rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
        # Squared Euclidean distance per example, then view as (couple, 2).
        score = torch.sum((rep1 - rep2) * (rep1 - rep2), -1)
        score = score.view(-1, 2)
        # code.interact(local=locals())
        # Hinge: want d(second pair) >= d(first pair) + margin; in-place zeroing
        # clamps satisfied couples out of the loss (gradient 0 there).
        score = score[:, 0] + args.margin - score[:, 1]
        score[score <= 0] = 0
        loss = score.mean()
        #code.interact(local=locals())
        # if label is 1, we want score to be high, so we negate it
        # if label is 0, we want the score to be low, so we keep the sign
        # loss = 2 - (labels - 0.5) * 2 * cosine_score
        # loss = (1 - 2 * labels) * cosine_score
        # This loss function does not give good model performances
        # loss = labels * 2 - (labels - 0.5) * 2 * (cosine_score + 1)
        if args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        loss = loss.mean()  # NOTE(review): redundant — loss is already a scalar here
        if args.fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        tr_loss += loss.item()
        if (step + 1) % args.gradient_accumulation_steps == 0:
            # Optimizer step happens only every gradient_accumulation_steps batches.
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.zero_grad()
            global_step += 1
            if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                logs = {}
                if (
                    args.local_rank == -1 and args.evaluate_during_training
                ):  # Only evaluate when single GPU otherwise metrics may not average well
                    results = evaluate(args, model, tokenizer)
                    for key, value in results.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value
                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss
                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                print(json.dumps({**logs, **{"step": global_step}}))
        if args.max_steps > 0 and global_step > args.max_steps:
            epoch_iterator.close()
            break
    # NOTE(review): raises ZeroDivisionError if no optimizer step ever ran
    # (global_step still 0), e.g. an empty shard.
    return global_step, tr_loss / global_step, optimizer, scheduler
def evaluate(args, model, tokenizer, prefix=""):
    """Evaluate ``model`` on the dev set(s) of ``args.task_name``.

    Writes per-metric results to ``eval_results.txt`` under the output dir and
    returns them as a dict.

    NOTE(review): this function looks inherited from the upstream GLUE script
    and is inconsistent with the rest of this file:
      * ``load_and_cache_examples`` (defined below) takes ``file_path`` as its
        fourth parameter — calling it with ``evaluate=True`` raises TypeError.
      * ``MODEL_CLASSES`` maps "bert"/"roberta" to bare encoders; presumably
        those models do not accept a ``labels`` kwarg nor return a loss, so the
        ``outputs[:2]`` unpacking below would not hold — confirm before use.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate predictions and gold labels across batches on CPU.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results
def load_and_cache_examples(args, task, tokenizer, file_path):
    """Read examples for ``task`` from ``file_path`` and encode a TensorDataset.

    Each element of the returned dataset is the 7-tuple
    ``(input_ids_a, attention_mask_a, token_type_ids_a,
       input_ids_b, attention_mask_b, token_type_ids_b, label)``.
    Models without segment embeddings yield ``token_type_ids`` of ``None``;
    those are replaced with all-zero vectors so every column is rectangular.

    Args:
        args: parsed namespace (uses ``local_rank`` and ``max_seq_length``).
        task: key into the project-level ``processors``/``output_modes`` maps.
        tokenizer: tokenizer passed through to the feature converter.
        file_path: concrete data file to load (one shard).
    """
    if args.local_rank not in [-1, 0]:
        # Non-master ranks wait here so only the first process reads and
        # encodes the dataset.
        # BUG FIX: the guard previously read `... and not evaluate`, where
        # `evaluate` resolved to the module-level function (always truthy)
        # after the parameter was renamed to `file_path` — so this barrier was
        # unreachable dead code.
        torch.distributed.barrier()
    processor = processors[task]()
    output_mode = output_modes[task]
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    examples = processor.get_examples(file_path)
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )
    # Convert to Tensors and build dataset
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor(
        [f.token_type_ids_a if f.token_type_ids_a is not None else [0] * len(f.attention_mask_a) for f in features],
        dtype=torch.long,
    )
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor(
        [f.token_type_ids_b if f.token_type_ids_b is not None else [0] * len(f.attention_mask_b) for f in features],
        dtype=torch.long,
    )
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(
        all_input_ids_a,
        all_attention_mask_a,
        all_token_type_ids_a,
        all_input_ids_b,
        all_attention_mask_b,
        all_token_type_ids_b,
        all_labels,
    )
    return dataset
def main():
    """Entry point: parse CLI args, set up (distributed) state, train the dual
    encoder over sharded data files, and optionally evaluate checkpoints."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--bert_representation",
        default="pool",
        choices=["avg", "pool"],
        type=str,
        help="The BERT representation type",
    )
    parser.add_argument(
        "--margin",
        default=0.5,
        type=float,
        help="The margin to train with hinge loss",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # Output dir already populated and training requested: resume from the
        # "latest" checkpoint subdirectory instead of the original model.
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
        # NOTE(review): the step suffix is kept as a *string* ("checkpoint-90"
        # -> "90") and mixed with int -1 for dash-less dirs; sort() then
        # compares lexicographically ("9" > "10") and raises TypeError on
        # Python 3 when str and int keys are mixed — confirm checkpoint naming
        # before trusting the "latest" selection here.
        all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    num_labels = 2  # hard-coded binary label set, not taken from the processor
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training: iterate over data shards; optimizer/scheduler are created on
    # the first shard and carried across shards by train().
    if args.do_train:
        datafiles = DataFiles(args.data_dir)
        if os.path.isfile(os.path.join(args.model_name_or_path, "datafiles.txt")):
            # Resume the shard queue saved with the checkpoint.
            datafiles.load(os.path.join(args.model_name_or_path, "datafiles.txt"))
        global_step = 0
        shard_count = 0
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()
        while True:
            todo_file = datafiles.next()
            if not todo_file:
                break
            if args.local_rank == 0:
                torch.distributed.barrier()
            train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, todo_file)
            args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
            train_sampler = SequentialSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
            train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
            if shard_count == 0:  # if this is the first shard, create the optimizer or load from the previous checkpoint
                # Prepare optimizer and schedule (linear warmup and decay)
                no_decay = ["bias", "LayerNorm.weight"]
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                        "weight_decay": args.weight_decay,
                    },
                    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
                ]
                # Total steps extrapolated from the FIRST shard's length times
                # the number of shards — assumes all shards are similarly sized.
                t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs * len(datafiles.all_files)  # 280 shards of data files in total
                optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
                scheduler = get_linear_schedule_with_warmup(
                    optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
                )
                # Check if saved optimizer or scheduler states exist
                if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
                    os.path.join(args.model_name_or_path, "scheduler.pt")
                ):
                    logger.info("loading optimizer and scheduler from %s", args.model_name_or_path)
                    # Load in optimizer and scheduler states
                    optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
                    scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
                if args.fp16:
                    try:
                        from apex import amp
                    except ImportError:
                        raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
                    model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
                # multi-gpu training (should be after apex fp16 initialization)
                if args.n_gpu > 1:
                    model = torch.nn.DataParallel(model)
                # Distributed training (should be after apex fp16 initialization)
                if args.local_rank != -1:
                    model = torch.nn.parallel.DistributedDataParallel(
                        model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
                    )
            # Train!
            logger.info("***** Running training *****")
            logger.info(" Num examples = %d", len(train_dataset))
            logger.info(" Num Epochs = %d", args.num_train_epochs)
            logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
            logger.info(
                " Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size
                * args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
            )
            logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
            logger.info(" Total optimization steps = %d", t_total)
            if shard_count == 0:
                # Check if continuing training from a checkpoint
                if os.path.exists(args.model_name_or_path):
                    # set global_step to global_step of last saved checkpoint from model path
                    try:
                        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
                    except ValueError:
                        global_step = 0
                    # NOTE(review): epochs_trained is computed but never used.
                    epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
                    logger.info(" Continuing training from checkpoint %s", args.model_name_or_path)
                    logger.info(" Continuing training from global step %d", global_step)
            global_step, tr_loss, optimizer, scheduler = train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=global_step)
            logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
            # Save model checkpoint after every shard.
            output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            model_to_save = (
                model.module if hasattr(model, "module") else model
            )  # Take care of distributed/parallel training
            model_to_save.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, "training_args.bin"))
            logger.info("Saving model checkpoint to %s", output_dir)
            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            # Persist the remaining shard queue so a resume skips done shards.
            datafiles.save(os.path.join(output_dir, "datafiles.txt"))
            logger.info("Saving optimizer and scheduler states to %s", output_dir)
            _rotate_checkpoints(args, "checkpoint")
            shard_count += 1
        if args.local_rank in [-1, 0]:
            tb_writer.close()
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
if __name__ == "__main__":
main()
| 33,817 | 41.753477 | 177 | py |
ULR | ULR-main/dual-encoder/cosine/eval_downstream_task.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Every pretrained shortcut name known to the supported config classes,
# flattened into one tuple; used only in the --model_name_or_path help text.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# Maps the --model_type CLI value to (config class, model class, tokenizer class).
# "bert" and "roberta" map to the bare encoder classes (BertModel/RobertaModel,
# no classification head): this script encodes each input side separately and
# scores the pair with a similarity function instead of a classifier head.
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and CUDA, if used) from args.seed."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def evaluate(args, model, tokenizer, file_path, cat_file_path, prefix=""):
    """Run the dual encoder over an evaluation file and dump embeddings/scores.

    Every (category, text) pair from the data loader is encoded with the same
    `model`; the pair is scored with args.similarity_function and three files
    are written to the output dir:
      * <name>.text.txt      - one embedding per distinct text
      * <name>.category.txt  - one embedding per category
      * <name>.preds.txt     - an (examples, categories) score matrix

    Returns an (empty) results dict for API compatibility with main().
    """
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)

    # One category name per line; the flat prediction vector is reshaped to
    # (-1, len(cats)) below, so this order must match the data file's layout.
    cats = []
    with open(cat_file_path) as fin:
        for line in fin:
            cats.append(line.strip())

    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, file_path, cat_file_path)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        preds = []
        text_embeddings = []
        category_embeddings = []
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                # Batch layout: 0-2 = category side "a", 3-5 = text side "b".
                inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
                inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
                if args.model_type != "distilbert":
                    inputs_a["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                    inputs_b["token_type_ids"] = (
                        batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs_a = model(**inputs_a)
                outputs_b = model(**inputs_b)
                # Sentence representation: pooler output, or a mask-weighted
                # mean of the token embeddings.
                if args.bert_representation == "pool":
                    rep1 = outputs_a[1]
                    rep2 = outputs_b[1]
                elif args.bert_representation == "avg":
                    rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
                    rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
                if args.similarity_function == "dot":
                    score = torch.sum(rep1 * rep2, -1)
                elif args.similarity_function == "cosine":
                    score = F.cosine_similarity(rep1, rep2)  # now the score will be between -1 and 1
                category_embeddings.append(rep1.data.cpu().numpy())
                text_embeddings.append(rep2.data.cpu().numpy())
            # Bug fix: `preds` is a list (never None), so the old
            # `if preds is None: ... else: ...` was dead code whose two
            # branches performed this identical append.
            preds.append(score.detach().cpu().numpy().reshape((-1)))
        preds = np.concatenate(preds, 0)
        preds = preds.reshape((-1, len(cats)))
        logger.info("save prediction file to eval_output_dir")
        out_file_name = ".".join(file_path.split("/")[-2:])
        text_embeddings = np.concatenate(text_embeddings, 0)
        category_embeddings = np.concatenate(category_embeddings, 0)
        # Rows appear to be ordered example-major over categories: every
        # len(cats)-th row is a distinct text, and the first len(cats) rows
        # cover each category once — TODO confirm against the data loader.
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".text.txt"), text_embeddings[::len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".category.txt"), category_embeddings[:len(cats)])
        np.savetxt(os.path.join(eval_output_dir, out_file_name + ".preds.txt"), preds)
    return results
def load_and_cache_examples(args, task, tokenizer, file_path, cat_file_path):
    """Read `file_path`, encode it with `tokenizer` and return a TensorDataset.

    Each example contributes two encoded sequences (category side "a" and
    text side "b"); the dataset yields, per example:
    (input_ids_a, mask_a, segment_a, input_ids_b, mask_b, segment_b, label).
    """
    # Bug fix: the original guard
    #   `if args.local_rank not in [-1, 0] and not evaluate:`
    # compared against the module-level `evaluate` *function* (always truthy),
    # so the distributed barrier could never execute; the dead statement has
    # been removed rather than "fixed", since enabling it here without a
    # matching barrier on rank 0 could deadlock.
    processor = processors[task](cat_file_path=cat_file_path)
    output_mode = output_modes[task]

    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    # Multi-class tasks (e.g. nyt) need an extra label file.
    if args.multi_class:
        examples = (processor.get_examples(file_path, args.label_filepath))
    else:
        examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )
    # Convert to Tensors and build dataset; models without segment ids get
    # all-zero token_type_ids so the tensor shapes stay uniform.
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor([f.token_type_ids_a if f.token_type_ids_a is not None else [0]*len(f.attention_mask_a) for f in features], dtype=torch.long)
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor([f.token_type_ids_b if f.token_type_ids_b is not None else [0]*len(f.attention_mask_b) for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, all_input_ids_b, all_attention_mask_b, all_token_type_ids_b, all_labels)
    return dataset
def main():
    """CLI entry point: load a trained dual encoder and evaluate checkpoints.

    Parses command-line arguments, configures (optionally distributed) devices
    and logging, loads the pretrained config/tokenizer/model, and — when
    --do_eval is given on the main process — runs `evaluate` on every selected
    checkpoint.  Returns the merged per-checkpoint results dict.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--eval_data_dir",
        default=None,
        type=str,
        required=True,
        help="The directory containing the evaluation dataset",
    )
    parser.add_argument(
        "--eval_data_file",
        default=None,
        type=str,
        required=True,
        help="The directory containing the evaluation dataset",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--label_filepath",
        default=None,
        type=str,
        help="Path to the label file for the nyt dataset",
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--bert_representation",
        default="pool",
        choices=["avg", "pool"],
        type=str,
        help="The BERT representation type",
    )
    parser.add_argument(
        "--similarity_function",
        # Bug fix: the previous default "pool" was not one of the valid
        # choices (argparse does not validate defaults), which left `score`
        # unassigned inside evaluate() and raised NameError whenever the flag
        # was omitted.  Default to "dot" instead.
        default="dot",
        choices=["dot", "cosine"],
        type=str,
        help="The similarity scoring function",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--multi_class", action="store_true", help="Whether it is a multi class classfication task.")
    parser.add_argument(
        "--all_cats_file", default=None, type=str, help="The file containing all category names",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]

        # Bug fix: order checkpoints by their *numeric* step suffix.  The old
        # key mixed str and int values (a TypeError under Python 3 as soon as
        # both kinds were present) and otherwise compared step numbers
        # lexicographically, so e.g. step "999" sorted after step "1000".
        def checkpoint_step(ckpt):
            suffix = ckpt.split("-")[-1]
            return int(suffix) if suffix.isdigit() else -1

        all_model_checkpoints = [(checkpoint_step(ckpt), ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)

    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    args.output_mode = output_modes[args.task_name]
    num_labels = 2

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        # NOTE: file_path is currently unused; evaluation reads
        # args.eval_data_file directly in the call below.
        file_path = os.path.join(args.eval_data_dir, "test.csv")
        if args.all_cats_file is not None:
            all_cats_file = args.all_cats_file
        else:
            all_cats_file = os.path.join(args.eval_data_dir, "classes.txt.acl")
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, args.eval_data_file, all_cats_file, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
if __name__ == "__main__":
main()
| 22,345 | 39.190647 | 164 | py |
ULR | ULR-main/dual-encoder/cosine/train_natcat.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
import torch.nn.functional as F
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils import dual_encoder_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
# Every pretrained shortcut name known to the supported config classes,
# flattened into one tuple; used only in the --model_name_or_path help text.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# Maps the --model_type CLI value to (config class, model class, tokenizer class).
# "bert" and "roberta" map to the bare encoder classes (BertModel/RobertaModel,
# no classification head): this training script encodes the two input sides
# separately and scores the pair with a similarity function.
MODEL_CLASSES = {
    "bert": (BertConfig, BertModel, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Make runs reproducible: seed python, numpy, torch and (if used) CUDA."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
    """Return "<prefix>-*" paths under args.output_dir, oldest first.

    The sort key is the file mtime when `use_mtime` is set; otherwise it is
    the integer step parsed from the "<prefix>-<step>" name, and paths whose
    step cannot be parsed are skipped.
    """
    candidates = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
    keyed = []
    for candidate in candidates:
        if use_mtime:
            keyed.append((os.path.getmtime(candidate), candidate))
            continue
        match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), candidate)
        if match and match.groups():
            keyed.append((int(match.groups()[0]), candidate))
    return [path for _, path in sorted(keyed)]
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most args.save_total_limit remain.

    A falsy or non-positive limit disables rotation entirely.
    """
    limit = args.save_total_limit
    if not limit or limit <= 0:
        return
    # Check if we should delete older checkpoint(s)
    checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    surplus = len(checkpoints_sorted) - limit
    if surplus <= 0:
        return
    for stale in checkpoints_sorted[:surplus]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=0):
    """Run one epoch of dual-encoder training.

    Both sides of each batch are encoded with the shared `model`; their
    dot-product similarity is optimized with BCE-with-logits against the
    binary label.  Returns (global_step, average loss per optimizer step,
    optimizer, scheduler) so the caller can chain epochs.
    """
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    set_seed(args)  # Added here for reproductibility
    epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
    if args.similarity_function == "dot":
        crit = torch.nn.BCEWithLogitsLoss()
    elif args.similarity_function == "euclidean":
        # NOTE(review): no criterion is built here, and the euclidean branch
        # below computes `score` but never assigns `loss`, so this path would
        # raise NameError on the first backward step.  It is unreachable from
        # the CLI (choices=["dot"]) — complete or remove it.
        pass
    for step, batch in enumerate(epoch_iterator):
        # Skip past any already trained steps if resuming training
        model.train()
        batch = tuple(t.to(args.device) for t in batch)
        # Batch layout: 0-2 = side "a" (ids/mask/segments), 3-5 = side "b",
        # 6 = binary label.
        inputs_a = {"input_ids": batch[0], "attention_mask": batch[1]}
        inputs_b = {"input_ids": batch[3], "attention_mask": batch[4]}
        labels = batch[6]
        if args.model_type != "distilbert":
            inputs_a["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
            inputs_b["token_type_ids"] = (
                batch[5] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
        outputs_a = model(**inputs_a)
        outputs_b = model(**inputs_b)
        # Sentence representation: pooler output, or a mask-weighted mean of
        # the token embeddings.
        if args.bert_representation == "pool":
            rep1 = outputs_a[1]
            rep2 = outputs_b[1]  # now the score will be between -1 and 1
        elif args.bert_representation == "avg":
            rep1 = torch.sum(outputs_a[0] * batch[1].unsqueeze(-1), 1) / (torch.sum(batch[1], 1, keepdim=True) + 1e-8)
            rep2 = torch.sum(outputs_b[0] * batch[4].unsqueeze(-1), 1) / (torch.sum(batch[4], 1, keepdim=True) + 1e-8)
        if args.similarity_function == "dot":
            score = torch.sum(rep1 * rep2, -1)  # now the score will be between -1 and 1
            loss = crit(score, labels.float())
        elif args.similarity_function == "euclidean":
            score = torch.sum((rep1 - rep2) * (rep1 - rep2), -1)
        #code.interact(local=locals())
        # if label is 1, we want score to be high, so we negate it
        # if label is 0, we want the score to be low, so we keep the sign
        # loss = 2 - (labels - 0.5) * 2 * cosine_score
        # loss = (1 - 2 * labels) * cosine_score
        # This loss function does not give good model performances
        # loss = labels * 2 - (labels - 0.5) * 2 * (cosine_score + 1)
        if args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        loss = loss.mean()
        if args.fp16:
            # NOTE(review): `amp` is not imported in this module; presumably
            # apex.amp is expected to be initialized by the caller — verify
            # before running with --fp16.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        tr_loss += loss.item()
        # Only step the optimizer every gradient_accumulation_steps batches.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.zero_grad()
            global_step += 1
            if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                logs = {}
                if (
                    args.local_rank == -1 and args.evaluate_during_training
                ):  # Only evaluate when single GPU otherwise metrics may not average well
                    results = evaluate(args, model, tokenizer)
                    for key, value in results.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value
                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss
                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                print(json.dumps({**logs, **{"step": global_step}}))
        if args.max_steps > 0 and global_step > args.max_steps:
            epoch_iterator.close()
            break
    return global_step, tr_loss / global_step, optimizer, scheduler
def evaluate(args, model, tokenizer, prefix=""):
    """Classifier-style evaluation: average loss + task metrics on the dev set.

    NOTE(review): this looks like a leftover from the single-encoder script.
    `load_and_cache_examples` in this file is defined as
    (args, task, tokenizer, file_path) — it has no `evaluate` parameter, so
    the call below raises TypeError, and the 7-tensor dual-encoder batches it
    produces do not match the indexing used here (batch[3] would be the second
    sequence's input ids, not labels).  Confirm before calling.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels across batches.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results
def load_and_cache_examples(args, task, tokenizer, file_path):
    """Read `file_path`, encode it and return a dual-encoder TensorDataset.

    The dataset yields, per example:
    (input_ids_a, mask_a, segment_a, input_ids_b, mask_b, segment_b, label).
    """
    # Bug fix: the original guard
    #   `if args.local_rank not in [-1, 0] and not evaluate:`
    # compared against the module-level `evaluate` *function* (always truthy),
    # so the distributed barrier could never execute; the dead statement has
    # been removed rather than "fixed", since enabling it here without a
    # matching barrier on rank 0 could deadlock.
    processor = processors[task]()
    output_mode = output_modes[task]

    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
    )
    # Convert to Tensors and build dataset; models without segment ids get
    # all-zero token_type_ids so the tensor shapes stay uniform.
    all_input_ids_a = torch.tensor([f.input_ids_a for f in features], dtype=torch.long)
    all_attention_mask_a = torch.tensor([f.attention_mask_a for f in features], dtype=torch.long)
    all_token_type_ids_a = torch.tensor([f.token_type_ids_a if f.token_type_ids_a is not None else [0]*len(f.attention_mask_a) for f in features], dtype=torch.long)
    all_input_ids_b = torch.tensor([f.input_ids_b for f in features], dtype=torch.long)
    all_attention_mask_b = torch.tensor([f.attention_mask_b for f in features], dtype=torch.long)
    all_token_type_ids_b = torch.tensor([f.token_type_ids_b if f.token_type_ids_b is not None else [0]*len(f.attention_mask_b) for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids_a, all_attention_mask_a, all_token_type_ids_a, all_input_ids_b, all_attention_mask_b, all_token_type_ids_b, all_labels)
    return dataset
def main():
    """Command-line entry point: fine-tune and/or evaluate a sequence-pair
    (two-tower) classifier over sharded training data.

    Parses CLI arguments, configures (distributed) devices and logging, loads
    the pretrained config/tokenizer/model, then optionally trains one data
    shard at a time — saving a checkpoint (model, optimizer, scheduler, shard
    cursor) after each shard — and finally evaluates the saved checkpoints.

    Returns:
        dict mapping metric names (suffixed with the checkpoint global step)
        to metric values for every evaluated checkpoint; empty when
        ``--do_eval`` is not set.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--bert_representation",
        default="pool",
        choices=["avg", "pool"],
        type=str,
        help="The BERT representation type",
    )
    parser.add_argument(
        "--similarity_function",
        default="pool",
        choices=["dot"],
        type=str,
        help="The similarity scoring function",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # Resume training: point model_name_or_path at the newest checkpoint
        # directory (highest numeric global step) inside output_dir.
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]

        def _checkpoint_step(ckpt):
            # Numeric global step parsed from a "checkpoint-<step>" name;
            # non-numeric names sort first. The previous implementation keyed
            # the sort on the raw suffix *string*, which compared steps
            # lexicographically ("500" > "1000") and raised TypeError when
            # mixed with the int -1 fallback.
            suffix = ckpt.split("-")[-1]
            return int(suffix) if suffix.isdigit() else -1

        all_model_checkpoints.sort(key=_checkpoint_step, reverse=True)
        if all_model_checkpoints:  # guard against an output_dir with files but no checkpoint dirs
            args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0])
            logger.info("setting to load the model from %s", args.model_name_or_path)
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    # NOTE(review): the processor instance is never used below; kept in case
    # its constructor has side effects — candidate for removal after checking.
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    num_labels = 2  # binary relevance labels, independent of the task's label list
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training: iterate over data shards; one call to train() per shard, with
    # a checkpoint (including the DataFiles cursor) saved after each shard.
    if args.do_train:
        datafiles = DataFiles(args.data_dir)
        if os.path.isfile(os.path.join(args.model_name_or_path, "datafiles.txt")):
            datafiles.load(os.path.join(args.model_name_or_path, "datafiles.txt"))
        global_step = 0
        shard_count = 0
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()
        while True:
            todo_file = datafiles.next()
            if not todo_file:
                break
            if args.local_rank == 0:
                torch.distributed.barrier()
            train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, todo_file)
            args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
            train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
            train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
            if shard_count == 0:  # if this is the first shard, create the optimizer or load from the previous checkpoint
                # Prepare optimizer and schedule (linear warmup and decay)
                no_decay = ["bias", "LayerNorm.weight"]
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                        "weight_decay": args.weight_decay,
                    },
                    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
                ]
                # Total steps estimated from the first shard's size times the
                # number of shards — assumes roughly equal-sized shards.
                t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs * len(datafiles.all_files)  # 280 shards of data files in total
                optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
                scheduler = get_linear_schedule_with_warmup(
                    optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
                )
                # Check if saved optimizer or scheduler states exist
                if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
                    os.path.join(args.model_name_or_path, "scheduler.pt")
                ):
                    logger.info("loading optimizer and scheduler from %s", args.model_name_or_path)
                    # Load in optimizer and scheduler states
                    optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
                    scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
                if args.fp16:
                    try:
                        from apex import amp
                    except ImportError:
                        raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
                    model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
                # multi-gpu training (should be after apex fp16 initialization)
                if args.n_gpu > 1:
                    model = torch.nn.DataParallel(model)
                # Distributed training (should be after apex fp16 initialization)
                if args.local_rank != -1:
                    model = torch.nn.parallel.DistributedDataParallel(
                        model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
                    )
            # Train!
            logger.info("***** Running training *****")
            logger.info("  Num examples = %d", len(train_dataset))
            logger.info("  Num Epochs = %d", args.num_train_epochs)
            logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
            logger.info(
                "  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size
                * args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
            )
            logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
            logger.info("  Total optimization steps = %d", t_total)
            if shard_count == 0:
                # Check if continuing training from a checkpoint
                if os.path.exists(args.model_name_or_path):
                    # set global_step to global_step of last saved checkpoint from model path
                    try:
                        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
                    except ValueError:
                        global_step = 0
                    logger.info("  Continuing training from checkpoint %s", args.model_name_or_path)
                    logger.info("  Continuing training from global step %d", global_step)
            global_step, tr_loss, optimizer, scheduler = train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=global_step)
            logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
            # Save model checkpoint
            output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            model_to_save = (
                model.module if hasattr(model, "module") else model
            )  # Take care of distributed/parallel training
            model_to_save.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, "training_args.bin"))
            logger.info("Saving model checkpoint to %s", output_dir)
            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            datafiles.save(os.path.join(output_dir, "datafiles.txt"))
            logger.info("Saving optimizer and scheduler states to %s", output_dir)
            _rotate_checkpoints(args, "checkpoint")
            shard_count += 1
        if args.local_rank in [-1, 0]:
            tb_writer.close()
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
# Script entry point: run the CLI training/evaluation driver when executed
# directly (has no effect when the module is imported).
if __name__ == "__main__":
    main()
| 34,036 | 41.813836 | 177 | py |
ULR | ULR-main/single-encoder/eval_downstream_task.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import code
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger; handlers/level are configured in main() via logging.basicConfig.
logger = logging.getLogger(__name__)
# Flat tuple of every pretrained shortcut name known to the supported config
# classes — used only to build the --model_name_or_path help text.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# Maps the --model_type CLI value to its (config, model, tokenizer) classes.
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed every RNG (Python, NumPy, torch CPU, and CUDA when GPUs are
    configured) from ``args.seed`` so runs are reproducible."""
    seed = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    # CUDA generators only need seeding when at least one GPU is in use.
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so that at most ``args.save_total_limit``
    remain under ``args.output_dir``; no-op when the limit is unset or <= 0."""
    limit = args.save_total_limit
    if not limit or limit <= 0:
        return
    # Oldest-first ordering comes from _sorted_checkpoints.
    checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    surplus = max(0, len(checkpoints_sorted) - limit)
    for stale in checkpoints_sorted[:surplus]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def evaluate(args, model, tokenizer, file_path, cat_file_path, prefix=""):
    """Score every (document, category) pair in `file_path` and save the
    per-category score matrix to disk.

    Args:
        args: parsed command-line namespace (device, batch sizes, model_type,
            class_file_name, output_dir, ...).
        model: sequence-classification model already moved to args.device.
        tokenizer: tokenizer matching the model.
        file_path: dataset file; assumed to contain one row per
            (example, category) pair, category-major within each example
            (required by the reshape below) — TODO confirm against processor.
        cat_file_path: text file with one category description per line;
            its line count fixes the width of the saved prediction matrix.
        prefix: label used only in the log banner.

    Returns:
        dict of metrics; NOTE(review): currently always empty — the function's
        real output is the "<task>.preds.txt" file written to output_dir.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)
    # One category description per line; only len(cats) is used below.
    cats = []
    with open(cat_file_path) as fin:
        for line in fin:
            cats.append(line.strip())
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, file_path, cat_file_path)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly; SequentialSampler
        # keeps dataset order, which the reshape at the end relies on.
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels across batches on the host.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        # Collapse the 2-class logits to a single relevance score
        # (positive-class logit minus negative-class logit).
        preds = preds[:,1] - preds[:,0] # try argmax over pos scores
        eval_loss = eval_loss / nb_eval_steps
        preds = np.squeeze(preds)
        # One row per document, one column per category — assumes the dataset
        # enumerates all categories consecutively for each document.
        preds = preds.reshape((-1, len(cats)))
        # code.interact(local=locals())
        logger.info("save prediction file to eval_output_dir")
        # out_file_name = cat_file_path.split("/")[-2]
        task_name = args.class_file_name.split("/")[-2]
        np.savetxt(os.path.join(eval_output_dir, task_name + ".preds.txt"), preds)
    return results
def load_and_cache_examples(args, task, tokenizer, file_path, cat_file_path):
    """Read an evaluation file and encode it as a ``TensorDataset``.

    Args:
        args: parsed command-line namespace (uses ``multi_class``,
            ``label_filepath``, ``max_seq_length``, ``model_type``).
        task: key into the ``processors`` / ``output_modes`` registries.
        tokenizer: pretrained tokenizer used to encode the examples.
        file_path: path of the dataset file to load.
        cat_file_path: path of the file listing candidate categories,
            forwarded to the processor constructor.

    Returns:
        TensorDataset of (input_ids, attention_mask, token_type_ids, label)
        long tensors, one row per example.
    """
    # NOTE(review): the original guard `if args.local_rank not in [-1, 0] and
    # not evaluate:` tested the module-level `evaluate` *function* (always
    # truthy), so the distributed barrier was unreachable dead code; removed.
    # This eval-only script has no dataset-caching step that would need it.
    processor = processors[task](cat_file_path=cat_file_path)
    output_mode = output_modes[task]
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    # Multi-class tasks (e.g. nyt) additionally need the label file to
    # enumerate candidate labels per example.
    if args.multi_class:
        examples = processor.get_examples(file_path, args.label_filepath)
    else:
        examples = processor.get_examples(file_path)
    logger.info("Encoding features from dataset file at %s", file_path)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
        pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
    )
    # Convert the per-example features to tensors and build the dataset.
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    return TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--eval_data_dir",
default=None,
type=str,
help="The directory containing the evaluation dataset",
)
parser.add_argument(
"--eval_data_file",
default=None,
type=str,
required=True,
help="The file containing the evaluation dataset",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--label_filepath",
default=None,
type=str,
help="Path to the label file for the nyt dataset",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--multi_class", action="store_true", help="Whether it is a multi class classfication task.")
parser.add_argument("--class_file_name", type=str, default="classes.txt.acl", help="The file containing all class descriptions")
parser.add_argument("--pred_file_suffix", type=str, default=None, help="Suffix after the prediction file")
parser.add_argument("--pred_file_prefix", type=str, default=None, help="Prefix before the prediction file")
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--save_total_limit",
type=int,
default=None,
help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
# set to load the latest checkpoint for training
args.model_name_or_path = args.output_dir
all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
all_model_checkpoints.sort(reverse=True)
args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
logger.info("setting to load the model from %s", args.model_name_or_path)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
args.output_mode = output_modes[args.task_name]
num_labels = 2
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
file_path = args.eval_data_file
# if args.task_name == "nyt":
# all_cats_file = os.path.join(args.eval_data_dir, args.class_file_name)
all_cats_file = args.class_file_name
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, file_path, all_cats_file, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| 22,069 | 38.981884 | 150 | py |
ULR | ULR-main/single-encoder/train_natcat.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import argparse
import glob
import json
import logging
import os
import re
import random
import shutil
import pickle
from typing import Dict, List, Tuple
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
FlaubertConfig,
FlaubertForSequenceClassification,
FlaubertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from utils import compute_metrics
from utils import output_modes
from utils import processors
from utils import DataFiles
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger; configured in main() via logging.basicConfig.
logger = logging.getLogger(__name__)
# Flat tuple of every pretrained-model shortcut name across all supported
# architectures; used only to build the --model_name_or_path help text.
ALL_MODELS = sum(
    (
        tuple(conf.pretrained_config_archive_map.keys())
        for conf in (
            BertConfig,
            XLNetConfig,
            XLMConfig,
            RobertaConfig,
            DistilBertConfig,
            AlbertConfig,
            XLMRobertaConfig,
            FlaubertConfig,
        )
    ),
    (),
)
# Maps the --model_type CLI value to its (config, model, tokenizer) classes.
MODEL_CLASSES = {
    "bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
    "xlnet": (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    "xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    "roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
    "distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
    "albert": (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
    "xlmroberta": (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
    "flaubert": (FlaubertConfig, FlaubertForSequenceClassification, FlaubertTokenizer),
}
def set_seed(args):
    """Seed the python, numpy and torch RNGs from args.seed for reproducibility.

    When args.n_gpu > 0 the CUDA generators on all devices are seeded as well.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = glob.glob(os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix)))
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most args.save_total_limit remain.

    A falsy or non-positive save_total_limit disables rotation entirely.
    Deletion order comes from _sorted_checkpoints (oldest first).
    """
    limit = args.save_total_limit
    if not limit or limit <= 0:
        return
    existing = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    surplus = len(existing) - limit
    if surplus <= 0:
        return
    for stale in existing[:surplus]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
def train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=0):
    """Run one pass over train_dataloader, updating model/optimizer/scheduler.

    Returns (global_step, average loss over this call, optimizer, scheduler)
    so the caller can resume with the same state on the next data shard.
    NOTE(review): when args.fp16 is set this relies on `amp`, which is only
    imported inside main() — calling train() with fp16 from elsewhere would
    raise NameError; confirm intended.
    """
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    set_seed(args)  # Added here for reproductibility
    epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
    for step, batch in enumerate(epoch_iterator):
        # Skip past any already trained steps if resuming training
        # NOTE(review): no skipping actually happens here — the comment above
        # is a leftover from the upstream example; every batch is trained.
        model.train()
        batch = tuple(t.to(args.device) for t in batch)
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if args.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
            )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
        outputs = model(**inputs)
        loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
        if args.n_gpu > 1:
            loss = loss.mean()  # mean() to average on multi-gpu parallel training
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps
        if args.fp16:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        tr_loss += loss.item()
        # Only step the optimizer every gradient_accumulation_steps batches.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            optimizer.step()
            scheduler.step()  # Update learning rate schedule
            model.zero_grad()
            global_step += 1
            # Periodic logging (and optional mid-training evaluation) on the
            # main process only.
            if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                logs = {}
                if (
                    args.local_rank == -1 and args.evaluate_during_training
                ):  # Only evaluate when single GPU otherwise metrics may not average well
                    results = evaluate(args, model, tokenizer)
                    for key, value in results.items():
                        eval_key = "eval_{}".format(key)
                        logs[eval_key] = value
                loss_scalar = (tr_loss - logging_loss) / args.logging_steps
                learning_rate_scalar = scheduler.get_lr()[0]
                logs["learning_rate"] = learning_rate_scalar
                logs["loss"] = loss_scalar
                logging_loss = tr_loss
                for key, value in logs.items():
                    tb_writer.add_scalar(key, value, global_step)
                print(json.dumps({**logs, **{"step": global_step}}))
        if args.max_steps > 0 and global_step > args.max_steps:
            epoch_iterator.close()
            break
    return global_step, tr_loss / global_step, optimizer, scheduler
def evaluate(args, model, tokenizer, prefix=""):
    """Evaluate model on the task's dev set and write eval_results.txt.

    Returns a dict of metric name -> value (merged over both MNLI splits
    when the task is MNLI).
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
    eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        # FIXME(review): load_and_cache_examples has no 'evaluate' parameter
        # (its 4th parameter is file_path), so this call raises TypeError as
        # written — confirm the intended dev-set file path and pass it here.
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} *****".format(prefix))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                if args.model_type != "distilbert":
                    inputs["token_type_ids"] = (
                        batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None
                    )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits and gold labels across batches on the CPU.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        # NOTE(review): squeeze (not argmax) — presumably compute_metrics
        # expects raw scores for this task; confirm against utils.compute_metrics.
        preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results {} *****".format(prefix))
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    return results
def load_and_cache_examples(args, task, tokenizer, file_path):
    """Read examples from file_path, tokenize them, and return a TensorDataset.

    The dataset tensors are (input_ids, attention_mask, token_type_ids, label),
    all torch.long.
    """
    # NOTE(review): 'evaluate' here resolves to the module-level evaluate()
    # function (always truthy), so 'not evaluate' is always False and this
    # barrier never runs. It looks like a leftover from a signature where the
    # 4th parameter was a boolean 'evaluate' — confirm whether the distributed
    # barrier should fire for non-master processes during training.
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    processor = processors[task]()
    output_mode = output_modes[task]
    # Feature caching from the upstream example is disabled here; each shard
    # is re-encoded on every load.
    # cached_features_file = os.path.join(
    #     args.data_dir,
    #     "cached_{}_{}_{}".format(
    #         file_path,
    #         str(args.max_seq_length),
    #         str(task),
    #     ),
    # )
    # Load data features from cache or dataset file
    # if os.path.exists(cached_features_file) and not args.overwrite_cache:
    #     logger.info("Loading features from cached file %s", cached_features_file)
    #     features = torch.load(cached_features_file)
    # else:
    logger.info("Loading from dataset file at %s", file_path)
    label_list = processor.get_labels()
    examples = (processor.get_examples(file_path))
    logger.info("Encoding features from dataset file at %s", file_path)
    # if args.local_rank in [-1, 0]:
    #     logger.info("Saving features into cached file %s", cached_features_file)
    #     torch.save(features, cached_features_file)
    features = convert_examples_to_features(
        examples,
        tokenizer,
        label_list=label_list,
        max_length=args.max_seq_length,
        output_mode=output_mode,
        pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
    )
    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def main():
    """CLI entry point: parse args, set up (distributed) devices, then run
    sharded training over DataFiles and/or checkpoint evaluation.

    Returns the dict of evaluation results (empty when --do_eval is not set).
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
    )
    parser.add_argument(
        "--task_name",
        default=None,
        type=str,
        required=True,
        help="The name of the task to train selected in the list: " + ", ".join(processors.keys()),
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. Sequences longer "
        "than this will be truncated, sequences shorter will be padded.",
    )
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step.",
    )
    parser.add_argument(
        "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.",
    )
    parser.add_argument(
        "--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.",
    )
    parser.add_argument(
        "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
    parser.add_argument(
        "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.",
    )
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
    parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory",
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html",
    )
    parser.add_argument(
        "--save_total_limit",
        type=int,
        default=None,
        help="Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default",
    )
    parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
    parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
    args = parser.parse_args()
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    # If the output dir is non-empty and we are training without
    # --overwrite_output_dir, resume from the checkpoint with the largest
    # step suffix found inside it.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
    ):
        # set to load the latest checkpoint for training
        args.model_name_or_path = args.output_dir
        all_model_checkpoints = [ckpt for ckpt in os.listdir(args.model_name_or_path) if os.path.isdir(os.path.join(args.model_name_or_path, ckpt))]
        # NOTE(review): step suffixes are kept as strings here, so the sort
        # below is lexicographic ("9" > "10") — confirm checkpoint selection.
        all_model_checkpoints = [(ckpt.split("-")[-1] if "-" in ckpt else -1, ckpt) for ckpt in all_model_checkpoints]
        all_model_checkpoints.sort(reverse=True)
        args.model_name_or_path = os.path.join(args.model_name_or_path, all_model_checkpoints[0][1])
        logger.info("setting to load the model from %s", args.model_name_or_path)
    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    # Set seed
    set_seed(args)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    # NOTE(review): this processor instance is unused here (load_and_cache_examples
    # builds its own); kept for parity with the upstream example.
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    # Binary classification is hard-coded for every task.
    num_labels = 2
    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)
    # Training: iterate over data shards handed out by DataFiles; one call to
    # train() per shard, checkpointing (model + optimizer + shard progress)
    # after each shard.
    if args.do_train:
        datafiles = DataFiles(args.data_dir)
        if os.path.isfile(os.path.join(args.model_name_or_path, "datafiles.txt")):
            datafiles.load(os.path.join(args.model_name_or_path, "datafiles.txt"))
        global_step = 0
        shard_count = 0
        if args.local_rank in [-1, 0]:
            tb_writer = SummaryWriter()
        while True:
            todo_file = datafiles.next()
            if not todo_file:
                break
            if args.local_rank == 0:
                torch.distributed.barrier()
            train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, todo_file)
            args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
            train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
            train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
            if shard_count == 0: # if this is the first shard, create the optimizer or load from the previous checkpoint
                # Prepare optimizer and schedule (linear warmup and decay)
                no_decay = ["bias", "LayerNorm.weight"]
                optimizer_grouped_parameters = [
                    {
                        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                        "weight_decay": args.weight_decay,
                    },
                    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
                ]
                # NOTE(review): t_total assumes every shard has the same number
                # of batches as the first one — confirm acceptable for the LR schedule.
                t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs * len(datafiles.all_files) # 280 shards of data files in total
                optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
                scheduler = get_linear_schedule_with_warmup(
                    optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
                )
                # Check if saved optimizer or scheduler states exist
                if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
                    os.path.join(args.model_name_or_path, "scheduler.pt")
                ):
                    logger.info("loading optimizer and scheduler from %s", args.model_name_or_path)
                    # Load in optimizer and scheduler states
                    optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
                    scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
                if args.fp16:
                    try:
                        from apex import amp
                    except ImportError:
                        raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
                    model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
                # multi-gpu training (should be after apex fp16 initialization)
                if args.n_gpu > 1:
                    model = torch.nn.DataParallel(model)
                # Distributed training (should be after apex fp16 initialization)
                if args.local_rank != -1:
                    model = torch.nn.parallel.DistributedDataParallel(
                        model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
                    )
            # Train!
            logger.info("***** Running training *****")
            logger.info(" Num examples = %d", len(train_dataset))
            logger.info(" Num Epochs = %d", args.num_train_epochs)
            logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
            logger.info(
                " Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size
                * args.gradient_accumulation_steps
                * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
            )
            logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
            logger.info(" Total optimization steps = %d", t_total)
            if shard_count == 0:
                # Check if continuing training from a checkpoint
                if os.path.exists(args.model_name_or_path):
                    # set global_step to global_step of last saved checkpoint from model path
                    try:
                        global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
                    except ValueError:
                        global_step = 0
                    # NOTE(review): epochs_trained is computed but never used.
                    epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
                    logger.info(" Continuing training from checkpoint %s", args.model_name_or_path)
                    logger.info(" Continuing training from global step %d", global_step)
            global_step, tr_loss, optimizer, scheduler = train(args, train_dataset, train_dataloader, model, tokenizer, optimizer, scheduler, tb_writer, global_step=global_step)
            logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
            # Save model checkpoint
            output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            model_to_save = (
                model.module if hasattr(model, "module") else model
            )  # Take care of distributed/parallel training
            model_to_save.save_pretrained(output_dir)
            tokenizer.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, "training_args.bin"))
            logger.info("Saving model checkpoint to %s", output_dir)
            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            # Persist shard progress so a restart resumes at the right file.
            datafiles.save(os.path.join(output_dir, "datafiles.txt"))
            logger.info("Saving optimizer and scheduler states to %s", output_dir)
            _rotate_checkpoints(args, "checkpoint")
            shard_count += 1
        if args.local_rank in [-1, 0]:
            tb_writer.close()
    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)
        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
        # Load a trained model and vocabulary that you have fine-tuned
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
# Script entry point: run the full train/eval pipeline defined above.
if __name__ == "__main__":
    main()
| 31,833 | 41.902965 | 177 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/main.py | import argparse
from os import path, makedirs
from experiments import select_experiment
import torch
import yaml
import os
def create_dir_structure(config):
    """Build the run's directory layout: one path per artifact kind.

    Each entry is <base_dir>/<experiment>/<subdir>/<project_name> for the
    subdirs ckpt/config/generated/log; when the DATAPATH environment
    variable is set, it is prepended to every path.
    """
    structure = {}
    for subdir in ("ckpt", "config", "generated", "log"):
        target = path.join(config["base_dir"], config["experiment"], subdir, config["project_name"])
        if "DATAPATH" in os.environ:
            target = path.join(os.environ["DATAPATH"], target)
        structure[subdir] = target
    return structure
def _read_yaml(file_path):
    # Helper: parse a YAML file into a Python dict.
    with open(file_path, "r") as f:
        return yaml.load(f, Loader=yaml.FullLoader)


def _write_yaml(file_path, data):
    # Helper: persist a configuration dict as YAML.
    with open(file_path, "w") as f:
        yaml.dump(data, f, default_flow_style=False)


def load_parameters(config_name, restart, debug, project_name):
    """Load the YAML run configuration and prepare the output directories.

    Reads the config file, overrides the project name (forced to 'debug' in
    debug mode), creates/locates the run directory structure and reconciles
    the given config with a previously saved one: on restart the saved config
    is loaded (raising if absent); on a fresh start the directories are
    created and, if a saved config already exists, the user is interactively
    asked whether to resume from it or overwrite it.

    :param config_name: path to the YAML configuration file to load.
    :param restart: whether the run should resume from a saved config.
    :param debug: debug flag; stored in the returned config and forces the
        project name to 'debug'.
    :param project_name: unique name of the (re-)started run.
    :return: tuple ``(cdict, dir_structure, restart)`` — the effective config
        dict, the run directory mapping, and the (possibly updated) restart flag.
    :raises FileNotFoundError: if ``restart`` is set but no saved config exists.
    """
    cdict = _read_yaml(config_name)

    # Debug runs are always stored under the reserved 'debug' project name.
    if debug:
        cdict['general']['project_name'] = 'debug'
    else:
        cdict['general']['project_name'] = project_name

    dir_structure = create_dir_structure(cdict["general"])
    saved_config = path.join(dir_structure["config"], "config.yaml")

    if restart:
        if path.isfile(saved_config):
            # Resume: the saved config takes precedence over the fresh one.
            cdict = _read_yaml(saved_config)
        else:
            raise FileNotFoundError("No saved config file found but model is intended to be restarted. Aborting....")
    else:
        for d in dir_structure:
            makedirs(dir_structure[d], exist_ok=True)
        if path.isfile(saved_config) and not debug:
            # A previous run exists: ask the user whether to resume it or
            # overwrite its saved config with the freshly loaded one.
            print("\033[93m" + "WARNING: Model has been started somewhen earlier: Resume training (y/n)?" + "\033[0m")
            while True:
                answer = input()
                if answer == "y" or answer == "yes":
                    cdict = _read_yaml(saved_config)
                    restart = True
                    break
                elif answer == "n" or answer == "no":
                    _write_yaml(saved_config, cdict)
                    break
                else:
                    print("\033[93m" + "Invalid answer! Try again!(y/n)" + "\033[0m")
        else:
            _write_yaml(saved_config, cdict)

    cdict['general']['debug'] = debug
    return cdict, dir_structure, restart
# Script entry point: parse CLI arguments, load the run configuration and
# dispatch to the selected experiment in train or test mode.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config", type=str,
                        default="config/latent_flow_net.yaml",
                        help="Define config file")
    parser.add_argument('-p','--project_name',type=str,default='ii2v',help='unique name for the training run to be (re-)started.')
    parser.add_argument("-r","--restart", default=False,action="store_true",help="Whether training should be resumed.")
    parser.add_argument("-d", "--debug", default=False, action="store_true", help="Whether training should be resumed.")
    parser.add_argument("--gpu",default=[0], type=int,
                        nargs="+",help="GPU to use.")
    parser.add_argument("-m","--mode",default="train",type=str,choices=["train","test"],help="Whether to start in train or infer mode?")
    parser.add_argument("--test_mode",default="metrics",type=str, choices=["noise_test","metrics","fvd",'diversity','render'], help="The mode in which the test-method should be executed.")
    parser.add_argument("--metrics_on_patches", default=False,action="store_true",help="Whether to run evaluation on patches (if available or not).")
    parser.add_argument("--best_ckpt", default=False, action="store_true",help="Whether to use the best ckpt as measured by LPIPS (otherwise, latest_ckpt is used)")
    args = parser.parse_args()
    # "test" mode always resumes from a previously saved config (restart is
    # forced to True when --mode test is given).
    config, structure, restart = load_parameters(args.config, args.restart or args.mode == "test",args.debug,args.project_name)
    config["general"]["restart"] = restart
    config["general"]["mode"] = args.mode
    # config["general"]["first_stage"] = args.first_stage
    if len(args.gpu) == 1:
        # Single-GPU run: use the requested CUDA device, falling back to CPU
        # when CUDA is unavailable or a negative id was given.
        gpus = torch.device(
            f"cuda:{int(args.gpu[0])}"
            if torch.cuda.is_available() and int(args.gpu[0]) >= 0
            else "cpu"
        )
        # NOTE(review): set_device is also reached when gpus is the CPU
        # device — confirm torch tolerates a non-CUDA device here.
        torch.cuda.set_device(gpus)
    else:
        # Multi-GPU run: pass the raw list of integer device ids onward.
        # NOTE(review): the loop variable shadows the builtin `id`.
        gpus = [int(id) for id in args.gpu]
    mode = config["general"]["mode"]
    # NOTE(review): assumes a "testing" section always exists in the config —
    # verify against the YAML files, since the update below is unconditional.
    config["testing"].update({"best_ckpt": args.best_ckpt})
    if mode == "test" and "testing" in config and "metrics_on_patches" in config["testing"]:
        config["testing"]["metrics_on_patches"] = args.metrics_on_patches
    experiment = select_experiment(config, structure, gpus)
    # start selected experiment
    if mode == "train":
        experiment.train()
    elif mode == "test":
        config["testing"].update({"mode": args.test_mode})
        experiment.test()
    else:
        raise ValueError(f"\"mode\"-parameter should be either \"train\" or \"infer\" but is actually {mode}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.